/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0	/* Define to KTR_SPARE. */

#ifndef SOFTUPDATES
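
/*
 * In kernels built without SOFTUPDATES these stubs stand in for the real
 * implementation: entry points that should never be reached on a
 * non-softdep mount simply panic, while the handful that may legitimately
 * be called (mount, fsync, flushworklist, ...) succeed as no-ops.
 */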
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

	panic("softdep_unmount called");
}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{

	panic("softdep_setup_sbupdate called");
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

	panic("softdep_change_directoryentry_offset called");
}

void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	int isrmdir;
{

	panic("softdep_setup_remove called");
}

void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	ino_t newinum;
	int isrmdir;
{

	panic("softdep_setup_directory_change called");
}

void
softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
	struct mount *mp;
	struct buf *bp;
	ufs2_daddr_t blkno;
	int frags;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_inofree(mp, bp, ino, wkhd)
	struct mount *mp;
	struct buf *bp;
	ino_t ino;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_unlink(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_dotdot_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

int
softdep_prealloc(vp, waitok)
	struct vnode *vp;
	int waitok;
{

	panic("%s called", __FUNCTION__);
}

int
softdep_journal_lookup(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{

	return (ENOENT);
}

void
softdep_change_linkcnt(ip)
	struct inode *ip;
{

	panic("softdep_change_linkcnt called");
}

void
softdep_load_inodeblock(ip)
	struct inode *ip;
{

	panic("softdep_load_inodeblock called");
}

void
softdep_update_inodeblock(ip, bp, waitfor)
	struct inode *ip;
	struct buf *bp;
	int waitfor;
{

	panic("softdep_update_inodeblock called");
}

int
softdep_fsync(vp)
	struct vnode *vp;	/* the "in_core" copy of the inode */
{

	return (0);
}

void
softdep_fsync_mountdev(vp)
	struct vnode *vp;
{

	return;
}

int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{

	*countp = 0;
	return (0);
}
int
softdep_sync_metadata(struct vnode *vp)
{

	panic("softdep_sync_metadata called");
}

int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	panic("softdep_sync_buf called");
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_depcnt,
		      int softdep_accdepcnt,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_depcnt,
	(void) softdep_accdepcnt;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_appendwork called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_appendwork called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "current dependencies written");

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");
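
/*
 * Each SOFTDEP_TYPE() use below defines the malloc type for one
 * dependency structure and exports its total/highuse/current/write
 * counters under the debug.softdep sysctl trees declared above.
 */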
SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");

static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");

static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");

#define	M_SOFTDEP_FLAGS	(M_WAITOK)

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	NULL,
	M_PAGEDEP,
	M_INODEDEP,
	M_BMSAFEMAP,
	M_NEWBLK,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK,
	M_FREEWORK,
	M_FREEDEP,
	M_JADDREF,
	M_JREMREF,
	M_JMVREF,
	M_JNEWBLK,
	M_JFREEBLK,
	M_JFREEFRAG,
	M_JSEG,
	M_JSEGDEP,
	M_SBDEP,
	M_JTRUNC,
	M_JFSYNC,
	M_SENTINEL
};

#define	DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define	TYPENAME(type)	\
	((unsigned)(type) <= D_LAST && (unsigned)(type) >= D_FIRST ? \
	memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */

#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)

/*
 * Internal function prototypes.
 */
static	void check_clear_deps(struct mount *);
static	void softdep_error(char *, int);
static	int softdep_process_worklist(struct mount *, int);
static	int softdep_waitidle(struct mount *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static	int check_inodedep_free(struct inodedep *);
static	void clear_remove(struct mount *);
static	void clear_inodedeps(struct mount *);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *, int);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
	    struct buf**, int);
static	int handle_written_inodeblock(struct inodedep *, struct buf *, int);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *, int);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
	    struct jrefrec *);
static	void handle_allocdirect_partdone(struct allocdirect *,
	    struct workhead *);
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	    struct workhead *);
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	    struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	    int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk *);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *, u_long);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t, u_long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
	    struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int softdep_request_cleanup_flush(struct mount *, struct ufsmount *);
static	void schedule_cleanup(struct mount *);
static	void softdep_ast_cleanup_proc(struct thread *);
static	struct ufsmount *softdep_bp_to_mp(struct buf *bp);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void *);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(struct ufsmount *);
static	void worklist_speedup(struct mount *);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct ufsmount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	bool softdep_excess_items(struct ufsmount *, int);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	void adjust_newfreework(struct freeblks *, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

/*
 * Global lock over all of soft updates.
 */
static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "global softdep", MTX_DEF);

#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)

/*
 * Per-filesystem soft-updates locking.
 */
#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
				    RA_WLOCKED)

#define BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
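/*
 * With INVARIANTS the insert/remove macros below also record the caller's
 * function and line in the work item and assert lock ownership for the
 * locked variants; the _UNLOCKED forms skip only the lock assertion.
 */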
#ifndef /* NOT */ INVARIANTS
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE

#else /* INVARIANTS */
static	void worklist_insert(struct workhead *, struct worklist *, int,
	const char *, int);
static	void worklist_remove(struct worklist *, int, const char *, int);

#define WORKLIST_INSERT(head, item) \
	worklist_insert(head, item, 1, __func__, __LINE__)
#define WORKLIST_INSERT_UNLOCKED(head, item)\
	worklist_insert(head, item, 0, __func__, __LINE__)
#define WORKLIST_REMOVE(item)\
	worklist_remove(item, 1, __func__, __LINE__)
#define WORKLIST_REMOVE_UNLOCKED(item)\
	worklist_remove(item, 0, __func__, __LINE__)

static void
worklist_insert(head, item, locked, func, line)
	struct workhead *head;
	struct worklist *item;
	int locked;
	const char *func;
	int line;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list, "
		    "added in function %s at line %d",
		    item, TYPENAME(item->wk_type), item->wk_state,
		    item->wk_func, item->wk_line);
	item->wk_state |= ONWORKLIST;
	item->wk_func = func;
	item->wk_line = line;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked, func, line)
	struct worklist *item;
	int locked;
	const char *func;
	int line;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list, "
		    "removed in function %s at line %d",
		    item, TYPENAME(item->wk_type), item->wk_state,
		    item->wk_func, item->wk_line);
	item->wk_state &= ~ONWORKLIST;
	item->wk_func = func;
	item->wk_line = line;
	LIST_REMOVE(item, wk_list);
}
#endif /* INVARIANTS */

/*
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}

/*
 * Move journal work from one list to another. Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		else if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
static	void workitem_reassign(struct worklist *, int);

#define	WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define	WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

#ifdef INVARIANTS
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list, "
		    "added in function %s at line %d",
		    TYPENAME(item->wk_type), item->wk_state,
		    item->wk_func, item->wk_line);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	atomic_subtract_long(&dep_current[item->wk_type], 1);
	ump->softdep_curdeps[item->wk_type] -= 1;
#ifdef INVARIANTS
	LIST_REMOVE(item, wk_all);
#endif
	free(item, DtoM(type));
}
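
/*
 * workitem_alloc() charges a new item against both the global
 * dep_current[] counters (under the global lock) and the per-mount
 * softdep_curdeps[] counters (under the per-filesystem lock);
 * workitem_free() and workitem_reassign() undo or move those charges.
 */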
static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_GBLLOCK(&lk);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	FREE_GBLLOCK(&lk);
	ACQUIRE_LOCK(ump);
	ump->softdep_curdeps[type] += 1;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
#ifdef INVARIANTS
	LIST_INSERT_HEAD(&ump->softdep_alldeps[type], item, wk_all);
#endif
	FREE_LOCK(ump);
}

static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ump->softdep_curdeps[item->wk_type] -= 1;
	ump->softdep_curdeps[newtype] += 1;
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ACQUIRE_GBLLOCK(&lk);
	dep_current[newtype]++;
	dep_current[item->wk_type]--;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	FREE_GBLLOCK(&lk);
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_flush_threads;	/* number of softdep flushing threads */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
static int stat_emptyjblocks; /* Number of potentially empty journal blocks */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
    &stat_flush_threads, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_worklist_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures,
    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_failures, 0, "");

SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
    &stat_emptyjblocks, 0, "");

SYSCTL_DECL(_vfs_ffs);

/* Whether to recompute the summary at mount time */
static int compute_summary_at_mount = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");
static int print_threads = 0;
SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
    &print_threads, 0, "Notify flusher thread start/stop");

/* List of all filesystems mounted with soft updates */
static TAILQ_HEAD(, mount_softdeps) softdepmounts;
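
/*
 * softdep_speedup() walks this list to find another filesystem whose
 * flusher thread can help when global dependency limits are hit.
 */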

/*
 * This function cleans the worklist for a filesystem.
 * Each filesystem running with soft dependencies gets its own
 * thread to run in this function. The thread is started up in
 * softdep_mount and shutdown in softdep_unmount. They show up
 * as part of the kernel "bufdaemon" process whose process
 * entry is available in bufdaemonproc.
 */
static int searchfailed;
extern struct proc *bufdaemonproc;
static void
softdep_flush(addr)
	void *addr;
{
	struct mount *mp;
	struct thread *td;
	struct ufsmount *ump;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;
	mp = (struct mount *)addr;
	ump = VFSTOUFS(mp);
	atomic_add_int(&stat_flush_threads, 1);
	ACQUIRE_LOCK(ump);
	ump->softdep_flags &= ~FLUSH_STARTING;
	wakeup(&ump->softdep_flushtd);
	FREE_LOCK(ump);
	if (print_threads) {
		if (stat_flush_threads == 1)
			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
			    bufdaemonproc->p_pid);
		printf("Start thread %s\n", td->td_name);
	}
	for (;;) {
		while (softdep_process_worklist(mp, 0) > 0 ||
		    (MOUNTEDSUJ(mp) &&
		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
			kthread_suspend_check();
		ACQUIRE_LOCK(ump);
		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
			    "sdflush", hz / 2);
		ump->softdep_flags &= ~FLUSH_CLEANUP;
		/*
		 * Check to see if we are done and need to exit.
		 */
		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
			FREE_LOCK(ump);
			continue;
		}
		ump->softdep_flags &= ~FLUSH_EXIT;
		FREE_LOCK(ump);
		wakeup(&ump->softdep_flags);
		if (print_threads)
			printf("Stop thread %s: searchfailed %d, "
			    "did cleanups %d\n", td->td_name, searchfailed,
			    ump->um_softdep->sd_cleanups);
		atomic_subtract_int(&stat_flush_threads, 1);
		kthread_exit();
		panic("kthread_exit failed\n");
	}
}

static void
worklist_speedup(mp)
	struct mount *mp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
		ump->softdep_flags |= FLUSH_CLEANUP;
	wakeup(&ump->softdep_flushtd);
}

static void
softdep_send_speedup(struct ufsmount *ump, size_t shortage, u_int flags)
{
	struct buf *bp;

	if ((ump->um_flags & UM_CANSPEEDUP) == 0)
		return;

	bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
	bp->b_iocmd = BIO_SPEEDUP;
	bp->b_ioflags = flags;
	bp->b_bcount = shortage;
	g_vfs_strategy(ump->um_bo, bp);
	bufwait(bp);
	free(bp, M_TRIM);
}

static int
softdep_speedup(ump)
	struct ufsmount *ump;
{
	struct ufsmount *altump;
	struct mount_softdeps *sdp;

	LOCK_OWNED(ump);
	worklist_speedup(ump->um_mountp);
	bd_speedup();
	/*
	 * If we have global shortages, then we need other
	 * filesystems to help with the cleanup. Here we wakeup a
	 * flusher thread for a filesystem that is over its fair
	 * share of resources.
	 */
	if (req_clear_inodedeps || req_clear_remove) {
		ACQUIRE_GBLLOCK(&lk);
		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
			if ((altump = sdp->sd_ump) == ump)
				continue;
			if (((req_clear_inodedeps &&
			    altump->softdep_curdeps[D_INODEDEP] >
			    max_softdeps / stat_flush_threads) ||
			    (req_clear_remove &&
			    altump->softdep_curdeps[D_DIRREM] >
			    (max_softdeps / 2) / stat_flush_threads)) &&
			    TRY_ACQUIRE_LOCK(altump))
				break;
		}
		if (sdp == NULL) {
			searchfailed++;
			FREE_GBLLOCK(&lk);
		} else {
			/*
			 * Move to the end of the list so we pick a
			 * different one on our next try.
			 */
			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
			FREE_GBLLOCK(&lk);
			if ((altump->softdep_flags &
			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
				altump->softdep_flags |= FLUSH_CLEANUP;
			altump->um_softdep->sd_cleanups++;
			wakeup(&altump->softdep_flushtd);
			FREE_LOCK(altump);
		}
	}
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */
static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup(wk->wk_mp);
}

/*
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	WORKLIST_REMOVE(wk);
	ump->softdep_on_worklist -= 1;
}

static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	wk->wk_state |= IOWAITING;
	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
}
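
/*
 * wake_worklist() and wait_worklist() pair through the IOWAITING flag:
 * a thread that must wait for an in-progress item sleeps on the work
 * item, and whoever finishes it clears the flag and issues the wakeup.
 */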

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	if (MOUNTEDSOFTDEP(mp) == 0)
		return (0);
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	check_clear_deps(mp);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		check_clear_deps(mp);
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(ump);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(ump);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. This gives the syncer thread the opportunity
		 * to pause if appropriate.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(ump);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space. Any other process which attempts to flush these will
 * be unable as we have the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
	}
}

/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space. This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space. This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(ump);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(ump);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(ump);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(ump);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(ump);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(ump);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);	/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later. Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	LIST_REMOVE(&sentinel, wk_list);
	PRELE(curproc);
	return (matchcnt);
}
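/*
 * Editorial sketch (an addition, not original code): the sentinel in
 * process_worklist_item() above keeps our place while the lock is
 * dropped around each handler.  The core of the loop is:
 *
 *	LIST_INSERT_HEAD(&pending, &sentinel, wk_list);
 *	while ((wk = LIST_NEXT(&sentinel, wk_list)) != NULL) {
 *		if (wk->wk_type == D_SENTINEL) {
 *			// another thread's marker: hop over it
 *			LIST_REMOVE(&sentinel, wk_list);
 *			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
 *			continue;
 *		}
 *		// unlink wk, drop the lock, run the handler, relock
 *	}
 *	LIST_REMOVE(&sentinel, wk_list);
 *
 * Failed items are requeued with WK_HEAD, which places them before the
 * sentinel so they are not retried in the same pass.
 */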
/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	struct ufsmount *ump;
	int dirty;

	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
		return (0);
	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
	    ("softdep_move_dependencies called on non-softdep filesystem"));
	dirty = 0;
	wktail = NULL;
	ump = VFSTOUFS(wk->wk_mp);
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(ump);

	return (dirty);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	struct ufsmount *ump;
	int count, error;

	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	error = 0;
	ump = VFSTOUFS(oldmnt);
	devvp = ump->um_devvp;
	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp);
		if (error != 0)
			break;
	}
	return (error);
}

#define	SU_WAITIDLE_RETRIES	20
static int
softdep_waitidle(struct mount *mp, int flags __unused)
{
	struct ufsmount *ump;
	struct vnode *devvp;
	struct thread *td;
	int error, i;

	ump = VFSTOUFS(mp);
	devvp = ump->um_devvp;
	td = curthread;
	error = 0;
	ACQUIRE_LOCK(ump);
	for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) {
		ump->softdep_req = 1;
		KASSERT((flags & FORCECLOSE) == 0 ||
		    ump->softdep_on_worklist == 0,
		    ("softdep_waitidle: work added after flush"));
		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP,
		    "softdeps", 10 * hz);
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp);
		ACQUIRE_LOCK(ump);
		if (error != 0)
			break;
	}
	ump->softdep_req = 0;
	if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) {
		error = EBUSY;
		printf("softdep_waitidle: Failed to flush worklist for %p\n",
		    mp);
	}
	FREE_LOCK(ump);
	return (error);
}
2004 */ 2005 int 2006 softdep_flushfiles(oldmnt, flags, td) 2007 struct mount *oldmnt; 2008 int flags; 2009 struct thread *td; 2010 { 2011 #ifdef QUOTA 2012 struct ufsmount *ump; 2013 int i; 2014 #endif 2015 int error, early, depcount, loopcnt, retry_flush_count, retry; 2016 int morework; 2017 2018 KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0, 2019 ("softdep_flushfiles called on non-softdep filesystem")); 2020 loopcnt = 10; 2021 retry_flush_count = 3; 2022 retry_flush: 2023 error = 0; 2024 2025 /* 2026 * Alternately flush the vnodes associated with the mount 2027 * point and process any dependencies that the flushing 2028 * creates. In theory, this loop can happen at most twice, 2029 * but we give it a few extra just to be sure. 2030 */ 2031 for (; loopcnt > 0; loopcnt--) { 2032 /* 2033 * Do another flush in case any vnodes were brought in 2034 * as part of the cleanup operations. 2035 */ 2036 early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag & 2037 MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH; 2038 if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0) 2039 break; 2040 if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || 2041 depcount == 0) 2042 break; 2043 } 2044 /* 2045 * If we are unmounting then it is an error to fail. If we 2046 * are simply trying to downgrade to read-only, then filesystem 2047 * activity can keep us busy forever, so we just fail with EBUSY. 2048 */ 2049 if (loopcnt == 0) { 2050 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 2051 panic("softdep_flushfiles: looping"); 2052 error = EBUSY; 2053 } 2054 if (!error) 2055 error = softdep_waitidle(oldmnt, flags); 2056 if (!error) { 2057 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { 2058 retry = 0; 2059 MNT_ILOCK(oldmnt); 2060 morework = oldmnt->mnt_nvnodelistsize > 0; 2061 #ifdef QUOTA 2062 ump = VFSTOUFS(oldmnt); 2063 UFS_LOCK(ump); 2064 for (i = 0; i < MAXQUOTAS; i++) { 2065 if (ump->um_quotas[i] != NULLVP) 2066 morework = 1; 2067 } 2068 UFS_UNLOCK(ump); 2069 #endif 2070 if (morework) { 2071 if (--retry_flush_count > 0) { 2072 retry = 1; 2073 loopcnt = 3; 2074 } else 2075 error = EBUSY; 2076 } 2077 MNT_IUNLOCK(oldmnt); 2078 if (retry) 2079 goto retry_flush; 2080 } 2081 } 2082 return (error); 2083 } 2084 2085 /* 2086 * Structure hashing. 2087 * 2088 * There are four types of structures that can be looked up: 2089 * 1) pagedep structures identified by mount point, inode number, 2090 * and logical block. 2091 * 2) inodedep structures identified by mount point and inode number. 2092 * 3) newblk structures identified by mount point and 2093 * physical block number. 2094 * 4) bmsafemap structures identified by mount point and 2095 * cylinder group number. 2096 * 2097 * The "pagedep" and "inodedep" dependency structures are hashed 2098 * separately from the file blocks and inodes to which they correspond. 2099 * This separation helps when the in-memory copy of an inode or 2100 * file block must be replaced. It also obviates the need to access 2101 * an inode or file page when simply updating (or de-allocating) 2102 * dependency structures. Lookup of newblk structures is needed to 2103 * find newly allocated blocks when trying to associate them with 2104 * their allocdirect or allocindir structure. 2105 * 2106 * The lookup routines optionally create and hash a new instance when 2107 * an existing entry is not found. The bmsafemap lookup routine always 2108 * allocates a new structure if an existing one is not found. 
2109 */ 2110 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 2111 2112 /* 2113 * Structures and routines associated with pagedep caching. 2114 */ 2115 #define PAGEDEP_HASH(ump, inum, lbn) \ 2116 (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) 2117 2118 static int 2119 pagedep_find(pagedephd, ino, lbn, pagedeppp) 2120 struct pagedep_hashhead *pagedephd; 2121 ino_t ino; 2122 ufs_lbn_t lbn; 2123 struct pagedep **pagedeppp; 2124 { 2125 struct pagedep *pagedep; 2126 2127 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 2128 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { 2129 *pagedeppp = pagedep; 2130 return (1); 2131 } 2132 } 2133 *pagedeppp = NULL; 2134 return (0); 2135 } 2136 /* 2137 * Look up a pagedep. Return 1 if found, 0 otherwise. 2138 * If not found, allocate if DEPALLOC flag is passed. 2139 * Found or allocated entry is returned in pagedeppp. 2140 */ 2141 static int 2142 pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) 2143 struct mount *mp; 2144 struct buf *bp; 2145 ino_t ino; 2146 ufs_lbn_t lbn; 2147 int flags; 2148 struct pagedep **pagedeppp; 2149 { 2150 struct pagedep *pagedep; 2151 struct pagedep_hashhead *pagedephd; 2152 struct worklist *wk; 2153 struct ufsmount *ump; 2154 int ret; 2155 int i; 2156 2157 ump = VFSTOUFS(mp); 2158 LOCK_OWNED(ump); 2159 if (bp) { 2160 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 2161 if (wk->wk_type == D_PAGEDEP) { 2162 *pagedeppp = WK_PAGEDEP(wk); 2163 return (1); 2164 } 2165 } 2166 } 2167 pagedephd = PAGEDEP_HASH(ump, ino, lbn); 2168 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2169 if (ret) { 2170 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) 2171 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); 2172 return (1); 2173 } 2174 if ((flags & DEPALLOC) == 0) 2175 return (0); 2176 FREE_LOCK(ump); 2177 pagedep = malloc(sizeof(struct pagedep), 2178 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); 2179 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); 2180 ACQUIRE_LOCK(ump); 2181 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2182 if (*pagedeppp) { 2183 /* 2184 * This should never happen since we only create pagedeps 2185 * with the vnode lock held. Could be an assert. 2186 */ 2187 WORKITEM_FREE(pagedep, D_PAGEDEP); 2188 return (ret); 2189 } 2190 pagedep->pd_ino = ino; 2191 pagedep->pd_lbn = lbn; 2192 LIST_INIT(&pagedep->pd_dirremhd); 2193 LIST_INIT(&pagedep->pd_pendinghd); 2194 for (i = 0; i < DAHASHSZ; i++) 2195 LIST_INIT(&pagedep->pd_diraddhd[i]); 2196 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 2197 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2198 *pagedeppp = pagedep; 2199 return (0); 2200 } 2201 2202 /* 2203 * Structures and routines associated with inodedep caching. 2204 */ 2205 #define INODEDEP_HASH(ump, inum) \ 2206 (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) 2207 2208 static int 2209 inodedep_find(inodedephd, inum, inodedeppp) 2210 struct inodedep_hashhead *inodedephd; 2211 ino_t inum; 2212 struct inodedep **inodedeppp; 2213 { 2214 struct inodedep *inodedep; 2215 2216 LIST_FOREACH(inodedep, inodedephd, id_hash) 2217 if (inum == inodedep->id_ino) 2218 break; 2219 if (inodedep) { 2220 *inodedeppp = inodedep; 2221 return (1); 2222 } 2223 *inodedeppp = NULL; 2224 2225 return (0); 2226 } 2227 /* 2228 * Look up an inodedep. Return 1 if found, 0 if not found. 2229 * If not found, allocate if DEPALLOC flag is passed. 2230 * Found or allocated entry is returned in inodedeppp. 
2231 */ 2232 static int 2233 inodedep_lookup(mp, inum, flags, inodedeppp) 2234 struct mount *mp; 2235 ino_t inum; 2236 int flags; 2237 struct inodedep **inodedeppp; 2238 { 2239 struct inodedep *inodedep; 2240 struct inodedep_hashhead *inodedephd; 2241 struct ufsmount *ump; 2242 struct fs *fs; 2243 2244 ump = VFSTOUFS(mp); 2245 LOCK_OWNED(ump); 2246 fs = ump->um_fs; 2247 inodedephd = INODEDEP_HASH(ump, inum); 2248 2249 if (inodedep_find(inodedephd, inum, inodedeppp)) 2250 return (1); 2251 if ((flags & DEPALLOC) == 0) 2252 return (0); 2253 /* 2254 * If the system is over its limit and our filesystem is 2255 * responsible for more than our share of that usage and 2256 * we are not in a rush, request some inodedep cleanup. 2257 */ 2258 if (softdep_excess_items(ump, D_INODEDEP)) 2259 schedule_cleanup(mp); 2260 else 2261 FREE_LOCK(ump); 2262 inodedep = malloc(sizeof(struct inodedep), 2263 M_INODEDEP, M_SOFTDEP_FLAGS); 2264 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); 2265 ACQUIRE_LOCK(ump); 2266 if (inodedep_find(inodedephd, inum, inodedeppp)) { 2267 WORKITEM_FREE(inodedep, D_INODEDEP); 2268 return (1); 2269 } 2270 inodedep->id_fs = fs; 2271 inodedep->id_ino = inum; 2272 inodedep->id_state = ALLCOMPLETE; 2273 inodedep->id_nlinkdelta = 0; 2274 inodedep->id_nlinkwrote = -1; 2275 inodedep->id_savedino1 = NULL; 2276 inodedep->id_savedsize = -1; 2277 inodedep->id_savedextsize = -1; 2278 inodedep->id_savednlink = -1; 2279 inodedep->id_bmsafemap = NULL; 2280 inodedep->id_mkdiradd = NULL; 2281 LIST_INIT(&inodedep->id_dirremhd); 2282 LIST_INIT(&inodedep->id_pendinghd); 2283 LIST_INIT(&inodedep->id_inowait); 2284 LIST_INIT(&inodedep->id_bufwait); 2285 TAILQ_INIT(&inodedep->id_inoreflst); 2286 TAILQ_INIT(&inodedep->id_inoupdt); 2287 TAILQ_INIT(&inodedep->id_newinoupdt); 2288 TAILQ_INIT(&inodedep->id_extupdt); 2289 TAILQ_INIT(&inodedep->id_newextupdt); 2290 TAILQ_INIT(&inodedep->id_freeblklst); 2291 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 2292 *inodedeppp = inodedep; 2293 return (0); 2294 } 2295 2296 /* 2297 * Structures and routines associated with newblk caching. 2298 */ 2299 #define NEWBLK_HASH(ump, inum) \ 2300 (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) 2301 2302 static int 2303 newblk_find(newblkhd, newblkno, flags, newblkpp) 2304 struct newblk_hashhead *newblkhd; 2305 ufs2_daddr_t newblkno; 2306 int flags; 2307 struct newblk **newblkpp; 2308 { 2309 struct newblk *newblk; 2310 2311 LIST_FOREACH(newblk, newblkhd, nb_hash) { 2312 if (newblkno != newblk->nb_newblkno) 2313 continue; 2314 /* 2315 * If we're creating a new dependency don't match those that 2316 * have already been converted to allocdirects. This is for 2317 * a frag extend. 2318 */ 2319 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) 2320 continue; 2321 break; 2322 } 2323 if (newblk) { 2324 *newblkpp = newblk; 2325 return (1); 2326 } 2327 *newblkpp = NULL; 2328 return (0); 2329 } 2330 2331 /* 2332 * Look up a newblk. Return 1 if found, 0 if not found. 2333 * If not found, allocate if DEPALLOC flag is passed. 2334 * Found or allocated entry is returned in newblkpp. 
2335 */ 2336 static int 2337 newblk_lookup(mp, newblkno, flags, newblkpp) 2338 struct mount *mp; 2339 ufs2_daddr_t newblkno; 2340 int flags; 2341 struct newblk **newblkpp; 2342 { 2343 struct newblk *newblk; 2344 struct newblk_hashhead *newblkhd; 2345 struct ufsmount *ump; 2346 2347 ump = VFSTOUFS(mp); 2348 LOCK_OWNED(ump); 2349 newblkhd = NEWBLK_HASH(ump, newblkno); 2350 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) 2351 return (1); 2352 if ((flags & DEPALLOC) == 0) 2353 return (0); 2354 if (softdep_excess_items(ump, D_NEWBLK) || 2355 softdep_excess_items(ump, D_ALLOCDIRECT) || 2356 softdep_excess_items(ump, D_ALLOCINDIR)) 2357 schedule_cleanup(mp); 2358 else 2359 FREE_LOCK(ump); 2360 newblk = malloc(sizeof(union allblk), M_NEWBLK, 2361 M_SOFTDEP_FLAGS | M_ZERO); 2362 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); 2363 ACQUIRE_LOCK(ump); 2364 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { 2365 WORKITEM_FREE(newblk, D_NEWBLK); 2366 return (1); 2367 } 2368 newblk->nb_freefrag = NULL; 2369 LIST_INIT(&newblk->nb_indirdeps); 2370 LIST_INIT(&newblk->nb_newdirblk); 2371 LIST_INIT(&newblk->nb_jwork); 2372 newblk->nb_state = ATTACHED; 2373 newblk->nb_newblkno = newblkno; 2374 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 2375 *newblkpp = newblk; 2376 return (0); 2377 } 2378 2379 /* 2380 * Structures and routines associated with freed indirect block caching. 2381 */ 2382 #define INDIR_HASH(ump, blkno) \ 2383 (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) 2384 2385 /* 2386 * Lookup an indirect block in the indir hash table. The freework is 2387 * removed and potentially freed. The caller must do a blocking journal 2388 * write before writing to the blkno. 2389 */ 2390 static int 2391 indirblk_lookup(mp, blkno) 2392 struct mount *mp; 2393 ufs2_daddr_t blkno; 2394 { 2395 struct freework *freework; 2396 struct indir_hashhead *wkhd; 2397 struct ufsmount *ump; 2398 2399 ump = VFSTOUFS(mp); 2400 wkhd = INDIR_HASH(ump, blkno); 2401 TAILQ_FOREACH(freework, wkhd, fw_next) { 2402 if (freework->fw_blkno != blkno) 2403 continue; 2404 indirblk_remove(freework); 2405 return (1); 2406 } 2407 return (0); 2408 } 2409 2410 /* 2411 * Insert an indirect block represented by freework into the indirblk 2412 * hash table so that it may prevent the block from being re-used prior 2413 * to the journal being written. 2414 */ 2415 static void 2416 indirblk_insert(freework) 2417 struct freework *freework; 2418 { 2419 struct jblocks *jblocks; 2420 struct jseg *jseg; 2421 struct ufsmount *ump; 2422 2423 ump = VFSTOUFS(freework->fw_list.wk_mp); 2424 jblocks = ump->softdep_jblocks; 2425 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); 2426 if (jseg == NULL) 2427 return; 2428 2429 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); 2430 TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, 2431 fw_next); 2432 freework->fw_state &= ~DEPCOMPLETE; 2433 } 2434 2435 static void 2436 indirblk_remove(freework) 2437 struct freework *freework; 2438 { 2439 struct ufsmount *ump; 2440 2441 ump = VFSTOUFS(freework->fw_list.wk_mp); 2442 LIST_REMOVE(freework, fw_segs); 2443 TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); 2444 freework->fw_state |= DEPCOMPLETE; 2445 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 2446 WORKITEM_FREE(freework, D_FREEWORK); 2447 } 2448 2449 /* 2450 * Executed during filesystem system initialization before 2451 * mounting any filesystems. 
2452 */ 2453 void 2454 softdep_initialize() 2455 { 2456 2457 TAILQ_INIT(&softdepmounts); 2458 #ifdef __LP64__ 2459 max_softdeps = desiredvnodes * 4; 2460 #else 2461 max_softdeps = desiredvnodes * 2; 2462 #endif 2463 2464 /* initialise bioops hack */ 2465 bioops.io_start = softdep_disk_io_initiation; 2466 bioops.io_complete = softdep_disk_write_complete; 2467 bioops.io_deallocate = softdep_deallocate_dependencies; 2468 bioops.io_countdeps = softdep_count_dependencies; 2469 softdep_ast_cleanup = softdep_ast_cleanup_proc; 2470 2471 /* Initialize the callout with an mtx. */ 2472 callout_init_mtx(&softdep_callout, &lk, 0); 2473 } 2474 2475 /* 2476 * Executed after all filesystems have been unmounted during 2477 * filesystem module unload. 2478 */ 2479 void 2480 softdep_uninitialize() 2481 { 2482 2483 /* clear bioops hack */ 2484 bioops.io_start = NULL; 2485 bioops.io_complete = NULL; 2486 bioops.io_deallocate = NULL; 2487 bioops.io_countdeps = NULL; 2488 softdep_ast_cleanup = NULL; 2489 2490 callout_drain(&softdep_callout); 2491 } 2492 2493 /* 2494 * Called at mount time to notify the dependency code that a 2495 * filesystem wishes to use it. 2496 */ 2497 int 2498 softdep_mount(devvp, mp, fs, cred) 2499 struct vnode *devvp; 2500 struct mount *mp; 2501 struct fs *fs; 2502 struct ucred *cred; 2503 { 2504 struct csum_total cstotal; 2505 struct mount_softdeps *sdp; 2506 struct ufsmount *ump; 2507 struct cg *cgp; 2508 struct buf *bp; 2509 u_int cyl, i; 2510 int error; 2511 2512 sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, 2513 M_WAITOK | M_ZERO); 2514 MNT_ILOCK(mp); 2515 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; 2516 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { 2517 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | 2518 MNTK_SOFTDEP | MNTK_NOASYNC; 2519 } 2520 ump = VFSTOUFS(mp); 2521 ump->um_softdep = sdp; 2522 MNT_IUNLOCK(mp); 2523 rw_init(LOCK_PTR(ump), "per-fs softdep"); 2524 sdp->sd_ump = ump; 2525 LIST_INIT(&ump->softdep_workitem_pending); 2526 LIST_INIT(&ump->softdep_journal_pending); 2527 TAILQ_INIT(&ump->softdep_unlinked); 2528 LIST_INIT(&ump->softdep_dirtycg); 2529 ump->softdep_worklist_tail = NULL; 2530 ump->softdep_on_worklist = 0; 2531 ump->softdep_deps = 0; 2532 LIST_INIT(&ump->softdep_mkdirlisthd); 2533 ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 2534 &ump->pagedep_hash_size); 2535 ump->pagedep_nextclean = 0; 2536 ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, 2537 &ump->inodedep_hash_size); 2538 ump->inodedep_nextclean = 0; 2539 ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, 2540 &ump->newblk_hash_size); 2541 ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, 2542 &ump->bmsafemap_hash_size); 2543 i = 1 << (ffs(desiredvnodes / 10) - 1); 2544 ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), 2545 M_FREEWORK, M_WAITOK); 2546 ump->indir_hash_size = i - 1; 2547 for (i = 0; i <= ump->indir_hash_size; i++) 2548 TAILQ_INIT(&ump->indir_hashtbl[i]); 2549 #ifdef INVARIANTS 2550 for (i = 0; i <= D_LAST; i++) 2551 LIST_INIT(&ump->softdep_alldeps[i]); 2552 #endif 2553 ACQUIRE_GBLLOCK(&lk); 2554 TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); 2555 FREE_GBLLOCK(&lk); 2556 if ((fs->fs_flags & FS_SUJ) && 2557 (error = journal_mount(mp, fs, cred)) != 0) { 2558 printf("Failed to start journal: %d\n", error); 2559 softdep_unmount(mp); 2560 return (error); 2561 } 2562 /* 2563 * Start our flushing thread in the bufdaemon process. 
2564 */ 2565 ACQUIRE_LOCK(ump); 2566 ump->softdep_flags |= FLUSH_STARTING; 2567 FREE_LOCK(ump); 2568 kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc, 2569 &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker", 2570 mp->mnt_stat.f_mntonname); 2571 ACQUIRE_LOCK(ump); 2572 while ((ump->softdep_flags & FLUSH_STARTING) != 0) { 2573 msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart", 2574 hz / 2); 2575 } 2576 FREE_LOCK(ump); 2577 /* 2578 * When doing soft updates, the counters in the 2579 * superblock may have gotten out of sync. Recomputation 2580 * can take a long time and can be deferred for background 2581 * fsck. However, the old behavior of scanning the cylinder 2582 * groups and recalculating them at mount time is available 2583 * by setting vfs.ffs.compute_summary_at_mount to one. 2584 */ 2585 if (compute_summary_at_mount == 0 || fs->fs_clean != 0) 2586 return (0); 2587 bzero(&cstotal, sizeof cstotal); 2588 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 2589 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 2590 fs->fs_cgsize, cred, &bp)) != 0) { 2591 brelse(bp); 2592 softdep_unmount(mp); 2593 return (error); 2594 } 2595 cgp = (struct cg *)bp->b_data; 2596 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 2597 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 2598 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 2599 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 2600 fs->fs_cs(fs, cyl) = cgp->cg_cs; 2601 brelse(bp); 2602 } 2603 #ifdef INVARIANTS 2604 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 2605 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 2606 #endif 2607 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 2608 return (0); 2609 } 2610 2611 void 2612 softdep_unmount(mp) 2613 struct mount *mp; 2614 { 2615 struct ufsmount *ump; 2616 #ifdef INVARIANTS 2617 int i; 2618 #endif 2619 2620 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 2621 ("softdep_unmount called on non-softdep filesystem")); 2622 ump = VFSTOUFS(mp); 2623 MNT_ILOCK(mp); 2624 mp->mnt_flag &= ~MNT_SOFTDEP; 2625 if (MOUNTEDSUJ(mp) == 0) { 2626 MNT_IUNLOCK(mp); 2627 } else { 2628 mp->mnt_flag &= ~MNT_SUJ; 2629 MNT_IUNLOCK(mp); 2630 journal_unmount(ump); 2631 } 2632 /* 2633 * Shut down our flushing thread. Check for NULL is if 2634 * softdep_mount errors out before the thread has been created. 2635 */ 2636 if (ump->softdep_flushtd != NULL) { 2637 ACQUIRE_LOCK(ump); 2638 ump->softdep_flags |= FLUSH_EXIT; 2639 wakeup(&ump->softdep_flushtd); 2640 msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP, 2641 "sdwait", 0); 2642 KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0, 2643 ("Thread shutdown failed")); 2644 } 2645 /* 2646 * Free up our resources. 
2647 */ 2648 ACQUIRE_GBLLOCK(&lk); 2649 TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next); 2650 FREE_GBLLOCK(&lk); 2651 rw_destroy(LOCK_PTR(ump)); 2652 hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); 2653 hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); 2654 hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); 2655 hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, 2656 ump->bmsafemap_hash_size); 2657 free(ump->indir_hashtbl, M_FREEWORK); 2658 #ifdef INVARIANTS 2659 for (i = 0; i <= D_LAST; i++) { 2660 KASSERT(ump->softdep_curdeps[i] == 0, 2661 ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, 2662 TYPENAME(i), ump->softdep_curdeps[i])); 2663 KASSERT(LIST_EMPTY(&ump->softdep_alldeps[i]), 2664 ("Unmount %s: Dep type %s not empty (%p)", ump->um_fs->fs_fsmnt, 2665 TYPENAME(i), LIST_FIRST(&ump->softdep_alldeps[i]))); 2666 } 2667 #endif 2668 free(ump->um_softdep, M_MOUNTDATA); 2669 } 2670 2671 static struct jblocks * 2672 jblocks_create(void) 2673 { 2674 struct jblocks *jblocks; 2675 2676 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); 2677 TAILQ_INIT(&jblocks->jb_segs); 2678 jblocks->jb_avail = 10; 2679 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2680 M_JBLOCKS, M_WAITOK | M_ZERO); 2681 2682 return (jblocks); 2683 } 2684 2685 static ufs2_daddr_t 2686 jblocks_alloc(jblocks, bytes, actual) 2687 struct jblocks *jblocks; 2688 int bytes; 2689 int *actual; 2690 { 2691 ufs2_daddr_t daddr; 2692 struct jextent *jext; 2693 int freecnt; 2694 int blocks; 2695 2696 blocks = bytes / DEV_BSIZE; 2697 jext = &jblocks->jb_extent[jblocks->jb_head]; 2698 freecnt = jext->je_blocks - jblocks->jb_off; 2699 if (freecnt == 0) { 2700 jblocks->jb_off = 0; 2701 if (++jblocks->jb_head > jblocks->jb_used) 2702 jblocks->jb_head = 0; 2703 jext = &jblocks->jb_extent[jblocks->jb_head]; 2704 freecnt = jext->je_blocks; 2705 } 2706 if (freecnt > blocks) 2707 freecnt = blocks; 2708 *actual = freecnt * DEV_BSIZE; 2709 daddr = jext->je_daddr + jblocks->jb_off; 2710 jblocks->jb_off += freecnt; 2711 jblocks->jb_free -= freecnt; 2712 2713 return (daddr); 2714 } 2715 2716 static void 2717 jblocks_free(jblocks, mp, bytes) 2718 struct jblocks *jblocks; 2719 struct mount *mp; 2720 int bytes; 2721 { 2722 2723 LOCK_OWNED(VFSTOUFS(mp)); 2724 jblocks->jb_free += bytes / DEV_BSIZE; 2725 if (jblocks->jb_suspended) 2726 worklist_speedup(mp); 2727 wakeup(jblocks); 2728 } 2729 2730 static void 2731 jblocks_destroy(jblocks) 2732 struct jblocks *jblocks; 2733 { 2734 2735 if (jblocks->jb_extent) 2736 free(jblocks->jb_extent, M_JBLOCKS); 2737 free(jblocks, M_JBLOCKS); 2738 } 2739 2740 static void 2741 jblocks_add(jblocks, daddr, blocks) 2742 struct jblocks *jblocks; 2743 ufs2_daddr_t daddr; 2744 int blocks; 2745 { 2746 struct jextent *jext; 2747 2748 jblocks->jb_blocks += blocks; 2749 jblocks->jb_free += blocks; 2750 jext = &jblocks->jb_extent[jblocks->jb_used]; 2751 /* Adding the first block. */ 2752 if (jext->je_daddr == 0) { 2753 jext->je_daddr = daddr; 2754 jext->je_blocks = blocks; 2755 return; 2756 } 2757 /* Extending the last extent. */ 2758 if (jext->je_daddr + jext->je_blocks == daddr) { 2759 jext->je_blocks += blocks; 2760 return; 2761 } 2762 /* Adding a new extent. 
	if (++jblocks->jb_used == jblocks->jb_avail) {
		jblocks->jb_avail *= 2;
		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
		    M_JBLOCKS, M_WAITOK | M_ZERO);
		memcpy(jext, jblocks->jb_extent,
		    sizeof(struct jextent) * jblocks->jb_used);
		free(jblocks->jb_extent, M_JBLOCKS);
		jblocks->jb_extent = jext;
	}
	jext = &jblocks->jb_extent[jblocks->jb_used];
	jext->je_daddr = daddr;
	jext->je_blocks = blocks;
	return;
}

int
softdep_journal_lookup(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct componentname cnp;
	struct vnode *dvp;
	ino_t sujournal;
	int error;

	error = VFS_VGET(mp, UFS_ROOTINO, LK_EXCLUSIVE, &dvp);
	if (error)
		return (error);
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = LOOKUP;
	cnp.cn_flags = ISLASTCN;
	cnp.cn_thread = curthread;
	cnp.cn_cred = curthread->td_ucred;
	cnp.cn_pnbuf = SUJ_FILE;
	cnp.cn_nameptr = SUJ_FILE;
	cnp.cn_namelen = strlen(SUJ_FILE);
	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
	vput(dvp);
	if (error != 0)
		return (error);
	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
	return (error);
}

/*
 * Open and verify the journal file.
 */
static int
journal_mount(mp, fs, cred)
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;
	struct vnode *vp;
	struct inode *ip;
	ufs2_daddr_t blkno;
	int bcount;
	int error;
	int i;

	ump = VFSTOUFS(mp);
	ump->softdep_journal_tail = NULL;
	ump->softdep_on_journal = 0;
	ump->softdep_accdeps = 0;
	ump->softdep_req = 0;
	ump->softdep_jblocks = NULL;
	error = softdep_journal_lookup(mp, &vp);
	if (error != 0) {
		printf("Failed to find journal. Use tunefs to create one\n");
		return (error);
	}
	ip = VTOI(vp);
	if (ip->i_size < SUJ_MIN) {
		error = ENOSPC;
		goto out;
	}
	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
	jblocks = jblocks_create();
	for (i = 0; i < bcount; i++) {
		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
		if (error)
			break;
		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
	}
	if (error) {
		jblocks_destroy(jblocks);
		goto out;
	}
	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
	ump->softdep_jblocks = jblocks;
out:
	if (error == 0) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_SUJ;
		mp->mnt_flag &= ~MNT_SOFTDEP;
		MNT_IUNLOCK(mp);
		/*
		 * Only validate the journal contents if the
		 * filesystem is clean, otherwise we write the logs
		 * but they'll never be used. If the filesystem was
		 * still dirty when we mounted it the journal is
		 * invalid and a new journal can only be valid if it
		 * starts from a clean mount.
		 */
		if (fs->fs_clean) {
			DIP_SET(ip, i_modrev, fs->fs_mtime);
			ip->i_flags |= IN_MODIFIED;
			ffs_update(vp, 1);
		}
	}
	vput(vp);
	return (error);
}

static void
journal_unmount(ump)
	struct ufsmount *ump;
{

	if (ump->softdep_jblocks)
		jblocks_destroy(ump->softdep_jblocks);
	ump->softdep_jblocks = NULL;
}
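/*
 * Editorial worked example (an addition, not original commentary) for
 * jblocks_add() above: journal blocks arrive in logical order, so
 * physically contiguous runs coalesce into one extent.  With assumed
 * daddr/block arguments:
 *
 *	jblocks_add(jb, 1000, 8);	// first extent {1000, 8}
 *	jblocks_add(jb, 1008, 8);	// contiguous: extends to {1000, 16}
 *	jblocks_add(jb, 2000, 8);	// discontiguous: new extent {2000, 8}
 *
 * The extent array is doubled (jb_avail *= 2) whenever it fills.
 */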
/*
 * Called when a journal record is ready to be written. Space is allocated
 * and the journal entry is created when the journal is flushed to stable
 * store.
 */
static void
add_to_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_journal: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
		ump->softdep_jblocks->jb_age = ticks;
		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
	} else
		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
	ump->softdep_journal_tail = wk;
	ump->softdep_on_journal += 1;
}

/*
 * Remove an arbitrary item from the journal worklist, maintaining the tail
 * pointer. This happens when a new operation obviates the need to
 * journal an old operation.
 */
static void
remove_from_journal(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
#ifdef INVARIANTS
	{
		struct worklist *wkn;

		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
			if (wkn == wk)
				break;
		if (wkn == NULL)
			panic("remove_from_journal: %p is not in journal", wk);
	}
#endif
	/*
	 * We emulate a TAILQ to save space in most structures which do not
	 * require TAILQ semantics. Here we must update the tail position
	 * when removing the tail which is not the final entry. This works
	 * only if the worklist linkage is at the beginning of the structure.
	 */
	if (ump->softdep_journal_tail == wk)
		ump->softdep_journal_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	WORKLIST_REMOVE(wk);
	ump->softdep_on_journal -= 1;
}
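/*
 * Editorial worked example (an addition; the 32-byte JREC_SIZE is an
 * assumption from the journal format) for journal_space() below: space
 * is accounted in DEV_BSIZE (512-byte) blocks after reserving room for
 * records already queued in memory.  With 1000 queued records:
 *
 *	reserved = (1000 * 32) / 512 = 62 blocks
 *
 * so with jb_free == 100 only 38 blocks count as available, and that
 * figure is compared against jb_low or jb_min per the thresh argument.
 */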
/*
 * Check for journal space as well as dependency limits so the prelink
 * code can throttle both journaled and non-journaled filesystems.
 * Threshold is 0 for low and 1 for min.
 */
static int
journal_space(ump, thresh)
	struct ufsmount *ump;
	int thresh;
{
	struct jblocks *jblocks;
	int limit, avail;

	jblocks = ump->softdep_jblocks;
	if (jblocks == NULL)
		return (1);
	/*
	 * We use a tighter restriction here to prevent request_cleanup()
	 * running in threads from running into locks we currently hold.
	 * We have to be over the limit and our filesystem has to be
	 * responsible for more than our share of that usage.
	 */
	limit = (max_softdeps / 10) * 9;
	if (dep_current[D_INODEDEP] > limit &&
	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
		return (0);
	if (thresh)
		thresh = jblocks->jb_min;
	else
		thresh = jblocks->jb_low;
	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
	avail = jblocks->jb_free - avail;

	return (avail > thresh);
}

static void
journal_suspend(ump)
	struct ufsmount *ump;
{
	struct jblocks *jblocks;
	struct mount *mp;
	bool set;

	mp = UFSTOVFS(ump);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0)
		return;

	jblocks = ump->softdep_jblocks;
	vfs_op_enter(mp);
	set = false;
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		stat_journal_min++;
		mp->mnt_kern_flag |= MNTK_SUSPEND;
		mp->mnt_susp_owner = ump->softdep_flushtd;
		set = true;
	}
	jblocks->jb_suspended = 1;
	MNT_IUNLOCK(mp);
	if (!set)
		vfs_op_exit(mp);
}

static int
journal_unsuspend(struct ufsmount *ump)
{
	struct jblocks *jblocks;
	struct mount *mp;

	mp = UFSTOVFS(ump);
	jblocks = ump->softdep_jblocks;

	if (jblocks != NULL && jblocks->jb_suspended &&
	    journal_space(ump, jblocks->jb_min)) {
		jblocks->jb_suspended = 0;
		FREE_LOCK(ump);
		mp->mnt_susp_owner = curthread;
		vfs_write_resume(mp, 0);
		ACQUIRE_LOCK(ump);
		return (1);
	}
	return (0);
}

/*
 * Called before any allocation function to be certain that there is
 * sufficient space in the journal prior to creating any new records.
 * Since in the case of block allocation we may have multiple locked
 * buffers at the time of the actual allocation we can not block
 * when the journal records are created. Doing so would create a deadlock
 * if any of these buffers needed to be flushed to reclaim space. Instead
 * we require a sufficiently large amount of available space such that
 * each thread in the system could have passed this allocation check and
 * still have sufficient free space. With 20% of a minimum journal size
 * of 1MB we have 6553 records available.
 */
int
softdep_prealloc(vp, waitok)
	struct vnode *vp;
	int waitok;
{
	struct ufsmount *ump;

	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
	    ("softdep_prealloc called on non-softdep filesystem"));
	/*
	 * Nothing to do if we are not running journaled soft updates.
	 * If we currently hold the snapshot lock, we must avoid
	 * handling other resources that could cause deadlock. Do not
	 * touch quotas vnode since it is typically recursed with
	 * other vnode locks held.
	 */
	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)) ||
	    (vp->v_vflag & VV_SYSTEM) != 0)
		return (0);
	ump = VFSTOUFS(vp->v_mount);
	ACQUIRE_LOCK(ump);
	if (journal_space(ump, 0)) {
		FREE_LOCK(ump);
		return (0);
	}
	stat_journal_low++;
	FREE_LOCK(ump);
	if (waitok == MNT_NOWAIT)
		return (ENOSPC);
	/*
	 * Attempt to sync this vnode once to flush any journal
	 * work attached to it.
	 */
	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
		ffs_syncvnode(vp, waitok, 0);
	ACQUIRE_LOCK(ump);
	process_removes(vp);
	process_truncates(vp);
	if (journal_space(ump, 0) == 0) {
		softdep_speedup(ump);
		if (journal_space(ump, 1) == 0)
			journal_suspend(ump);
	}
	FREE_LOCK(ump);

	return (0);
}

/*
 * Before adjusting a link count on a vnode verify that we have sufficient
 * journal space. If not, process operations that depend on the currently
 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
 * and softdep flush threads can not acquire these locks to reclaim space.
 */
static void
softdep_prelink(dvp, vp)
	struct vnode *dvp;
	struct vnode *vp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(dvp->v_mount);
	LOCK_OWNED(ump);
	/*
	 * Nothing to do if we have sufficient journal space.
	 * If we currently hold the snapshot lock, we must avoid
	 * handling other resources that could cause deadlock.
	 */
	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
		return;
	stat_journal_low++;
	FREE_LOCK(ump);
	if (vp)
		ffs_syncvnode(vp, MNT_NOWAIT, 0);
	ffs_syncvnode(dvp, MNT_WAIT, 0);
	ACQUIRE_LOCK(ump);
	/* Process vp before dvp as it may create .. removes. */
	if (vp) {
		process_removes(vp);
		process_truncates(vp);
	}
	process_removes(dvp);
	process_truncates(dvp);
	softdep_speedup(ump);
	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
	if (journal_space(ump, 0) == 0) {
		softdep_speedup(ump);
		if (journal_space(ump, 1) == 0)
			journal_suspend(ump);
	}
}

static void
jseg_write(ump, jseg, data)
	struct ufsmount *ump;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jsegrec *rec;

	rec = (struct jsegrec *)data;
	rec->jsr_seq = jseg->js_seq;
	rec->jsr_oldest = jseg->js_oldseq;
	rec->jsr_cnt = jseg->js_cnt;
	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
	rec->jsr_crc = 0;
	rec->jsr_time = ump->um_fs->fs_mtime;
}

static inline void
inoref_write(inoref, jseg, rec)
	struct inoref *inoref;
	struct jseg *jseg;
	struct jrefrec *rec;
{

	inoref->if_jsegdep->jd_seg = jseg;
	rec->jr_ino = inoref->if_ino;
	rec->jr_parent = inoref->if_parent;
	rec->jr_nlink = inoref->if_nlink;
	rec->jr_mode = inoref->if_mode;
	rec->jr_diroff = inoref->if_diroff;
}

static void
jaddref_write(jaddref, jseg, data)
	struct jaddref *jaddref;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jrefrec *rec;

	rec = (struct jrefrec *)data;
	rec->jr_op = JOP_ADDREF;
	inoref_write(&jaddref->ja_ref, jseg, rec);
}

static void
jremref_write(jremref, jseg, data)
	struct jremref *jremref;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jrefrec *rec;

	rec = (struct jrefrec *)data;
	rec->jr_op = JOP_REMREF;
	inoref_write(&jremref->jr_ref, jseg, rec);
}

static void
jmvref_write(jmvref, jseg, data)
	struct jmvref *jmvref;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jmvrec *rec;

	rec = (struct jmvrec *)data;
	rec->jm_op = JOP_MVREF;
	rec->jm_ino = jmvref->jm_ino;
	rec->jm_parent = jmvref->jm_parent;
	rec->jm_oldoff = jmvref->jm_oldoff;
	rec->jm_newoff = jmvref->jm_newoff;
}
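/*
 * Editorial note (an addition, not original commentary): each *_write()
 * helper above and below serializes exactly one dependency into a
 * fixed-size record.  softdep_process_journal() below dispatches on
 * wk_type and advances the output cursor by JREC_SIZE per record:
 *
 *	jaddref_write(WK_JADDREF(wk), jseg, data);
 *	...
 *	off += JREC_SIZE;
 *	data = bp->b_data + off;
 *
 * so no record may exceed JREC_SIZE bytes on disk.
 */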
static void
jnewblk_write(jnewblk, jseg, data)
	struct jnewblk *jnewblk;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jblkrec *rec;

	jnewblk->jn_jsegdep->jd_seg = jseg;
	rec = (struct jblkrec *)data;
	rec->jb_op = JOP_NEWBLK;
	rec->jb_ino = jnewblk->jn_ino;
	rec->jb_blkno = jnewblk->jn_blkno;
	rec->jb_lbn = jnewblk->jn_lbn;
	rec->jb_frags = jnewblk->jn_frags;
	rec->jb_oldfrags = jnewblk->jn_oldfrags;
}

static void
jfreeblk_write(jfreeblk, jseg, data)
	struct jfreeblk *jfreeblk;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jblkrec *rec;

	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
	rec = (struct jblkrec *)data;
	rec->jb_op = JOP_FREEBLK;
	rec->jb_ino = jfreeblk->jf_ino;
	rec->jb_blkno = jfreeblk->jf_blkno;
	rec->jb_lbn = jfreeblk->jf_lbn;
	rec->jb_frags = jfreeblk->jf_frags;
	rec->jb_oldfrags = 0;
}

static void
jfreefrag_write(jfreefrag, jseg, data)
	struct jfreefrag *jfreefrag;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jblkrec *rec;

	jfreefrag->fr_jsegdep->jd_seg = jseg;
	rec = (struct jblkrec *)data;
	rec->jb_op = JOP_FREEBLK;
	rec->jb_ino = jfreefrag->fr_ino;
	rec->jb_blkno = jfreefrag->fr_blkno;
	rec->jb_lbn = jfreefrag->fr_lbn;
	rec->jb_frags = jfreefrag->fr_frags;
	rec->jb_oldfrags = 0;
}

static void
jtrunc_write(jtrunc, jseg, data)
	struct jtrunc *jtrunc;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jtrncrec *rec;

	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
	rec = (struct jtrncrec *)data;
	rec->jt_op = JOP_TRUNC;
	rec->jt_ino = jtrunc->jt_ino;
	rec->jt_size = jtrunc->jt_size;
	rec->jt_extsize = jtrunc->jt_extsize;
}

static void
jfsync_write(jfsync, jseg, data)
	struct jfsync *jfsync;
	struct jseg *jseg;
	uint8_t *data;
{
	struct jtrncrec *rec;

	rec = (struct jtrncrec *)data;
	rec->jt_op = JOP_SYNC;
	rec->jt_ino = jfsync->jfs_ino;
	rec->jt_size = jfsync->jfs_size;
	rec->jt_extsize = jfsync->jfs_extsize;
}

static void
softdep_flushjournal(mp)
	struct mount *mp;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;

	if (MOUNTEDSUJ(mp) == 0)
		return;
	ump = VFSTOUFS(mp);
	jblocks = ump->softdep_jblocks;
	ACQUIRE_LOCK(ump);
	while (ump->softdep_on_journal) {
		jblocks->jb_needseg = 1;
		softdep_process_journal(mp, NULL, MNT_WAIT);
	}
	FREE_LOCK(ump);
}

static void softdep_synchronize_completed(struct bio *);
static void softdep_synchronize(struct bio *, struct ufsmount *, void *);

static void
softdep_synchronize_completed(bp)
	struct bio *bp;
{
	struct jseg *oldest;
	struct jseg *jseg;
	struct ufsmount *ump;

	/*
	 * caller1 marks the last segment written before we issued the
	 * synchronize cache.
	 */
	jseg = bp->bio_caller1;
	if (jseg == NULL) {
		g_destroy_bio(bp);
		return;
	}
	ump = VFSTOUFS(jseg->js_list.wk_mp);
	ACQUIRE_LOCK(ump);
	oldest = NULL;
	/*
	 * Mark all the journal entries waiting on the synchronize cache
	 * as completed so they may continue on.
	 */
	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
		jseg->js_state |= COMPLETE;
		oldest = jseg;
		jseg = TAILQ_PREV(jseg, jseglst, js_next);
	}
	/*
	 * Restart deferred journal entry processing from the oldest
	 * completed jseg.
	 */
	if (oldest)
		complete_jsegs(oldest);

	FREE_LOCK(ump);
	g_destroy_bio(bp);
}

/*
 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
 * barriers. The journal must be written prior to any blocks that depend
 * on it and the journal can not be released until the blocks have been
 * written. This code handles both barriers simultaneously.
 */
static void
softdep_synchronize(bp, ump, caller1)
	struct bio *bp;
	struct ufsmount *ump;
	void *caller1;
{

	bp->bio_cmd = BIO_FLUSH;
	bp->bio_flags |= BIO_ORDERED;
	bp->bio_data = NULL;
	bp->bio_offset = ump->um_cp->provider->mediasize;
	bp->bio_length = 0;
	bp->bio_done = softdep_synchronize_completed;
	bp->bio_caller1 = caller1;
	g_io_request(bp, ump->um_cp);
}

/*
 * Flush some journal records to disk.
 */
static void
softdep_process_journal(mp, needwk, flags)
	struct mount *mp;
	struct worklist *needwk;
	int flags;
{
	struct jblocks *jblocks;
	struct ufsmount *ump;
	struct worklist *wk;
	struct jseg *jseg;
	struct buf *bp;
	struct bio *bio;
	uint8_t *data;
	struct fs *fs;
	int shouldflush;
	int segwritten;
	int jrecmin;	/* Minimum records per block. */
	int jrecmax;	/* Maximum records per block. */
	int size;
	int cnt;
	int off;
	int devbsize;

	if (MOUNTEDSUJ(mp) == 0)
		return;
	shouldflush = softdep_flushcache;
	bio = NULL;
	jseg = NULL;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	fs = ump->um_fs;
	jblocks = ump->softdep_jblocks;
	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
	/*
	 * We write anywhere between a disk block and an fs block. The upper
	 * bound is picked to prevent buffer cache fragmentation and limit
	 * processing time per I/O.
	 */
	jrecmin = (devbsize / JREC_SIZE) - 1;	/* -1 for seg header */
	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
	segwritten = 0;
	for (;;) {
		cnt = ump->softdep_on_journal;
		/*
		 * Criteria for writing a segment:
		 * 1) We have a full block.
		 * 2) We're called from jwait() and haven't found the
		 *    journal item yet.
		 * 3) Always write if needseg is set.
		 * 4) If we are called from process_worklist and have
		 *    not yet written anything we write a partial block
		 *    to enforce a 1 second maximum latency on journal
		 *    entries.
		 */
		if (cnt < (jrecmax - 1) && needwk == NULL &&
		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
			break;
		cnt++;
		/*
		 * Verify some free journal space. softdep_prealloc() should
		 * guarantee that we don't run out so this is indicative of
		 * a problem with the flow control. Try to recover
		 * gracefully in any event.
		 */
3452 */ 3453 while (jblocks->jb_free == 0) { 3454 if (flags != MNT_WAIT) 3455 break; 3456 printf("softdep: Out of journal space!\n"); 3457 softdep_speedup(ump); 3458 msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz); 3459 } 3460 FREE_LOCK(ump); 3461 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); 3462 workitem_alloc(&jseg->js_list, D_JSEG, mp); 3463 LIST_INIT(&jseg->js_entries); 3464 LIST_INIT(&jseg->js_indirs); 3465 jseg->js_state = ATTACHED; 3466 if (shouldflush == 0) 3467 jseg->js_state |= COMPLETE; 3468 else if (bio == NULL) 3469 bio = g_alloc_bio(); 3470 jseg->js_jblocks = jblocks; 3471 bp = geteblk(fs->fs_bsize, 0); 3472 ACQUIRE_LOCK(ump); 3473 /* 3474 * If there was a race while we were allocating the block 3475 * and jseg the entry we care about was likely written. 3476 * We bail out in both the WAIT and NOWAIT case and assume 3477 * the caller will loop if the entry it cares about is 3478 * not written. 3479 */ 3480 cnt = ump->softdep_on_journal; 3481 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) { 3482 bp->b_flags |= B_INVAL | B_NOCACHE; 3483 WORKITEM_FREE(jseg, D_JSEG); 3484 FREE_LOCK(ump); 3485 brelse(bp); 3486 ACQUIRE_LOCK(ump); 3487 break; 3488 } 3489 /* 3490 * Calculate the disk block size required for the available 3491 * records rounded to the min size. 3492 */ 3493 if (cnt == 0) 3494 size = devbsize; 3495 else if (cnt < jrecmax) 3496 size = howmany(cnt, jrecmin) * devbsize; 3497 else 3498 size = fs->fs_bsize; 3499 /* 3500 * Allocate a disk block for this journal data and account 3501 * for truncation of the requested size if enough contiguous 3502 * space was not available. 3503 */ 3504 bp->b_blkno = jblocks_alloc(jblocks, size, &size); 3505 bp->b_lblkno = bp->b_blkno; 3506 bp->b_offset = bp->b_blkno * DEV_BSIZE; 3507 bp->b_bcount = size; 3508 bp->b_flags &= ~B_INVAL; 3509 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY; 3510 /* 3511 * Initialize our jseg with cnt records. Assign the next 3512 * sequence number to it and link it in-order. 3513 */ 3514 cnt = MIN(cnt, (size / devbsize) * jrecmin); 3515 jseg->js_buf = bp; 3516 jseg->js_cnt = cnt; 3517 jseg->js_refs = cnt + 1; /* Self ref. */ 3518 jseg->js_size = size; 3519 jseg->js_seq = jblocks->jb_nextseq++; 3520 if (jblocks->jb_oldestseg == NULL) 3521 jblocks->jb_oldestseg = jseg; 3522 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq; 3523 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next); 3524 if (jblocks->jb_writeseg == NULL) 3525 jblocks->jb_writeseg = jseg; 3526 /* 3527 * Start filling in records from the pending list. 3528 */ 3529 data = bp->b_data; 3530 off = 0; 3531 3532 /* 3533 * Always put a header on the first block. 3534 * XXX As with below, there might not be a chance to get 3535 * into the loop. Ensure that something valid is written. 3536 */ 3537 jseg_write(ump, jseg, data); 3538 off += JREC_SIZE; 3539 data = bp->b_data + off; 3540 3541 /* 3542 * XXX Something is wrong here. There's no work to do, 3543 * but we need to perform and I/O and allow it to complete 3544 * anyways. 3545 */ 3546 if (LIST_EMPTY(&ump->softdep_journal_pending)) 3547 stat_emptyjblocks++; 3548 3549 while ((wk = LIST_FIRST(&ump->softdep_journal_pending)) 3550 != NULL) { 3551 if (cnt == 0) 3552 break; 3553 /* Place a segment header on every device block. 
			if ((off % devbsize) == 0) {
				jseg_write(ump, jseg, data);
				off += JREC_SIZE;
				data = bp->b_data + off;
			}
			if (wk == needwk)
				needwk = NULL;
			remove_from_journal(wk);
			wk->wk_state |= INPROGRESS;
			WORKLIST_INSERT(&jseg->js_entries, wk);
			switch (wk->wk_type) {
			case D_JADDREF:
				jaddref_write(WK_JADDREF(wk), jseg, data);
				break;
			case D_JREMREF:
				jremref_write(WK_JREMREF(wk), jseg, data);
				break;
			case D_JMVREF:
				jmvref_write(WK_JMVREF(wk), jseg, data);
				break;
			case D_JNEWBLK:
				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
				break;
			case D_JFREEBLK:
				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
				break;
			case D_JFREEFRAG:
				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
				break;
			case D_JTRUNC:
				jtrunc_write(WK_JTRUNC(wk), jseg, data);
				break;
			case D_JFSYNC:
				jfsync_write(WK_JFSYNC(wk), jseg, data);
				break;
			default:
				panic("process_journal: Unknown type %s",
				    TYPENAME(wk->wk_type));
				/* NOTREACHED */
			}
			off += JREC_SIZE;
			data = bp->b_data + off;
			cnt--;
		}

		/* Clear any remaining space so we don't leak kernel data. */
		if (size > off)
			bzero(data, size - off);

		/*
		 * Write this one buffer and continue.
		 */
		segwritten = 1;
		jblocks->jb_needseg = 0;
		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
		FREE_LOCK(ump);
		bp->b_xflags |= BX_CVTENXIO;
		pbgetvp(ump->um_devvp, bp);
		/*
		 * We only do the blocking wait once we find the journal
		 * entry we're looking for.
		 */
		if (needwk == NULL && flags == MNT_WAIT)
			bwrite(bp);
		else
			bawrite(bp);
		ACQUIRE_LOCK(ump);
	}
	/*
	 * If we wrote a segment, issue a synchronize cache so the journal
	 * is reflected on disk before the data is written. Since reclaiming
	 * journal space also requires writing a journal record this
	 * process also enforces a barrier before reclamation.
	 */
	if (segwritten && shouldflush) {
		softdep_synchronize(bio, ump,
		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
	} else if (bio)
		g_destroy_bio(bio);
	/*
	 * If we've suspended the filesystem because we ran out of journal
	 * space either try to sync it here to make some progress or
	 * unsuspend it if we already have.
	 */
	if (flags == 0 && jblocks->jb_suspended) {
		if (journal_unsuspend(ump))
			return;
		FREE_LOCK(ump);
		VFS_SYNC(mp, MNT_NOWAIT);
		ffs_sbupdate(ump, MNT_WAIT, 0);
		ACQUIRE_LOCK(ump);
	}
}

/*
 * Complete a jseg, allowing all dependencies awaiting journal writes
 * to proceed. Each journal dependency also attaches a jsegdep to dependent
 * structures so that the journal segment can be freed to reclaim space.
 */
static void
complete_jseg(jseg)
	struct jseg *jseg;
{
	struct worklist *wk;
	struct jmvref *jmvref;
#ifdef INVARIANTS
	int i = 0;
#endif

	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
		WORKLIST_REMOVE(wk);
		wk->wk_state &= ~INPROGRESS;
		wk->wk_state |= COMPLETE;
		KASSERT(i++ < jseg->js_cnt,
		    ("handle_written_jseg: overflow %d >= %d",
		    i - 1, jseg->js_cnt));
		switch (wk->wk_type) {
		case D_JADDREF:
			handle_written_jaddref(WK_JADDREF(wk));
			break;
		case D_JREMREF:
			handle_written_jremref(WK_JREMREF(wk));
			break;
		case D_JMVREF:
			rele_jseg(jseg);	/* No jsegdep. */
			jmvref = WK_JMVREF(wk);
			LIST_REMOVE(jmvref, jm_deps);
			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
				free_pagedep(jmvref->jm_pagedep);
			WORKITEM_FREE(jmvref, D_JMVREF);
			break;
		case D_JNEWBLK:
			handle_written_jnewblk(WK_JNEWBLK(wk));
			break;
		case D_JFREEBLK:
			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
			break;
		case D_JTRUNC:
			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
			break;
		case D_JFSYNC:
			rele_jseg(jseg);	/* No jsegdep. */
			WORKITEM_FREE(wk, D_JFSYNC);
			break;
		case D_JFREEFRAG:
			handle_written_jfreefrag(WK_JFREEFRAG(wk));
			break;
		default:
			panic("handle_written_jseg: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/* Release the self reference so the structure may be freed. */
	rele_jseg(jseg);
}

/*
 * Determine which jsegs are ready for completion processing. Waits for
 * synchronize cache to complete as well as forcing in-order completion
 * of journal entries.
 */
static void
complete_jsegs(jseg)
	struct jseg *jseg;
{
	struct jblocks *jblocks;
	struct jseg *jsegn;

	jblocks = jseg->js_jblocks;
	/*
	 * Don't allow out of order completions. If this isn't the first
	 * block, wait for it to write before we're done.
	 */
	if (jseg != jblocks->jb_writeseg)
		return;
	/* Iterate through available jsegs processing their entries. */
	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
		jblocks->jb_oldestwrseq = jseg->js_oldseq;
		jsegn = TAILQ_NEXT(jseg, js_next);
		complete_jseg(jseg);
		jseg = jsegn;
	}
	jblocks->jb_writeseg = jseg;
	/*
	 * Attempt to free jsegs now that oldestwrseq may have advanced.
	 */
	free_jsegs(jblocks);
}

/*
 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle
 * the final completions.
 */
static void
handle_written_jseg(jseg, bp)
	struct jseg *jseg;
	struct buf *bp;
{

	if (jseg->js_refs == 0)
		panic("handle_written_jseg: No self-reference on %p", jseg);
	jseg->js_state |= DEPCOMPLETE;
	/*
	 * We'll never need this buffer again, set flags so it will be
	 * discarded.
	 */
	bp->b_flags |= B_INVAL | B_NOCACHE;
	pbrelvp(bp);
	complete_jsegs(jseg);
}

static inline struct jsegdep *
inoref_jseg(inoref)
	struct inoref *inoref;
{
	struct jsegdep *jsegdep;

	jsegdep = inoref->if_jsegdep;
	inoref->if_jsegdep = NULL;

	return (jsegdep);
}

/*
 * Called once a jremref has made it to stable store. The jremref is marked
 * complete and we attempt to free it. Any pagedep writes sleeping waiting
 * for the jremref to complete will be awoken by free_jremref.
 */
static void
handle_written_jremref(jremref)
	struct jremref *jremref;
{
	struct inodedep *inodedep;
	struct jsegdep *jsegdep;
	struct dirrem *dirrem;

	/* Grab the jsegdep. */
	jsegdep = inoref_jseg(&jremref->jr_ref);
	/*
	 * Remove us from the inoref list.
	 */
	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
	    0, &inodedep) == 0)
		panic("handle_written_jremref: Lost inodedep");
	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
	/*
	 * Complete the dirrem.
	 */
3802 */ 3803 dirrem = jremref->jr_dirrem; 3804 jremref->jr_dirrem = NULL; 3805 LIST_REMOVE(jremref, jr_deps); 3806 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; 3807 jwork_insert(&dirrem->dm_jwork, jsegdep); 3808 if (LIST_EMPTY(&dirrem->dm_jremrefhd) && 3809 (dirrem->dm_state & COMPLETE) != 0) 3810 add_to_worklist(&dirrem->dm_list, 0); 3811 free_jremref(jremref); 3812 } 3813 3814 /* 3815 * Called once a jaddref has made it to stable store. The dependency is 3816 * marked complete and any dependent structures are added to the inode 3817 * bufwait list to be completed as soon as it is written. If a bitmap write 3818 * depends on this entry we move the inode into the inodedephd of the 3819 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. 3820 */ 3821 static void 3822 handle_written_jaddref(jaddref) 3823 struct jaddref *jaddref; 3824 { 3825 struct jsegdep *jsegdep; 3826 struct inodedep *inodedep; 3827 struct diradd *diradd; 3828 struct mkdir *mkdir; 3829 3830 /* Grab the jsegdep. */ 3831 jsegdep = inoref_jseg(&jaddref->ja_ref); 3832 mkdir = NULL; 3833 diradd = NULL; 3834 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 3835 0, &inodedep) == 0) 3836 panic("handle_written_jaddref: Lost inodedep."); 3837 if (jaddref->ja_diradd == NULL) 3838 panic("handle_written_jaddref: No dependency"); 3839 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { 3840 diradd = jaddref->ja_diradd; 3841 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); 3842 } else if (jaddref->ja_state & MKDIR_PARENT) { 3843 mkdir = jaddref->ja_mkdir; 3844 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); 3845 } else if (jaddref->ja_state & MKDIR_BODY) 3846 mkdir = jaddref->ja_mkdir; 3847 else 3848 panic("handle_written_jaddref: Unknown dependency %p", 3849 jaddref->ja_diradd); 3850 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ 3851 /* 3852 * Remove us from the inode list. 3853 */ 3854 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); 3855 /* 3856 * The mkdir may be waiting on the jaddref to clear before freeing. 3857 */ 3858 if (mkdir) { 3859 KASSERT(mkdir->md_list.wk_type == D_MKDIR, 3860 ("handle_written_jaddref: Incorrect type for mkdir %s", 3861 TYPENAME(mkdir->md_list.wk_type))); 3862 mkdir->md_jaddref = NULL; 3863 diradd = mkdir->md_diradd; 3864 mkdir->md_state |= DEPCOMPLETE; 3865 complete_mkdir(mkdir); 3866 } 3867 jwork_insert(&diradd->da_jwork, jsegdep); 3868 if (jaddref->ja_state & NEWBLOCK) { 3869 inodedep->id_state |= ONDEPLIST; 3870 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, 3871 inodedep, id_deps); 3872 } 3873 free_jaddref(jaddref); 3874 } 3875 3876 /* 3877 * Called once a jnewblk journal is written. The allocdirect or allocindir 3878 * is placed in the bmsafemap to await notification of a written bitmap. If 3879 * the operation was canceled we add the segdep to the appropriate 3880 * dependency to free the journal space once the canceling operation 3881 * completes. 3882 */ 3883 static void 3884 handle_written_jnewblk(jnewblk) 3885 struct jnewblk *jnewblk; 3886 { 3887 struct bmsafemap *bmsafemap; 3888 struct freefrag *freefrag; 3889 struct freework *freework; 3890 struct jsegdep *jsegdep; 3891 struct newblk *newblk; 3892 3893 /* Grab the jsegdep. 
*/ 3894 jsegdep = jnewblk->jn_jsegdep; 3895 jnewblk->jn_jsegdep = NULL; 3896 if (jnewblk->jn_dep == NULL) 3897 panic("handle_written_jnewblk: No dependency for the segdep."); 3898 switch (jnewblk->jn_dep->wk_type) { 3899 case D_NEWBLK: 3900 case D_ALLOCDIRECT: 3901 case D_ALLOCINDIR: 3902 /* 3903 * Add the written block to the bmsafemap so it can 3904 * be notified when the bitmap is on disk. 3905 */ 3906 newblk = WK_NEWBLK(jnewblk->jn_dep); 3907 newblk->nb_jnewblk = NULL; 3908 if ((newblk->nb_state & GOINGAWAY) == 0) { 3909 bmsafemap = newblk->nb_bmsafemap; 3910 newblk->nb_state |= ONDEPLIST; 3911 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, 3912 nb_deps); 3913 } 3914 jwork_insert(&newblk->nb_jwork, jsegdep); 3915 break; 3916 case D_FREEFRAG: 3917 /* 3918 * A newblock being removed by a freefrag when replaced by 3919 * frag extension. 3920 */ 3921 freefrag = WK_FREEFRAG(jnewblk->jn_dep); 3922 freefrag->ff_jdep = NULL; 3923 jwork_insert(&freefrag->ff_jwork, jsegdep); 3924 break; 3925 case D_FREEWORK: 3926 /* 3927 * A direct block was removed by truncate. 3928 */ 3929 freework = WK_FREEWORK(jnewblk->jn_dep); 3930 freework->fw_jnewblk = NULL; 3931 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); 3932 break; 3933 default: 3934 panic("handle_written_jnewblk: Unknown type %d.", 3935 jnewblk->jn_dep->wk_type); 3936 } 3937 jnewblk->jn_dep = NULL; 3938 free_jnewblk(jnewblk); 3939 } 3940 3941 /* 3942 * Cancel a jfreefrag that won't be needed, probably due to colliding with 3943 * an in-flight allocation that has not yet been committed. Divorce us 3944 * from the freefrag and mark it DEPCOMPLETE so that it may be added 3945 * to the worklist. 3946 */ 3947 static void 3948 cancel_jfreefrag(jfreefrag) 3949 struct jfreefrag *jfreefrag; 3950 { 3951 struct freefrag *freefrag; 3952 3953 if (jfreefrag->fr_jsegdep) { 3954 free_jsegdep(jfreefrag->fr_jsegdep); 3955 jfreefrag->fr_jsegdep = NULL; 3956 } 3957 freefrag = jfreefrag->fr_freefrag; 3958 jfreefrag->fr_freefrag = NULL; 3959 free_jfreefrag(jfreefrag); 3960 freefrag->ff_state |= DEPCOMPLETE; 3961 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); 3962 } 3963 3964 /* 3965 * Free a jfreefrag when the parent freefrag is rendered obsolete. 3966 */ 3967 static void 3968 free_jfreefrag(jfreefrag) 3969 struct jfreefrag *jfreefrag; 3970 { 3971 3972 if (jfreefrag->fr_state & INPROGRESS) 3973 WORKLIST_REMOVE(&jfreefrag->fr_list); 3974 else if (jfreefrag->fr_state & ONWORKLIST) 3975 remove_from_journal(&jfreefrag->fr_list); 3976 if (jfreefrag->fr_freefrag != NULL) 3977 panic("free_jfreefrag: Still attached to a freefrag."); 3978 WORKITEM_FREE(jfreefrag, D_JFREEFRAG); 3979 } 3980 3981 /* 3982 * Called when the journal write for a jfreefrag completes. The parent 3983 * freefrag is added to the worklist if this completes its dependencies. 3984 */ 3985 static void 3986 handle_written_jfreefrag(jfreefrag) 3987 struct jfreefrag *jfreefrag; 3988 { 3989 struct jsegdep *jsegdep; 3990 struct freefrag *freefrag; 3991 3992 /* Grab the jsegdep. 
 */
	jsegdep = jfreefrag->fr_jsegdep;
	jfreefrag->fr_jsegdep = NULL;
	freefrag = jfreefrag->fr_freefrag;
	if (freefrag == NULL)
		panic("handle_written_jfreefrag: No freefrag.");
	freefrag->ff_state |= DEPCOMPLETE;
	freefrag->ff_jdep = NULL;
	jwork_insert(&freefrag->ff_jwork, jsegdep);
	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
		add_to_worklist(&freefrag->ff_list, 0);
	jfreefrag->fr_freefrag = NULL;
	free_jfreefrag(jfreefrag);
}

/*
 * Called when the journal write for a jfreeblk or jtrunc completes.  The
 * jblkdep is removed from the freeblks list of pending journal writes and
 * the jsegdep is moved to the freeblks jwork to be completed when all
 * blocks have been reclaimed.
 */
static void
handle_written_jblkdep(jblkdep)
	struct jblkdep *jblkdep;
{
	struct freeblks *freeblks;
	struct jsegdep *jsegdep;

	/* Grab the jsegdep. */
	jsegdep = jblkdep->jb_jsegdep;
	jblkdep->jb_jsegdep = NULL;
	freeblks = jblkdep->jb_freeblks;
	LIST_REMOVE(jblkdep, jb_deps);
	jwork_insert(&freeblks->fb_jwork, jsegdep);
	/*
	 * If the freeblks is all journaled, we can add it to the worklist.
	 */
	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
		add_to_worklist(&freeblks->fb_list, WK_NODELAY);

	free_jblkdep(jblkdep);
}

static struct jsegdep *
newjsegdep(struct worklist *wk)
{
	struct jsegdep *jsegdep;

	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
	jsegdep->jd_seg = NULL;

	return (jsegdep);
}

static struct jmvref *
newjmvref(dp, ino, oldoff, newoff)
	struct inode *dp;
	ino_t ino;
	off_t oldoff;
	off_t newoff;
{
	struct jmvref *jmvref;

	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
	workitem_alloc(&jmvref->jm_list, D_JMVREF, ITOVFS(dp));
	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
	jmvref->jm_parent = dp->i_number;
	jmvref->jm_ino = ino;
	jmvref->jm_oldoff = oldoff;
	jmvref->jm_newoff = newoff;

	return (jmvref);
}

/*
 * Allocate a new jremref that tracks the removal of ip from dp with the
 * directory entry offset of diroff.  Mark the entry as ATTACHED and
 * DEPCOMPLETE as we have all the information required for the journal write
 * and the directory entry has already been removed from the buffer.  The
 * caller is responsible for linking the jremref into the pagedep and adding
 * it to the journal to write.  The MKDIR_PARENT flag is set if we're doing
 * a DOTDOT addition so handle_workitem_remove() can properly assign
 * the jsegdep when we're done.
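 *
 * A hypothetical caller following this contract (simplified sketch for
 * illustration only; the real consumer is the dirrem setup code later in
 * this file, and the argument values and the isdotdot flag shown here
 * are assumptions, not taken from that code):
 *
 *	jremref = newjremref(dirrem, dp, ip, diroff, ip->i_effnlink);
 *	if (isdotdot)
 *		jremref->jr_state |= MKDIR_PARENT;
 *	ACQUIRE_LOCK(ITOUMP(dp));
 *	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
 *	add_to_journal(&jremref->jr_list);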
4077 */ 4078 static struct jremref * 4079 newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, 4080 off_t diroff, nlink_t nlink) 4081 { 4082 struct jremref *jremref; 4083 4084 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); 4085 workitem_alloc(&jremref->jr_list, D_JREMREF, ITOVFS(dp)); 4086 jremref->jr_state = ATTACHED; 4087 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, 4088 nlink, ip->i_mode); 4089 jremref->jr_dirrem = dirrem; 4090 4091 return (jremref); 4092 } 4093 4094 static inline void 4095 newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, 4096 nlink_t nlink, uint16_t mode) 4097 { 4098 4099 inoref->if_jsegdep = newjsegdep(&inoref->if_list); 4100 inoref->if_diroff = diroff; 4101 inoref->if_ino = ino; 4102 inoref->if_parent = parent; 4103 inoref->if_nlink = nlink; 4104 inoref->if_mode = mode; 4105 } 4106 4107 /* 4108 * Allocate a new jaddref to track the addition of ino to dp at diroff. The 4109 * directory offset may not be known until later. The caller is responsible 4110 * adding the entry to the journal when this information is available. nlink 4111 * should be the link count prior to the addition and mode is only required 4112 * to have the correct FMT. 4113 */ 4114 static struct jaddref * 4115 newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, 4116 uint16_t mode) 4117 { 4118 struct jaddref *jaddref; 4119 4120 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); 4121 workitem_alloc(&jaddref->ja_list, D_JADDREF, ITOVFS(dp)); 4122 jaddref->ja_state = ATTACHED; 4123 jaddref->ja_mkdir = NULL; 4124 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); 4125 4126 return (jaddref); 4127 } 4128 4129 /* 4130 * Create a new free dependency for a freework. The caller is responsible 4131 * for adjusting the reference count when it has the lock held. The freedep 4132 * will track an outstanding bitmap write that will ultimately clear the 4133 * freework to continue. 4134 */ 4135 static struct freedep * 4136 newfreedep(struct freework *freework) 4137 { 4138 struct freedep *freedep; 4139 4140 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); 4141 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); 4142 freedep->fd_freework = freework; 4143 4144 return (freedep); 4145 } 4146 4147 /* 4148 * Free a freedep structure once the buffer it is linked to is written. If 4149 * this is the last reference to the freework schedule it for completion. 4150 */ 4151 static void 4152 free_freedep(freedep) 4153 struct freedep *freedep; 4154 { 4155 struct freework *freework; 4156 4157 freework = freedep->fd_freework; 4158 freework->fw_freeblks->fb_cgwait--; 4159 if (--freework->fw_ref == 0) 4160 freework_enqueue(freework); 4161 WORKITEM_FREE(freedep, D_FREEDEP); 4162 } 4163 4164 /* 4165 * Allocate a new freework structure that may be a level in an indirect 4166 * when parent is not NULL or a top level block when it is. The top level 4167 * freework structures are allocated without the per-filesystem lock held 4168 * and before the freeblks is visible outside of softdep_setup_freeblocks(). 
4169 */ 4170 static struct freework * 4171 newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) 4172 struct ufsmount *ump; 4173 struct freeblks *freeblks; 4174 struct freework *parent; 4175 ufs_lbn_t lbn; 4176 ufs2_daddr_t nb; 4177 int frags; 4178 int off; 4179 int journal; 4180 { 4181 struct freework *freework; 4182 4183 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); 4184 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); 4185 freework->fw_state = ATTACHED; 4186 freework->fw_jnewblk = NULL; 4187 freework->fw_freeblks = freeblks; 4188 freework->fw_parent = parent; 4189 freework->fw_lbn = lbn; 4190 freework->fw_blkno = nb; 4191 freework->fw_frags = frags; 4192 freework->fw_indir = NULL; 4193 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || 4194 lbn >= -UFS_NXADDR) ? 0 : NINDIR(ump->um_fs) + 1; 4195 freework->fw_start = freework->fw_off = off; 4196 if (journal) 4197 newjfreeblk(freeblks, lbn, nb, frags); 4198 if (parent == NULL) { 4199 ACQUIRE_LOCK(ump); 4200 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 4201 freeblks->fb_ref++; 4202 FREE_LOCK(ump); 4203 } 4204 4205 return (freework); 4206 } 4207 4208 /* 4209 * Eliminate a jfreeblk for a block that does not need journaling. 4210 */ 4211 static void 4212 cancel_jfreeblk(freeblks, blkno) 4213 struct freeblks *freeblks; 4214 ufs2_daddr_t blkno; 4215 { 4216 struct jfreeblk *jfreeblk; 4217 struct jblkdep *jblkdep; 4218 4219 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { 4220 if (jblkdep->jb_list.wk_type != D_JFREEBLK) 4221 continue; 4222 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); 4223 if (jfreeblk->jf_blkno == blkno) 4224 break; 4225 } 4226 if (jblkdep == NULL) 4227 return; 4228 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); 4229 free_jsegdep(jblkdep->jb_jsegdep); 4230 LIST_REMOVE(jblkdep, jb_deps); 4231 WORKITEM_FREE(jfreeblk, D_JFREEBLK); 4232 } 4233 4234 /* 4235 * Allocate a new jfreeblk to journal top level block pointer when truncating 4236 * a file. The caller must add this to the worklist when the per-filesystem 4237 * lock is held. 4238 */ 4239 static struct jfreeblk * 4240 newjfreeblk(freeblks, lbn, blkno, frags) 4241 struct freeblks *freeblks; 4242 ufs_lbn_t lbn; 4243 ufs2_daddr_t blkno; 4244 int frags; 4245 { 4246 struct jfreeblk *jfreeblk; 4247 4248 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); 4249 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, 4250 freeblks->fb_list.wk_mp); 4251 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); 4252 jfreeblk->jf_dep.jb_freeblks = freeblks; 4253 jfreeblk->jf_ino = freeblks->fb_inum; 4254 jfreeblk->jf_lbn = lbn; 4255 jfreeblk->jf_blkno = blkno; 4256 jfreeblk->jf_frags = frags; 4257 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); 4258 4259 return (jfreeblk); 4260 } 4261 4262 /* 4263 * The journal is only prepared to handle full-size block numbers, so we 4264 * have to adjust the record to reflect the change to a full-size block. 4265 * For example, suppose we have a block made up of fragments 8-15 and 4266 * want to free its last two fragments. We are given a request that says: 4267 * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 4268 * where frags are the number of fragments to free and oldfrags are the 4269 * number of fragments to keep. 
To block align it, we have to change it to 4270 * have a valid full-size blkno, so it becomes: 4271 * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 4272 */ 4273 static void 4274 adjust_newfreework(freeblks, frag_offset) 4275 struct freeblks *freeblks; 4276 int frag_offset; 4277 { 4278 struct jfreeblk *jfreeblk; 4279 4280 KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && 4281 LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), 4282 ("adjust_newfreework: Missing freeblks dependency")); 4283 4284 jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); 4285 jfreeblk->jf_blkno -= frag_offset; 4286 jfreeblk->jf_frags += frag_offset; 4287 } 4288 4289 /* 4290 * Allocate a new jtrunc to track a partial truncation. 4291 */ 4292 static struct jtrunc * 4293 newjtrunc(freeblks, size, extsize) 4294 struct freeblks *freeblks; 4295 off_t size; 4296 int extsize; 4297 { 4298 struct jtrunc *jtrunc; 4299 4300 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); 4301 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, 4302 freeblks->fb_list.wk_mp); 4303 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); 4304 jtrunc->jt_dep.jb_freeblks = freeblks; 4305 jtrunc->jt_ino = freeblks->fb_inum; 4306 jtrunc->jt_size = size; 4307 jtrunc->jt_extsize = extsize; 4308 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); 4309 4310 return (jtrunc); 4311 } 4312 4313 /* 4314 * If we're canceling a new bitmap we have to search for another ref 4315 * to move into the bmsafemap dep. This might be better expressed 4316 * with another structure. 4317 */ 4318 static void 4319 move_newblock_dep(jaddref, inodedep) 4320 struct jaddref *jaddref; 4321 struct inodedep *inodedep; 4322 { 4323 struct inoref *inoref; 4324 struct jaddref *jaddrefn; 4325 4326 jaddrefn = NULL; 4327 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4328 inoref = TAILQ_NEXT(inoref, if_deps)) { 4329 if ((jaddref->ja_state & NEWBLOCK) && 4330 inoref->if_list.wk_type == D_JADDREF) { 4331 jaddrefn = (struct jaddref *)inoref; 4332 break; 4333 } 4334 } 4335 if (jaddrefn == NULL) 4336 return; 4337 jaddrefn->ja_state &= ~(ATTACHED | UNDONE); 4338 jaddrefn->ja_state |= jaddref->ja_state & 4339 (ATTACHED | UNDONE | NEWBLOCK); 4340 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); 4341 jaddref->ja_state |= ATTACHED; 4342 LIST_REMOVE(jaddref, ja_bmdeps); 4343 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, 4344 ja_bmdeps); 4345 } 4346 4347 /* 4348 * Cancel a jaddref either before it has been written or while it is being 4349 * written. This happens when a link is removed before the add reaches 4350 * the disk. The jaddref dependency is kept linked into the bmsafemap 4351 * and inode to prevent the link count or bitmap from reaching the disk 4352 * until handle_workitem_remove() re-adjusts the counts and bitmaps as 4353 * required. 4354 * 4355 * Returns 1 if the canceled addref requires journaling of the remove and 4356 * 0 otherwise. 
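 *
 * For example, if the jaddref has already been written (or is being
 * written) when the link is removed, the on-disk journal may record an
 * add with no matching remove, so the remove must be journaled as well
 * and we return 1.  If the jaddref was still pending, the add and the
 * remove cancel each other entirely in memory and we return 0.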
4357 */ 4358 static int 4359 cancel_jaddref(jaddref, inodedep, wkhd) 4360 struct jaddref *jaddref; 4361 struct inodedep *inodedep; 4362 struct workhead *wkhd; 4363 { 4364 struct inoref *inoref; 4365 struct jsegdep *jsegdep; 4366 int needsj; 4367 4368 KASSERT((jaddref->ja_state & COMPLETE) == 0, 4369 ("cancel_jaddref: Canceling complete jaddref")); 4370 if (jaddref->ja_state & (INPROGRESS | COMPLETE)) 4371 needsj = 1; 4372 else 4373 needsj = 0; 4374 if (inodedep == NULL) 4375 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 4376 0, &inodedep) == 0) 4377 panic("cancel_jaddref: Lost inodedep"); 4378 /* 4379 * We must adjust the nlink of any reference operation that follows 4380 * us so that it is consistent with the in-memory reference. This 4381 * ensures that inode nlink rollbacks always have the correct link. 4382 */ 4383 if (needsj == 0) { 4384 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4385 inoref = TAILQ_NEXT(inoref, if_deps)) { 4386 if (inoref->if_state & GOINGAWAY) 4387 break; 4388 inoref->if_nlink--; 4389 } 4390 } 4391 jsegdep = inoref_jseg(&jaddref->ja_ref); 4392 if (jaddref->ja_state & NEWBLOCK) 4393 move_newblock_dep(jaddref, inodedep); 4394 wake_worklist(&jaddref->ja_list); 4395 jaddref->ja_mkdir = NULL; 4396 if (jaddref->ja_state & INPROGRESS) { 4397 jaddref->ja_state &= ~INPROGRESS; 4398 WORKLIST_REMOVE(&jaddref->ja_list); 4399 jwork_insert(wkhd, jsegdep); 4400 } else { 4401 free_jsegdep(jsegdep); 4402 if (jaddref->ja_state & DEPCOMPLETE) 4403 remove_from_journal(&jaddref->ja_list); 4404 } 4405 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); 4406 /* 4407 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove 4408 * can arrange for them to be freed with the bitmap. Otherwise we 4409 * no longer need this addref attached to the inoreflst and it 4410 * will incorrectly adjust nlink if we leave it. 4411 */ 4412 if ((jaddref->ja_state & NEWBLOCK) == 0) { 4413 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 4414 if_deps); 4415 jaddref->ja_state |= COMPLETE; 4416 free_jaddref(jaddref); 4417 return (needsj); 4418 } 4419 /* 4420 * Leave the head of the list for jsegdeps for fast merging. 4421 */ 4422 if (LIST_FIRST(wkhd) != NULL) { 4423 jaddref->ja_state |= ONWORKLIST; 4424 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); 4425 } else 4426 WORKLIST_INSERT(wkhd, &jaddref->ja_list); 4427 4428 return (needsj); 4429 } 4430 4431 /* 4432 * Attempt to free a jaddref structure when some work completes. This 4433 * should only succeed once the entry is written and all dependencies have 4434 * been notified. 4435 */ 4436 static void 4437 free_jaddref(jaddref) 4438 struct jaddref *jaddref; 4439 { 4440 4441 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) 4442 return; 4443 if (jaddref->ja_ref.if_jsegdep) 4444 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", 4445 jaddref, jaddref->ja_state); 4446 if (jaddref->ja_state & NEWBLOCK) 4447 LIST_REMOVE(jaddref, ja_bmdeps); 4448 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) 4449 panic("free_jaddref: Bad state %p(0x%X)", 4450 jaddref, jaddref->ja_state); 4451 if (jaddref->ja_mkdir != NULL) 4452 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); 4453 WORKITEM_FREE(jaddref, D_JADDREF); 4454 } 4455 4456 /* 4457 * Free a jremref structure once it has been written or discarded. 
 */
static void
free_jremref(jremref)
	struct jremref *jremref;
{

	if (jremref->jr_ref.if_jsegdep)
		free_jsegdep(jremref->jr_ref.if_jsegdep);
	if (jremref->jr_state & INPROGRESS)
		panic("free_jremref: IO still pending");
	WORKITEM_FREE(jremref, D_JREMREF);
}

/*
 * Free a jnewblk structure.
 */
static void
free_jnewblk(jnewblk)
	struct jnewblk *jnewblk;
{

	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	LIST_REMOVE(jnewblk, jn_deps);
	if (jnewblk->jn_dep != NULL)
		panic("free_jnewblk: Dependency still attached.");
	WORKITEM_FREE(jnewblk, D_JNEWBLK);
}

/*
 * Cancel a jnewblk which has been made redundant by frag extension.
 */
static void
cancel_jnewblk(jnewblk, wkhd)
	struct jnewblk *jnewblk;
	struct workhead *wkhd;
{
	struct jsegdep *jsegdep;

	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
	jsegdep = jnewblk->jn_jsegdep;
	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
		panic("cancel_jnewblk: Invalid state");
	jnewblk->jn_jsegdep = NULL;
	jnewblk->jn_dep = NULL;
	jnewblk->jn_state |= GOINGAWAY;
	if (jnewblk->jn_state & INPROGRESS) {
		jnewblk->jn_state &= ~INPROGRESS;
		WORKLIST_REMOVE(&jnewblk->jn_list);
		jwork_insert(wkhd, jsegdep);
	} else {
		free_jsegdep(jsegdep);
		remove_from_journal(&jnewblk->jn_list);
	}
	wake_worklist(&jnewblk->jn_list);
	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
}

static void
free_jblkdep(jblkdep)
	struct jblkdep *jblkdep;
{

	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
		WORKITEM_FREE(jblkdep, D_JFREEBLK);
	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
		WORKITEM_FREE(jblkdep, D_JTRUNC);
	else
		panic("free_jblkdep: Unexpected type %s",
		    TYPENAME(jblkdep->jb_list.wk_type));
}

/*
 * Free a single jseg once it is no longer referenced in memory or on
 * disk.  Reclaim journal blocks and dependencies waiting for the segment
 * to disappear.
 */
static void
free_jseg(jseg, jblocks)
	struct jseg *jseg;
	struct jblocks *jblocks;
{
	struct freework *freework;

	/*
	 * Free freework structures that were lingering to indicate freed
	 * indirect blocks that forced journal write ordering on reallocate.
	 */
	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
		indirblk_remove(freework);
	if (jblocks->jb_oldestseg == jseg)
		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
	KASSERT(LIST_EMPTY(&jseg->js_entries),
	    ("free_jseg: Freed jseg has valid entries."));
	WORKITEM_FREE(jseg, D_JSEG);
}

/*
 * Free all jsegs that meet the criteria for being reclaimed and update
 * oldestseg.
 */
static void
free_jsegs(jblocks)
	struct jblocks *jblocks;
{
	struct jseg *jseg;

	/*
	 * Free only those jsegs which have none allocated before them to
	 * preserve the journal space ordering.
	 */
	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
		/*
		 * Only reclaim space when nothing depends on this journal
		 * set and another set has written that it is no longer
		 * valid.
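		 *
		 * Concretely (made-up sequence numbers): with segments
		 * 10, 11 and 12 on jb_segs and jb_oldestwrseq == 12,
		 * segments 10 and 11 may be reclaimed once their js_refs
		 * drop to zero, because a later segment has recorded that
		 * no valid entries precede it.  Segment 12 itself is only
		 * freed if it wrote no entries (js_cnt == 0).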
		 */
		if (jseg->js_refs != 0) {
			jblocks->jb_oldestseg = jseg;
			return;
		}
		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
			break;
		if (jseg->js_seq > jblocks->jb_oldestwrseq)
			break;
		/*
		 * We can free jsegs that didn't write entries when
		 * oldestwrseq == js_seq.
		 */
		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
		    jseg->js_cnt != 0)
			break;
		free_jseg(jseg, jblocks);
	}
	/*
	 * If we exited the loop above, we still must discover the
	 * oldest valid segment.
	 */
	if (jseg)
		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
		    jseg = TAILQ_NEXT(jseg, js_next))
			if (jseg->js_refs != 0)
				break;
	jblocks->jb_oldestseg = jseg;
	/*
	 * The journal has no valid records but some jsegs may still be
	 * waiting on oldestwrseq to advance.  We force a small record
	 * out to permit these lingering records to be reclaimed.
	 */
	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
		jblocks->jb_needseg = 1;
}

/*
 * Release one reference to a jseg and free it if the count reaches 0.  This
 * should eventually reclaim journal space as well.
 */
static void
rele_jseg(jseg)
	struct jseg *jseg;
{

	KASSERT(jseg->js_refs > 0,
	    ("rele_jseg: Invalid refcnt %d", jseg->js_refs));
	if (--jseg->js_refs != 0)
		return;
	free_jsegs(jseg->js_jblocks);
}

/*
 * Release a jsegdep and decrement the jseg count.
 */
static void
free_jsegdep(jsegdep)
	struct jsegdep *jsegdep;
{

	if (jsegdep->jd_seg)
		rele_jseg(jsegdep->jd_seg);
	WORKITEM_FREE(jsegdep, D_JSEGDEP);
}

/*
 * Wait for a journal item to make it to disk.  Initiate journal processing
 * if required.
 */
static int
jwait(wk, waitfor)
	struct worklist *wk;
	int waitfor;
{

	LOCK_OWNED(VFSTOUFS(wk->wk_mp));
	/*
	 * Blocking journal waits cause slow synchronous behavior.  Record
	 * stats on the frequency of these blocking operations.
	 */
	if (waitfor == MNT_WAIT) {
		stat_journal_wait++;
		switch (wk->wk_type) {
		case D_JREMREF:
		case D_JMVREF:
			stat_jwait_filepage++;
			break;
		case D_JTRUNC:
		case D_JFREEBLK:
			stat_jwait_freeblks++;
			break;
		case D_JNEWBLK:
			stat_jwait_newblk++;
			break;
		case D_JADDREF:
			stat_jwait_inode++;
			break;
		default:
			break;
		}
	}
	/*
	 * If IO has not started we process the journal.  We can't mark the
	 * worklist item as IOWAITING because we drop the lock while
	 * processing the journal and the worklist entry may be freed after
	 * this point.  The caller may call back in and re-issue the request.
	 */
	if ((wk->wk_state & INPROGRESS) == 0) {
		softdep_process_journal(wk->wk_mp, wk, waitfor);
		if (waitfor != MNT_WAIT)
			return (EBUSY);
		return (0);
	}
	if (waitfor != MNT_WAIT)
		return (EBUSY);
	wait_worklist(wk, "jwait");
	return (0);
}

/*
 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
 * appropriate.  This is a convenience function to reduce duplicate code
 * for the setup and revert functions below.
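 *
 * A worked example of the delta recorded here: if unlink() has already
 * dropped i_effnlink from 2 to 1 while the on-disk inode still claims
 * both links (i_nlink == 2), id_nlinkdelta is set to 2 - 1 = 1, the
 * number of links that must be subtracted from any copy of the inode
 * written out before the directory update commits.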
4700 */ 4701 static struct inodedep * 4702 inodedep_lookup_ip(ip) 4703 struct inode *ip; 4704 { 4705 struct inodedep *inodedep; 4706 4707 KASSERT(ip->i_nlink >= ip->i_effnlink, 4708 ("inodedep_lookup_ip: bad delta")); 4709 (void) inodedep_lookup(ITOVFS(ip), ip->i_number, DEPALLOC, 4710 &inodedep); 4711 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 4712 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 4713 4714 return (inodedep); 4715 } 4716 4717 /* 4718 * Called prior to creating a new inode and linking it to a directory. The 4719 * jaddref structure must already be allocated by softdep_setup_inomapdep 4720 * and it is discovered here so we can initialize the mode and update 4721 * nlinkdelta. 4722 */ 4723 void 4724 softdep_setup_create(dp, ip) 4725 struct inode *dp; 4726 struct inode *ip; 4727 { 4728 struct inodedep *inodedep; 4729 struct jaddref *jaddref; 4730 struct vnode *dvp; 4731 4732 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4733 ("softdep_setup_create called on non-softdep filesystem")); 4734 KASSERT(ip->i_nlink == 1, 4735 ("softdep_setup_create: Invalid link count.")); 4736 dvp = ITOV(dp); 4737 ACQUIRE_LOCK(ITOUMP(dp)); 4738 inodedep = inodedep_lookup_ip(ip); 4739 if (DOINGSUJ(dvp)) { 4740 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4741 inoreflst); 4742 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 4743 ("softdep_setup_create: No addref structure present.")); 4744 } 4745 softdep_prelink(dvp, NULL); 4746 FREE_LOCK(ITOUMP(dp)); 4747 } 4748 4749 /* 4750 * Create a jaddref structure to track the addition of a DOTDOT link when 4751 * we are reparenting an inode as part of a rename. This jaddref will be 4752 * found by softdep_setup_directory_change. Adjusts nlinkdelta for 4753 * non-journaling softdep. 4754 */ 4755 void 4756 softdep_setup_dotdot_link(dp, ip) 4757 struct inode *dp; 4758 struct inode *ip; 4759 { 4760 struct inodedep *inodedep; 4761 struct jaddref *jaddref; 4762 struct vnode *dvp; 4763 4764 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4765 ("softdep_setup_dotdot_link called on non-softdep filesystem")); 4766 dvp = ITOV(dp); 4767 jaddref = NULL; 4768 /* 4769 * We don't set MKDIR_PARENT as this is not tied to a mkdir and 4770 * is used as a normal link would be. 4771 */ 4772 if (DOINGSUJ(dvp)) 4773 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4774 dp->i_effnlink - 1, dp->i_mode); 4775 ACQUIRE_LOCK(ITOUMP(dp)); 4776 inodedep = inodedep_lookup_ip(dp); 4777 if (jaddref) 4778 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4779 if_deps); 4780 softdep_prelink(dvp, ITOV(ip)); 4781 FREE_LOCK(ITOUMP(dp)); 4782 } 4783 4784 /* 4785 * Create a jaddref structure to track a new link to an inode. The directory 4786 * offset is not known until softdep_setup_directory_add or 4787 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling 4788 * softdep. 
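 *
 * A simplified sketch of the caller's ordering in the link() path
 * (illustrative only; the real caller is ufs_link() and its error
 * handling is omitted, with vp being ip's vnode):
 *
 *	ip->i_effnlink++;
 *	ip->i_nlink++;
 *	if (DOINGSOFTDEP(vp))
 *		softdep_setup_link(dp, ip);
 *
 * so the jaddref created below records i_effnlink - 1, the link count
 * prior to the addition; the directory entry is written later, which is
 * when the real offset becomes known.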
4789 */ 4790 void 4791 softdep_setup_link(dp, ip) 4792 struct inode *dp; 4793 struct inode *ip; 4794 { 4795 struct inodedep *inodedep; 4796 struct jaddref *jaddref; 4797 struct vnode *dvp; 4798 4799 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4800 ("softdep_setup_link called on non-softdep filesystem")); 4801 dvp = ITOV(dp); 4802 jaddref = NULL; 4803 if (DOINGSUJ(dvp)) 4804 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, 4805 ip->i_mode); 4806 ACQUIRE_LOCK(ITOUMP(dp)); 4807 inodedep = inodedep_lookup_ip(ip); 4808 if (jaddref) 4809 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4810 if_deps); 4811 softdep_prelink(dvp, ITOV(ip)); 4812 FREE_LOCK(ITOUMP(dp)); 4813 } 4814 4815 /* 4816 * Called to create the jaddref structures to track . and .. references as 4817 * well as lookup and further initialize the incomplete jaddref created 4818 * by softdep_setup_inomapdep when the inode was allocated. Adjusts 4819 * nlinkdelta for non-journaling softdep. 4820 */ 4821 void 4822 softdep_setup_mkdir(dp, ip) 4823 struct inode *dp; 4824 struct inode *ip; 4825 { 4826 struct inodedep *inodedep; 4827 struct jaddref *dotdotaddref; 4828 struct jaddref *dotaddref; 4829 struct jaddref *jaddref; 4830 struct vnode *dvp; 4831 4832 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4833 ("softdep_setup_mkdir called on non-softdep filesystem")); 4834 dvp = ITOV(dp); 4835 dotaddref = dotdotaddref = NULL; 4836 if (DOINGSUJ(dvp)) { 4837 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, 4838 ip->i_mode); 4839 dotaddref->ja_state |= MKDIR_BODY; 4840 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4841 dp->i_effnlink - 1, dp->i_mode); 4842 dotdotaddref->ja_state |= MKDIR_PARENT; 4843 } 4844 ACQUIRE_LOCK(ITOUMP(dp)); 4845 inodedep = inodedep_lookup_ip(ip); 4846 if (DOINGSUJ(dvp)) { 4847 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4848 inoreflst); 4849 KASSERT(jaddref != NULL, 4850 ("softdep_setup_mkdir: No addref structure present.")); 4851 KASSERT(jaddref->ja_parent == dp->i_number, 4852 ("softdep_setup_mkdir: bad parent %ju", 4853 (uintmax_t)jaddref->ja_parent)); 4854 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, 4855 if_deps); 4856 } 4857 inodedep = inodedep_lookup_ip(dp); 4858 if (DOINGSUJ(dvp)) 4859 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, 4860 &dotdotaddref->ja_ref, if_deps); 4861 softdep_prelink(ITOV(dp), NULL); 4862 FREE_LOCK(ITOUMP(dp)); 4863 } 4864 4865 /* 4866 * Called to track nlinkdelta of the inode and parent directories prior to 4867 * unlinking a directory. 4868 */ 4869 void 4870 softdep_setup_rmdir(dp, ip) 4871 struct inode *dp; 4872 struct inode *ip; 4873 { 4874 struct vnode *dvp; 4875 4876 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4877 ("softdep_setup_rmdir called on non-softdep filesystem")); 4878 dvp = ITOV(dp); 4879 ACQUIRE_LOCK(ITOUMP(dp)); 4880 (void) inodedep_lookup_ip(ip); 4881 (void) inodedep_lookup_ip(dp); 4882 softdep_prelink(dvp, ITOV(ip)); 4883 FREE_LOCK(ITOUMP(dp)); 4884 } 4885 4886 /* 4887 * Called to track nlinkdelta of the inode and parent directories prior to 4888 * unlink. 
4889 */ 4890 void 4891 softdep_setup_unlink(dp, ip) 4892 struct inode *dp; 4893 struct inode *ip; 4894 { 4895 struct vnode *dvp; 4896 4897 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4898 ("softdep_setup_unlink called on non-softdep filesystem")); 4899 dvp = ITOV(dp); 4900 ACQUIRE_LOCK(ITOUMP(dp)); 4901 (void) inodedep_lookup_ip(ip); 4902 (void) inodedep_lookup_ip(dp); 4903 softdep_prelink(dvp, ITOV(ip)); 4904 FREE_LOCK(ITOUMP(dp)); 4905 } 4906 4907 /* 4908 * Called to release the journal structures created by a failed non-directory 4909 * creation. Adjusts nlinkdelta for non-journaling softdep. 4910 */ 4911 void 4912 softdep_revert_create(dp, ip) 4913 struct inode *dp; 4914 struct inode *ip; 4915 { 4916 struct inodedep *inodedep; 4917 struct jaddref *jaddref; 4918 struct vnode *dvp; 4919 4920 KASSERT(MOUNTEDSOFTDEP(ITOVFS((dp))) != 0, 4921 ("softdep_revert_create called on non-softdep filesystem")); 4922 dvp = ITOV(dp); 4923 ACQUIRE_LOCK(ITOUMP(dp)); 4924 inodedep = inodedep_lookup_ip(ip); 4925 if (DOINGSUJ(dvp)) { 4926 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4927 inoreflst); 4928 KASSERT(jaddref->ja_parent == dp->i_number, 4929 ("softdep_revert_create: addref parent mismatch")); 4930 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4931 } 4932 FREE_LOCK(ITOUMP(dp)); 4933 } 4934 4935 /* 4936 * Called to release the journal structures created by a failed link 4937 * addition. Adjusts nlinkdelta for non-journaling softdep. 4938 */ 4939 void 4940 softdep_revert_link(dp, ip) 4941 struct inode *dp; 4942 struct inode *ip; 4943 { 4944 struct inodedep *inodedep; 4945 struct jaddref *jaddref; 4946 struct vnode *dvp; 4947 4948 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4949 ("softdep_revert_link called on non-softdep filesystem")); 4950 dvp = ITOV(dp); 4951 ACQUIRE_LOCK(ITOUMP(dp)); 4952 inodedep = inodedep_lookup_ip(ip); 4953 if (DOINGSUJ(dvp)) { 4954 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4955 inoreflst); 4956 KASSERT(jaddref->ja_parent == dp->i_number, 4957 ("softdep_revert_link: addref parent mismatch")); 4958 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4959 } 4960 FREE_LOCK(ITOUMP(dp)); 4961 } 4962 4963 /* 4964 * Called to release the journal structures created by a failed mkdir 4965 * attempt. Adjusts nlinkdelta for non-journaling softdep. 
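 *
 * Note that softdep_setup_mkdir may have left three jaddref structures
 * outstanding: the ".." link recorded on the parent directory's inodedep
 * and the name and "." links recorded on the child's inodedep.  The code
 * below cancels all three so that neither inode's on-disk link count can
 * run ahead of the abandoned operation.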
4966 */ 4967 void 4968 softdep_revert_mkdir(dp, ip) 4969 struct inode *dp; 4970 struct inode *ip; 4971 { 4972 struct inodedep *inodedep; 4973 struct jaddref *jaddref; 4974 struct jaddref *dotaddref; 4975 struct vnode *dvp; 4976 4977 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 4978 ("softdep_revert_mkdir called on non-softdep filesystem")); 4979 dvp = ITOV(dp); 4980 4981 ACQUIRE_LOCK(ITOUMP(dp)); 4982 inodedep = inodedep_lookup_ip(dp); 4983 if (DOINGSUJ(dvp)) { 4984 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4985 inoreflst); 4986 KASSERT(jaddref->ja_parent == ip->i_number, 4987 ("softdep_revert_mkdir: dotdot addref parent mismatch")); 4988 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4989 } 4990 inodedep = inodedep_lookup_ip(ip); 4991 if (DOINGSUJ(dvp)) { 4992 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4993 inoreflst); 4994 KASSERT(jaddref->ja_parent == dp->i_number, 4995 ("softdep_revert_mkdir: addref parent mismatch")); 4996 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 4997 inoreflst, if_deps); 4998 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4999 KASSERT(dotaddref->ja_parent == ip->i_number, 5000 ("softdep_revert_mkdir: dot addref parent mismatch")); 5001 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); 5002 } 5003 FREE_LOCK(ITOUMP(dp)); 5004 } 5005 5006 /* 5007 * Called to correct nlinkdelta after a failed rmdir. 5008 */ 5009 void 5010 softdep_revert_rmdir(dp, ip) 5011 struct inode *dp; 5012 struct inode *ip; 5013 { 5014 5015 KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0, 5016 ("softdep_revert_rmdir called on non-softdep filesystem")); 5017 ACQUIRE_LOCK(ITOUMP(dp)); 5018 (void) inodedep_lookup_ip(ip); 5019 (void) inodedep_lookup_ip(dp); 5020 FREE_LOCK(ITOUMP(dp)); 5021 } 5022 5023 /* 5024 * Protecting the freemaps (or bitmaps). 5025 * 5026 * To eliminate the need to execute fsck before mounting a filesystem 5027 * after a power failure, one must (conservatively) guarantee that the 5028 * on-disk copy of the bitmaps never indicate that a live inode or block is 5029 * free. So, when a block or inode is allocated, the bitmap should be 5030 * updated (on disk) before any new pointers. When a block or inode is 5031 * freed, the bitmap should not be updated until all pointers have been 5032 * reset. The latter dependency is handled by the delayed de-allocation 5033 * approach described below for block and inode de-allocation. The former 5034 * dependency is handled by calling the following procedure when a block or 5035 * inode is allocated. When an inode is allocated an "inodedep" is created 5036 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 5037 * Each "inodedep" is also inserted into the hash indexing structure so 5038 * that any additional link additions can be made dependent on the inode 5039 * allocation. 5040 * 5041 * The ufs filesystem maintains a number of free block counts (e.g., per 5042 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 5043 * in addition to the bitmaps. These counts are used to improve efficiency 5044 * during allocation and therefore must be consistent with the bitmaps. 5045 * There is no convenient way to guarantee post-crash consistency of these 5046 * counts with simple update ordering, for two main reasons: (1) The counts 5047 * and bitmaps for a single cylinder group block are not in the same disk 5048 * sector. If a disk write is interrupted (e.g., by power failure), one may 5049 * be written and the other not. 
(2) Some of the counts are located in the
 * superblock rather than the cylinder group block.  So, we focus our soft
 * updates implementation on protecting the bitmaps.  When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
	int mode;
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;
	struct jaddref *jaddref;
	struct mount *mp;
	struct fs *fs;

	mp = ITOVFS(ip);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
	fs = VFSTOUFS(mp)->um_fs;
	jaddref = NULL;

	/*
	 * Allocate the journal reference add structure so that the bitmap
	 * can be dependent on it.
	 */
	if (MOUNTEDSUJ(mp)) {
		jaddref = newjaddref(ip, newinum, 0, 0, mode);
		jaddref->ja_state |= NEWBLOCK;
	}

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 *
	 * We have to preallocate a bmsafemap entry in case it is needed
	 * in bmsafemap_lookup since once we allocate the inodedep, we
	 * have to finish initializing it before we can FREE_LOCK().
	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
	 * in bmsafemap_lookup.  We cannot call bmsafemap_lookup before
	 * creating the inodedep as it can be freed during the time
	 * that we FREE_LOCK() while allocating the inodedep.  We must
	 * call workitem_alloc() before entering the locked section as
	 * it also acquires the lock and we must avoid trying to do so
	 * recursively.
	 */
	bmsafemap = malloc(sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
	ACQUIRE_LOCK(ITOUMP(ip));
	if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep)))
		panic("softdep_setup_inomapdep: dependency %p for new "
		    "inode already exists", inodedep);
	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
	if (jaddref) {
		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
		    if_deps);
	} else {
		inodedep->id_state |= ONDEPLIST;
		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	}
	inodedep->id_bmsafemap = bmsafemap;
	inodedep->id_state &= ~DEPCOMPLETE;
	FREE_LOCK(ITOUMP(ip));
}

/*
 * Called just after updating the cylinder group block to
 * allocate a block or fragment.
 */
void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct mount *mp;	/* filesystem doing allocation */
	ufs2_daddr_t newblkno;	/* number of newly allocated block */
	int frags;		/* Number of fragments. */
	int oldfrags;		/* Previous number of fragments for extend.
*/ 5135 { 5136 struct newblk *newblk; 5137 struct bmsafemap *bmsafemap; 5138 struct jnewblk *jnewblk; 5139 struct ufsmount *ump; 5140 struct fs *fs; 5141 5142 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5143 ("softdep_setup_blkmapdep called on non-softdep filesystem")); 5144 ump = VFSTOUFS(mp); 5145 fs = ump->um_fs; 5146 jnewblk = NULL; 5147 /* 5148 * Create a dependency for the newly allocated block. 5149 * Add it to the dependency list for the buffer holding 5150 * the cylinder group map from which it was allocated. 5151 */ 5152 if (MOUNTEDSUJ(mp)) { 5153 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); 5154 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); 5155 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); 5156 jnewblk->jn_state = ATTACHED; 5157 jnewblk->jn_blkno = newblkno; 5158 jnewblk->jn_frags = frags; 5159 jnewblk->jn_oldfrags = oldfrags; 5160 #ifdef INVARIANTS 5161 { 5162 struct cg *cgp; 5163 uint8_t *blksfree; 5164 long bno; 5165 int i; 5166 5167 cgp = (struct cg *)bp->b_data; 5168 blksfree = cg_blksfree(cgp); 5169 bno = dtogd(fs, jnewblk->jn_blkno); 5170 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; 5171 i++) { 5172 if (isset(blksfree, bno + i)) 5173 panic("softdep_setup_blkmapdep: " 5174 "free fragment %d from %d-%d " 5175 "state 0x%X dep %p", i, 5176 jnewblk->jn_oldfrags, 5177 jnewblk->jn_frags, 5178 jnewblk->jn_state, 5179 jnewblk->jn_dep); 5180 } 5181 } 5182 #endif 5183 } 5184 5185 CTR3(KTR_SUJ, 5186 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", 5187 newblkno, frags, oldfrags); 5188 ACQUIRE_LOCK(ump); 5189 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) 5190 panic("softdep_setup_blkmapdep: found block"); 5191 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, 5192 dtog(fs, newblkno), NULL); 5193 if (jnewblk) { 5194 jnewblk->jn_dep = (struct worklist *)newblk; 5195 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); 5196 } else { 5197 newblk->nb_state |= ONDEPLIST; 5198 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 5199 } 5200 newblk->nb_bmsafemap = bmsafemap; 5201 newblk->nb_jnewblk = jnewblk; 5202 FREE_LOCK(ump); 5203 } 5204 5205 #define BMSAFEMAP_HASH(ump, cg) \ 5206 (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) 5207 5208 static int 5209 bmsafemap_find(bmsafemaphd, cg, bmsafemapp) 5210 struct bmsafemap_hashhead *bmsafemaphd; 5211 int cg; 5212 struct bmsafemap **bmsafemapp; 5213 { 5214 struct bmsafemap *bmsafemap; 5215 5216 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) 5217 if (bmsafemap->sm_cg == cg) 5218 break; 5219 if (bmsafemap) { 5220 *bmsafemapp = bmsafemap; 5221 return (1); 5222 } 5223 *bmsafemapp = NULL; 5224 5225 return (0); 5226 } 5227 5228 /* 5229 * Find the bmsafemap associated with a cylinder group buffer. 5230 * If none exists, create one. The buffer must be locked when 5231 * this routine is called and this routine must be called with 5232 * the softdep lock held. To avoid giving up the lock while 5233 * allocating a new bmsafemap, a preallocated bmsafemap may be 5234 * provided. If it is provided but not needed, it is freed. 
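 *
 * A condensed sketch of the preallocating caller's side, using lines
 * that appear in softdep_setup_inomapdep() above:
 *
 *	bmsafemap = malloc(sizeof(struct bmsafemap),
 *	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
 *	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
 *	ACQUIRE_LOCK(ITOUMP(ip));
 *	...
 *	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum),
 *	    bmsafemap);
 *
 * If a bmsafemap for the cylinder group already exists, the preallocated
 * one is freed here and the existing one is returned, so the caller never
 * needs to check which case occurred.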
5235 */ 5236 static struct bmsafemap * 5237 bmsafemap_lookup(mp, bp, cg, newbmsafemap) 5238 struct mount *mp; 5239 struct buf *bp; 5240 int cg; 5241 struct bmsafemap *newbmsafemap; 5242 { 5243 struct bmsafemap_hashhead *bmsafemaphd; 5244 struct bmsafemap *bmsafemap, *collision; 5245 struct worklist *wk; 5246 struct ufsmount *ump; 5247 5248 ump = VFSTOUFS(mp); 5249 LOCK_OWNED(ump); 5250 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); 5251 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5252 if (wk->wk_type == D_BMSAFEMAP) { 5253 if (newbmsafemap) 5254 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5255 return (WK_BMSAFEMAP(wk)); 5256 } 5257 } 5258 bmsafemaphd = BMSAFEMAP_HASH(ump, cg); 5259 if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { 5260 if (newbmsafemap) 5261 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5262 return (bmsafemap); 5263 } 5264 if (newbmsafemap) { 5265 bmsafemap = newbmsafemap; 5266 } else { 5267 FREE_LOCK(ump); 5268 bmsafemap = malloc(sizeof(struct bmsafemap), 5269 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5270 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5271 ACQUIRE_LOCK(ump); 5272 } 5273 bmsafemap->sm_buf = bp; 5274 LIST_INIT(&bmsafemap->sm_inodedephd); 5275 LIST_INIT(&bmsafemap->sm_inodedepwr); 5276 LIST_INIT(&bmsafemap->sm_newblkhd); 5277 LIST_INIT(&bmsafemap->sm_newblkwr); 5278 LIST_INIT(&bmsafemap->sm_jaddrefhd); 5279 LIST_INIT(&bmsafemap->sm_jnewblkhd); 5280 LIST_INIT(&bmsafemap->sm_freehd); 5281 LIST_INIT(&bmsafemap->sm_freewr); 5282 if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { 5283 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 5284 return (collision); 5285 } 5286 bmsafemap->sm_cg = cg; 5287 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); 5288 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 5289 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 5290 return (bmsafemap); 5291 } 5292 5293 /* 5294 * Direct block allocation dependencies. 5295 * 5296 * When a new block is allocated, the corresponding disk locations must be 5297 * initialized (with zeros or new data) before the on-disk inode points to 5298 * them. Also, the freemap from which the block was allocated must be 5299 * updated (on disk) before the inode's pointer. These two dependencies are 5300 * independent of each other and are needed for all file blocks and indirect 5301 * blocks that are pointed to directly by the inode. Just before the 5302 * "in-core" version of the inode is updated with a newly allocated block 5303 * number, a procedure (below) is called to setup allocation dependency 5304 * structures. These structures are removed when the corresponding 5305 * dependencies are satisfied or when the block allocation becomes obsolete 5306 * (i.e., the file is deleted, the block is de-allocated, or the block is a 5307 * fragment that gets upgraded). All of these cases are handled in 5308 * procedures described later. 5309 * 5310 * When a file extension causes a fragment to be upgraded, either to a larger 5311 * fragment or to a full block, the on-disk location may change (if the 5312 * previous fragment could not simply be extended). In this case, the old 5313 * fragment must be de-allocated, but not until after the inode's pointer has 5314 * been updated. In most cases, this is handled by later procedures, which 5315 * will construct a "freefrag" structure to be added to the workitem queue 5316 * when the inode update is complete (or obsolete). 
The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
void
softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t off;		/* block pointer within inode */
	ufs2_daddr_t newblkno;	/* disk block number being added */
	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct freefrag *freefrag;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct jnewblk *jnewblk;
	struct newblk *newblk;
	struct mount *mp;
	ufs_lbn_t lbn;

	lbn = bp->b_lblkno;
	mp = ITOVFS(ip);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
	if (oldblkno && oldblkno != newblkno)
		/*
		 * The usual case is that a smaller fragment that
		 * was just allocated has been replaced with a bigger
		 * fragment or a full-size block.  If it is marked as
		 * B_DELWRI, the current contents have not been written
		 * to disk.  It is possible that the block was written
		 * earlier, but very uncommon.  If the block has never
		 * been written, there is no need to send a BIO_DELETE
		 * for it when it is freed.  The gain from avoiding the
		 * TRIMs for the common case of unwritten blocks far
		 * exceeds the cost of the write amplification for the
		 * uncommon case of failing to send a TRIM for a block
		 * that had been written.
		 */
		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
		    (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
	else
		freefrag = NULL;

	CTR6(KTR_SUJ,
	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
	    "off %jd newsize %ld oldsize %ld",
	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
	ACQUIRE_LOCK(ITOUMP(ip));
	if (off >= UFS_NDADDR) {
		if (lbn > 0)
			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
			    lbn, off);
		/* allocating an indirect block */
		if (oldblkno != 0)
			panic("softdep_setup_allocdirect: non-zero indir");
	} else {
		if (off != lbn)
			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
			    lbn, off);
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
			    &pagedep);
	}
	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");
	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
	    ("softdep_setup_allocdirect: newblk already initialized"));
	/*
	 * Convert the newblk to an allocdirect.
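	 *
	 * The cast below is safe because struct allocdirect embeds a
	 * struct newblk (ad_block) as its first member, so
	 * WORKITEM_REASSIGN only changes the dependency type while all
	 * of the already-initialized newblk fields remain valid through
	 * the new name.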
5399 */ 5400 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); 5401 adp = (struct allocdirect *)newblk; 5402 newblk->nb_freefrag = freefrag; 5403 adp->ad_offset = off; 5404 adp->ad_oldblkno = oldblkno; 5405 adp->ad_newsize = newsize; 5406 adp->ad_oldsize = oldsize; 5407 5408 /* 5409 * Finish initializing the journal. 5410 */ 5411 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5412 jnewblk->jn_ino = ip->i_number; 5413 jnewblk->jn_lbn = lbn; 5414 add_to_journal(&jnewblk->jn_list); 5415 } 5416 if (freefrag && freefrag->ff_jdep != NULL && 5417 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5418 add_to_journal(freefrag->ff_jdep); 5419 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 5420 adp->ad_inodedep = inodedep; 5421 5422 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5423 /* 5424 * The list of allocdirects must be kept in sorted and ascending 5425 * order so that the rollback routines can quickly determine the 5426 * first uncommitted block (the size of the file stored on disk 5427 * ends at the end of the lowest committed fragment, or if there 5428 * are no fragments, at the end of the highest committed block). 5429 * Since files generally grow, the typical case is that the new 5430 * block is to be added at the end of the list. We speed this 5431 * special case by checking against the last allocdirect in the 5432 * list before laboriously traversing the list looking for the 5433 * insertion point. 5434 */ 5435 adphead = &inodedep->id_newinoupdt; 5436 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5437 if (oldadp == NULL || oldadp->ad_offset <= off) { 5438 /* insert at end of list */ 5439 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5440 if (oldadp != NULL && oldadp->ad_offset == off) 5441 allocdirect_merge(adphead, adp, oldadp); 5442 FREE_LOCK(ITOUMP(ip)); 5443 return; 5444 } 5445 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5446 if (oldadp->ad_offset >= off) 5447 break; 5448 } 5449 if (oldadp == NULL) 5450 panic("softdep_setup_allocdirect: lost entry"); 5451 /* insert in middle of list */ 5452 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5453 if (oldadp->ad_offset == off) 5454 allocdirect_merge(adphead, adp, oldadp); 5455 5456 FREE_LOCK(ITOUMP(ip)); 5457 } 5458 5459 /* 5460 * Merge a newer and older journal record to be stored either in a 5461 * newblock or freefrag. This handles aggregating journal records for 5462 * fragment allocation into a second record as well as replacing a 5463 * journal free with an aborted journal allocation. A segment for the 5464 * oldest record will be placed on wkhd if it has been written. If not 5465 * the segment for the newer record will suffice. 5466 */ 5467 static struct worklist * 5468 jnewblk_merge(new, old, wkhd) 5469 struct worklist *new; 5470 struct worklist *old; 5471 struct workhead *wkhd; 5472 { 5473 struct jnewblk *njnewblk; 5474 struct jnewblk *jnewblk; 5475 5476 /* Handle NULLs to simplify callers. */ 5477 if (new == NULL) 5478 return (old); 5479 if (old == NULL) 5480 return (new); 5481 /* Replace a jfreefrag with a jnewblk. */ 5482 if (new->wk_type == D_JFREEFRAG) { 5483 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) 5484 panic("jnewblk_merge: blkno mismatch: %p, %p", 5485 old, new); 5486 cancel_jfreefrag(WK_JFREEFRAG(new)); 5487 return (old); 5488 } 5489 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) 5490 panic("jnewblk_merge: Bad type: old %d new %d\n", 5491 old->wk_type, new->wk_type); 5492 /* 5493 * Handle merging of two jnewblk records that describe 5494 * different sets of fragments in the same block. 
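	 *
	 * A worked example with made-up numbers: an initial two-fragment
	 * allocation is journaled as { jn_oldfrags 0, jn_frags 2 } and a
	 * later in-place extension to four fragments as { jn_oldfrags 2,
	 * jn_frags 4 }.  After the merge below the newer record becomes
	 * { jn_oldfrags 0, jn_frags 4 }, describing the whole allocation,
	 * and the older record is canceled and freed.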
5495 */ 5496 jnewblk = WK_JNEWBLK(old); 5497 njnewblk = WK_JNEWBLK(new); 5498 if (jnewblk->jn_blkno != njnewblk->jn_blkno) 5499 panic("jnewblk_merge: Merging disparate blocks."); 5500 /* 5501 * The record may be rolled back in the cg. 5502 */ 5503 if (jnewblk->jn_state & UNDONE) { 5504 jnewblk->jn_state &= ~UNDONE; 5505 njnewblk->jn_state |= UNDONE; 5506 njnewblk->jn_state &= ~ATTACHED; 5507 } 5508 /* 5509 * We modify the newer addref and free the older so that if neither 5510 * has been written the most up-to-date copy will be on disk. If 5511 * both have been written but rolled back we only temporarily need 5512 * one of them to fix the bits when the cg write completes. 5513 */ 5514 jnewblk->jn_state |= ATTACHED | COMPLETE; 5515 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; 5516 cancel_jnewblk(jnewblk, wkhd); 5517 WORKLIST_REMOVE(&jnewblk->jn_list); 5518 free_jnewblk(jnewblk); 5519 return (new); 5520 } 5521 5522 /* 5523 * Replace an old allocdirect dependency with a newer one. 5524 */ 5525 static void 5526 allocdirect_merge(adphead, newadp, oldadp) 5527 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 5528 struct allocdirect *newadp; /* allocdirect being added */ 5529 struct allocdirect *oldadp; /* existing allocdirect being checked */ 5530 { 5531 struct worklist *wk; 5532 struct freefrag *freefrag; 5533 5534 freefrag = NULL; 5535 LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); 5536 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 5537 newadp->ad_oldsize != oldadp->ad_newsize || 5538 newadp->ad_offset >= UFS_NDADDR) 5539 panic("%s %jd != new %jd || old size %ld != new %ld", 5540 "allocdirect_merge: old blkno", 5541 (intmax_t)newadp->ad_oldblkno, 5542 (intmax_t)oldadp->ad_newblkno, 5543 newadp->ad_oldsize, oldadp->ad_newsize); 5544 newadp->ad_oldblkno = oldadp->ad_oldblkno; 5545 newadp->ad_oldsize = oldadp->ad_oldsize; 5546 /* 5547 * If the old dependency had a fragment to free or had never 5548 * previously had a block allocated, then the new dependency 5549 * can immediately post its freefrag and adopt the old freefrag. 5550 * This action is done by swapping the freefrag dependencies. 5551 * The new dependency gains the old one's freefrag, and the 5552 * old one gets the new one and then immediately puts it on 5553 * the worklist when it is freed by free_newblk. It is 5554 * not possible to do this swap when the old dependency had a 5555 * non-zero size but no previous fragment to free. This condition 5556 * arises when the new block is an extension of the old block. 5557 * Here, the first part of the fragment allocated to the new 5558 * dependency is part of the block currently claimed on disk by 5559 * the old dependency, so cannot legitimately be freed until the 5560 * conditions for the new dependency are fulfilled. 5561 */ 5562 freefrag = newadp->ad_freefrag; 5563 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 5564 newadp->ad_freefrag = oldadp->ad_freefrag; 5565 oldadp->ad_freefrag = freefrag; 5566 } 5567 /* 5568 * If we are tracking a new directory-block allocation, 5569 * move it from the old allocdirect to the new allocdirect. 5570 */ 5571 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 5572 WORKLIST_REMOVE(wk); 5573 if (!LIST_EMPTY(&oldadp->ad_newdirblk)) 5574 panic("allocdirect_merge: extra newdirblk"); 5575 WORKLIST_INSERT(&newadp->ad_newdirblk, wk); 5576 } 5577 TAILQ_REMOVE(adphead, oldadp, ad_next); 5578 /* 5579 * We need to move any journal dependencies over to the freefrag 5580 * that releases this block if it exists. 
Otherwise we are 5581 * extending an existing block and we'll wait until that is 5582 * complete to release the journal space and extend the 5583 * new journal to cover this old space as well. 5584 */ 5585 if (freefrag == NULL) { 5586 if (oldadp->ad_newblkno != newadp->ad_newblkno) 5587 panic("allocdirect_merge: %jd != %jd", 5588 oldadp->ad_newblkno, newadp->ad_newblkno); 5589 newadp->ad_block.nb_jnewblk = (struct jnewblk *) 5590 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, 5591 &oldadp->ad_block.nb_jnewblk->jn_list, 5592 &newadp->ad_block.nb_jwork); 5593 oldadp->ad_block.nb_jnewblk = NULL; 5594 cancel_newblk(&oldadp->ad_block, NULL, 5595 &newadp->ad_block.nb_jwork); 5596 } else { 5597 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, 5598 &freefrag->ff_list, &freefrag->ff_jwork); 5599 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, 5600 &freefrag->ff_jwork); 5601 } 5602 free_newblk(&oldadp->ad_block); 5603 } 5604 5605 /* 5606 * Allocate a jfreefrag structure to journal a single block free. 5607 */ 5608 static struct jfreefrag * 5609 newjfreefrag(freefrag, ip, blkno, size, lbn) 5610 struct freefrag *freefrag; 5611 struct inode *ip; 5612 ufs2_daddr_t blkno; 5613 long size; 5614 ufs_lbn_t lbn; 5615 { 5616 struct jfreefrag *jfreefrag; 5617 struct fs *fs; 5618 5619 fs = ITOFS(ip); 5620 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, 5621 M_SOFTDEP_FLAGS); 5622 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, ITOVFS(ip)); 5623 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); 5624 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; 5625 jfreefrag->fr_ino = ip->i_number; 5626 jfreefrag->fr_lbn = lbn; 5627 jfreefrag->fr_blkno = blkno; 5628 jfreefrag->fr_frags = numfrags(fs, size); 5629 jfreefrag->fr_freefrag = freefrag; 5630 5631 return (jfreefrag); 5632 } 5633 5634 /* 5635 * Allocate a new freefrag structure. 5636 */ 5637 static struct freefrag * 5638 newfreefrag(ip, blkno, size, lbn, key) 5639 struct inode *ip; 5640 ufs2_daddr_t blkno; 5641 long size; 5642 ufs_lbn_t lbn; 5643 u_long key; 5644 { 5645 struct freefrag *freefrag; 5646 struct ufsmount *ump; 5647 struct fs *fs; 5648 5649 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", 5650 ip->i_number, blkno, size, lbn); 5651 ump = ITOUMP(ip); 5652 fs = ump->um_fs; 5653 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 5654 panic("newfreefrag: frag size"); 5655 freefrag = malloc(sizeof(struct freefrag), 5656 M_FREEFRAG, M_SOFTDEP_FLAGS); 5657 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ump)); 5658 freefrag->ff_state = ATTACHED; 5659 LIST_INIT(&freefrag->ff_jwork); 5660 freefrag->ff_inum = ip->i_number; 5661 freefrag->ff_vtype = ITOV(ip)->v_type; 5662 freefrag->ff_blkno = blkno; 5663 freefrag->ff_fragsize = size; 5664 freefrag->ff_key = key; 5665 5666 if (MOUNTEDSUJ(UFSTOVFS(ump))) { 5667 freefrag->ff_jdep = (struct worklist *) 5668 newjfreefrag(freefrag, ip, blkno, size, lbn); 5669 } else { 5670 freefrag->ff_state |= DEPCOMPLETE; 5671 freefrag->ff_jdep = NULL; 5672 } 5673 5674 return (freefrag); 5675 } 5676 5677 /* 5678 * This workitem de-allocates fragments that were replaced during 5679 * file block allocation. 
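 * The ff_key recorded when the freefrag was created (for example,
 * NOTRIM_KEY for blocks that were never written to disk) is passed
 * through to ffs_blkfree() below so that TRIM requests can be
 * suppressed or batched appropriately.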
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{
	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
	struct workhead wkhd;

	CTR3(KTR_SUJ,
	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
	/*
	 * It would be illegal to add new completion items to the
	 * freefrag after it was scheduled to be done so it must be
	 * safe to modify the list head here.
	 */
	LIST_INIT(&wkhd);
	ACQUIRE_LOCK(ump);
	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
	/*
	 * If the journal has not been written we must cancel it here.
	 */
	if (freefrag->ff_jdep) {
		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
			panic("handle_workitem_freefrag: Unexpected type %d\n",
			    freefrag->ff_jdep->wk_type);
		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
	}
	FREE_LOCK(ump);
	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
	    freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype,
	    &wkhd, freefrag->ff_key);
	ACQUIRE_LOCK(ump);
	WORKITEM_FREE(freefrag, D_FREEFRAG);
	FREE_LOCK(ump);
}

/*
 * Set up a dependency structure for an external attributes data block.
 * This routine follows much of the structure of softdep_setup_allocdirect.
 * See the description of softdep_setup_allocdirect above for details.
 */
void
softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t off;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct freefrag *freefrag;
	struct inodedep *inodedep;
	struct jnewblk *jnewblk;
	struct newblk *newblk;
	struct mount *mp;
	struct ufsmount *ump;
	ufs_lbn_t lbn;

	mp = ITOVFS(ip);
	ump = VFSTOUFS(mp);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_allocext called on non-softdep filesystem"));
	KASSERT(off < UFS_NXADDR,
	    ("softdep_setup_allocext: lbn %lld >= UFS_NXADDR", (long long)off));

	lbn = bp->b_lblkno;
	if (oldblkno && oldblkno != newblkno)
		/*
		 * The usual case is that a smaller fragment that
		 * was just allocated has been replaced with a bigger
		 * fragment or a full-size block. If it is marked as
		 * B_DELWRI, the current contents have not been written
		 * to disk. It is possible that the block was written
		 * earlier, but very uncommon. If the block has never
		 * been written, there is no need to send a BIO_DELETE
		 * for it when it is freed. The gain from avoiding the
		 * TRIMs for the common case of unwritten blocks far
		 * exceeds the cost of the write amplification for the
		 * uncommon case of failing to send a TRIM for a block
		 * that had been written.
		 */
		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
		    (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
	else
		freefrag = NULL;

	ACQUIRE_LOCK(ump);
	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocext: lost block");
	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
	    ("softdep_setup_allocext: newblk already initialized"));
	/*
	 * Convert the newblk to an allocdirect.
 */
	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
	adp = (struct allocdirect *)newblk;
	newblk->nb_freefrag = freefrag;
	adp->ad_offset = off;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state |= EXTDATA;

	/*
	 * Finish initializing the journal.
	 */
	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
		jnewblk->jn_ino = ip->i_number;
		jnewblk->jn_lbn = lbn;
		add_to_journal(&jnewblk->jn_list);
	}
	if (freefrag && freefrag->ff_jdep != NULL &&
	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
		add_to_journal(freefrag->ff_jdep);
	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
	adp->ad_inodedep = inodedep;

	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newextupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_offset <= off) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_offset == off)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(ump);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_offset >= off)
			break;
	}
	if (oldadp == NULL)
		panic("softdep_setup_allocext: lost entry");
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_offset == off)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(ump);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers. The undo/redo states described above are also
 * used here. Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept. The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies). The second copy is freed when all pointers
 * are safe. The cache is not allowed to replace indirect blocks with
 * pending update dependencies. If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine, in conjunction
 * with softdep_sync_metadata, works to get all the dependencies
 * removed so that a file can be successfully written to disk. Three
 * procedures are used when setting up indirect block pointer
 * dependencies. The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */

/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs2_daddr_t newblkno;	/* disk block number being added */
	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
	ufs_lbn_t lbn;
{
	struct newblk *newblk;
	struct allocindir *aip;
	struct freefrag *freefrag;
	struct jnewblk *jnewblk;

	if (oldblkno)
		freefrag = newfreefrag(ip, oldblkno, ITOFS(ip)->fs_bsize, lbn,
		    SINGLETON_KEY);
	else
		freefrag = NULL;
	ACQUIRE_LOCK(ITOUMP(ip));
	if (newblk_lookup(ITOVFS(ip), newblkno, 0, &newblk) == 0)
		panic("newallocindir: lost block");
	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
	    ("newallocindir: newblk already initialized"));
	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
	newblk->nb_freefrag = freefrag;
	aip = (struct allocindir *)newblk;
	aip->ai_offset = ptrno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_lbn = lbn;
	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
		jnewblk->jn_ino = ip->i_number;
		jnewblk->jn_lbn = lbn;
		add_to_journal(&jnewblk->jn_list);
	}
	if (freefrag && freefrag->ff_jdep != NULL &&
	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
		add_to_journal(freefrag->ff_jdep);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs2_daddr_t newblkno;	/* disk block number being added */
	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct inodedep *inodedep;
	struct freefrag *freefrag;
	struct allocindir *aip;
	struct pagedep *pagedep;
	struct mount *mp;
	struct ufsmount *ump;

	mp = ITOVFS(ip);
	ump = VFSTOUFS(mp);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
	KASSERT(lbn == nbp->b_lblkno,
	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
	    lbn, nbp->b_lblkno));
	CTR4(KTR_SUJ,
	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
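	 * Directory pages need the pagedep because later entry
	 * additions and removals (diradds and dirrems) hang their
	 * dependencies off of it; ordinary file pages require no such
	 * tracking.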
5943 */ 5944 if ((ip->i_mode & IFMT) == IFDIR) 5945 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); 5946 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5947 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); 5948 FREE_LOCK(ump); 5949 if (freefrag) 5950 handle_workitem_freefrag(freefrag); 5951 } 5952 5953 /* 5954 * Called just before setting an indirect block pointer to a 5955 * newly allocated indirect block. 5956 */ 5957 void 5958 softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 5959 struct buf *nbp; /* newly allocated indirect block */ 5960 struct inode *ip; /* inode for file being extended */ 5961 struct buf *bp; /* indirect block referencing allocated block */ 5962 int ptrno; /* offset of pointer in indirect block */ 5963 ufs2_daddr_t newblkno; /* disk block number being added */ 5964 { 5965 struct inodedep *inodedep; 5966 struct allocindir *aip; 5967 struct ufsmount *ump; 5968 ufs_lbn_t lbn; 5969 5970 ump = ITOUMP(ip); 5971 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 5972 ("softdep_setup_allocindir_meta called on non-softdep filesystem")); 5973 CTR3(KTR_SUJ, 5974 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", 5975 ip->i_number, newblkno, ptrno); 5976 lbn = nbp->b_lblkno; 5977 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); 5978 aip = newallocindir(ip, ptrno, newblkno, 0, lbn); 5979 inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep); 5980 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5981 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) 5982 panic("softdep_setup_allocindir_meta: Block already existed"); 5983 FREE_LOCK(ump); 5984 } 5985 5986 static void 5987 indirdep_complete(indirdep) 5988 struct indirdep *indirdep; 5989 { 5990 struct allocindir *aip; 5991 5992 LIST_REMOVE(indirdep, ir_next); 5993 indirdep->ir_state |= DEPCOMPLETE; 5994 5995 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { 5996 LIST_REMOVE(aip, ai_next); 5997 free_newblk(&aip->ai_block); 5998 } 5999 /* 6000 * If this indirdep is not attached to a buf it was simply waiting 6001 * on completion to clear completehd. free_indirdep() asserts 6002 * that nothing is dangling. 6003 */ 6004 if ((indirdep->ir_state & ONWORKLIST) == 0) 6005 free_indirdep(indirdep); 6006 } 6007 6008 static struct indirdep * 6009 indirdep_lookup(mp, ip, bp) 6010 struct mount *mp; 6011 struct inode *ip; 6012 struct buf *bp; 6013 { 6014 struct indirdep *indirdep, *newindirdep; 6015 struct newblk *newblk; 6016 struct ufsmount *ump; 6017 struct worklist *wk; 6018 struct fs *fs; 6019 ufs2_daddr_t blkno; 6020 6021 ump = VFSTOUFS(mp); 6022 LOCK_OWNED(ump); 6023 indirdep = NULL; 6024 newindirdep = NULL; 6025 fs = ump->um_fs; 6026 for (;;) { 6027 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 6028 if (wk->wk_type != D_INDIRDEP) 6029 continue; 6030 indirdep = WK_INDIRDEP(wk); 6031 break; 6032 } 6033 /* Found on the buffer worklist, no new structure to free. */ 6034 if (indirdep != NULL && newindirdep == NULL) 6035 return (indirdep); 6036 if (indirdep != NULL && newindirdep != NULL) 6037 panic("indirdep_lookup: simultaneous create"); 6038 /* None found on the buffer and a new structure is ready. */ 6039 if (indirdep == NULL && newindirdep != NULL) 6040 break; 6041 /* None found and no new structure available. 
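		 * Drop the per-mount lock, allocate and initialize a
		 * candidate indirdep, and retry the lookup with the
		 * lock held. The buf lock held by our caller prevents
		 * another thread from attaching an indirdep to bp in
		 * the interim; a concurrent create would trip the
		 * "simultaneous create" panic above.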
*/ 6042 FREE_LOCK(ump); 6043 newindirdep = malloc(sizeof(struct indirdep), 6044 M_INDIRDEP, M_SOFTDEP_FLAGS); 6045 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); 6046 newindirdep->ir_state = ATTACHED; 6047 if (I_IS_UFS1(ip)) 6048 newindirdep->ir_state |= UFS1FMT; 6049 TAILQ_INIT(&newindirdep->ir_trunc); 6050 newindirdep->ir_saveddata = NULL; 6051 LIST_INIT(&newindirdep->ir_deplisthd); 6052 LIST_INIT(&newindirdep->ir_donehd); 6053 LIST_INIT(&newindirdep->ir_writehd); 6054 LIST_INIT(&newindirdep->ir_completehd); 6055 if (bp->b_blkno == bp->b_lblkno) { 6056 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 6057 NULL, NULL); 6058 bp->b_blkno = blkno; 6059 } 6060 newindirdep->ir_freeblks = NULL; 6061 newindirdep->ir_savebp = 6062 getblk(ump->um_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); 6063 newindirdep->ir_bp = bp; 6064 BUF_KERNPROC(newindirdep->ir_savebp); 6065 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 6066 ACQUIRE_LOCK(ump); 6067 } 6068 indirdep = newindirdep; 6069 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 6070 /* 6071 * If the block is not yet allocated we don't set DEPCOMPLETE so 6072 * that we don't free dependencies until the pointers are valid. 6073 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather 6074 * than using the hash. 6075 */ 6076 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) 6077 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); 6078 else 6079 indirdep->ir_state |= DEPCOMPLETE; 6080 return (indirdep); 6081 } 6082 6083 /* 6084 * Called to finish the allocation of the "aip" allocated 6085 * by one of the two routines above. 6086 */ 6087 static struct freefrag * 6088 setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) 6089 struct buf *bp; /* in-memory copy of the indirect block */ 6090 struct inode *ip; /* inode for file being extended */ 6091 struct inodedep *inodedep; /* Inodedep for ip */ 6092 struct allocindir *aip; /* allocindir allocated by the above routines */ 6093 ufs_lbn_t lbn; /* Logical block number for this block. */ 6094 { 6095 struct fs *fs; 6096 struct indirdep *indirdep; 6097 struct allocindir *oldaip; 6098 struct freefrag *freefrag; 6099 struct mount *mp; 6100 struct ufsmount *ump; 6101 6102 mp = ITOVFS(ip); 6103 ump = VFSTOUFS(mp); 6104 LOCK_OWNED(ump); 6105 fs = ump->um_fs; 6106 if (bp->b_lblkno >= 0) 6107 panic("setup_allocindir_phase2: not indir blk"); 6108 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), 6109 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); 6110 indirdep = indirdep_lookup(mp, ip, bp); 6111 KASSERT(indirdep->ir_savebp != NULL, 6112 ("setup_allocindir_phase2 NULL ir_savebp")); 6113 aip->ai_indirdep = indirdep; 6114 /* 6115 * Check for an unwritten dependency for this indirect offset. If 6116 * there is, merge the old dependency into the new one. This happens 6117 * as a result of reallocblk only. 6118 */ 6119 freefrag = NULL; 6120 if (aip->ai_oldblkno != 0) { 6121 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { 6122 if (oldaip->ai_offset == aip->ai_offset) { 6123 freefrag = allocindir_merge(aip, oldaip); 6124 goto done; 6125 } 6126 } 6127 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { 6128 if (oldaip->ai_offset == aip->ai_offset) { 6129 freefrag = allocindir_merge(aip, oldaip); 6130 goto done; 6131 } 6132 } 6133 } 6134 done: 6135 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 6136 return (freefrag); 6137 } 6138 6139 /* 6140 * Merge two allocindirs which refer to the same block. 
Move newblock
 * dependencies and setup the freefrags appropriately.
 */
static struct freefrag *
allocindir_merge(aip, oldaip)
	struct allocindir *aip;
	struct allocindir *oldaip;
{
	struct freefrag *freefrag;
	struct worklist *wk;

	if (oldaip->ai_newblkno != aip->ai_oldblkno)
		panic("allocindir_merge: blkno");
	aip->ai_oldblkno = oldaip->ai_oldblkno;
	freefrag = aip->ai_freefrag;
	aip->ai_freefrag = oldaip->ai_freefrag;
	oldaip->ai_freefrag = NULL;
	KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag"));
	/*
	 * If we are tracking a new directory-block allocation,
	 * move it from the old allocindir to the new allocindir.
	 */
	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
		WORKLIST_REMOVE(wk);
		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
			panic("allocindir_merge: extra newdirblk");
		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
	}
	/*
	 * We can skip journaling for this freefrag and just complete
	 * any pending journal work for the allocindir that is being
	 * removed after the freefrag completes.
	 */
	if (freefrag->ff_jdep)
		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
	LIST_REMOVE(oldaip, ai_next);
	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
	    &freefrag->ff_list, &freefrag->ff_jwork);
	free_newblk(&oldaip->ai_block);

	return (freefrag);
}

static inline void
setup_freedirect(freeblks, ip, i, needj)
	struct freeblks *freeblks;
	struct inode *ip;
	int i;
	int needj;
{
	struct ufsmount *ump;
	ufs2_daddr_t blkno;
	int frags;

	blkno = DIP(ip, i_db[i]);
	if (blkno == 0)
		return;
	DIP_SET(ip, i_db[i], 0);
	ump = ITOUMP(ip);
	frags = sblksize(ump->um_fs, ip->i_size, i);
	frags = numfrags(ump->um_fs, frags);
	newfreework(ump, freeblks, NULL, i, blkno, frags, 0, needj);
}

static inline void
setup_freeext(freeblks, ip, i, needj)
	struct freeblks *freeblks;
	struct inode *ip;
	int i;
	int needj;
{
	struct ufsmount *ump;
	ufs2_daddr_t blkno;
	int frags;

	blkno = ip->i_din2->di_extb[i];
	if (blkno == 0)
		return;
	ip->i_din2->di_extb[i] = 0;
	ump = ITOUMP(ip);
	frags = sblksize(ump->um_fs, ip->i_din2->di_extsize, i);
	frags = numfrags(ump->um_fs, frags);
	newfreework(ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
}

static inline void
setup_freeindir(freeblks, ip, i, lbn, needj)
	struct freeblks *freeblks;
	struct inode *ip;
	int i;
	ufs_lbn_t lbn;
	int needj;
{
	struct ufsmount *ump;
	ufs2_daddr_t blkno;

	blkno = DIP(ip, i_ib[i]);
	if (blkno == 0)
		return;
	DIP_SET(ip, i_ib[i], 0);
	ump = ITOUMP(ip);
	newfreework(ump, freeblks, NULL, lbn, blkno, ump->um_fs->fs_frag,
	    0, needj);
}

static inline struct freeblks *
newfreeblks(mp, ip)
	struct mount *mp;
	struct inode *ip;
{
	struct freeblks *freeblks;

	freeblks = malloc(sizeof(struct freeblks),
	    M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
	LIST_INIT(&freeblks->fb_jblkdephd);
	LIST_INIT(&freeblks->fb_jwork);
	freeblks->fb_ref = 0;
	freeblks->fb_cgwait = 0;
	freeblks->fb_state = ATTACHED;
	freeblks->fb_uid = ip->i_uid;
	freeblks->fb_inum = ip->i_number;
	freeblks->fb_vtype = ITOV(ip)->v_type;
	freeblks->fb_modrev = DIP(ip, i_modrev);
	freeblks->fb_devvp = ITODEVVP(ip);
	freeblks->fb_chkcnt = 0;
	freeblks->fb_len = 0;

	return (freeblks);
}

static void
trunc_indirdep(indirdep, freeblks, bp, off)
	struct indirdep *indirdep;
	struct freeblks *freeblks;
	struct buf *bp;
	int off;
{
	struct allocindir *aip, *aipn;

	/*
	 * The first set of allocindirs won't be in savedbp.
	 */
	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, bp, freeblks, 1);
	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, bp, freeblks, 1);
	/*
	 * These will exist in savedbp.
	 */
	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, NULL, freeblks, 0);
	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, NULL, freeblks, 0);
}

/*
 * Follow the chain of indirects down to lastlbn creating a freework
 * structure for each. This will be used to start indir_trunc() at
 * the right offset and create the journal records for the partial
 * truncation. A second step will handle the truncated dependencies.
 */
static int
setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
	struct freeblks *freeblks;
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs_lbn_t lastlbn;
	ufs2_daddr_t blkno;
{
	struct indirdep *indirdep;
	struct indirdep *indirn;
	struct freework *freework;
	struct newblk *newblk;
	struct mount *mp;
	struct ufsmount *ump;
	struct buf *bp;
	uint8_t *start;
	uint8_t *end;
	ufs_lbn_t lbnadd;
	int level;
	int error;
	int off;

	freework = NULL;
	if (blkno == 0)
		return (0);
	mp = freeblks->fb_list.wk_mp;
	ump = VFSTOUFS(mp);
	/*
	 * Here, calls to VOP_BMAP() will fail. However, we already have
	 * the on-disk address, so we just pass it to bread() instead of
	 * having bread() attempt to calculate it using VOP_BMAP().
	 */
	error = ffs_breadz(ump, ITOV(ip), lbn, blkptrtodb(ump, blkno),
	    (int)mp->mnt_stat.f_iosize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
	if (error)
		return (error);
	level = lbn_level(lbn);
	lbnadd = lbn_offset(ump->um_fs, level);
	/*
	 * Compute the offset of the last block we want to keep. Store
	 * in the freework the first block we want to completely free.
	 */
	off = (lastlbn - -(lbn + level)) / lbnadd;
	if (off + 1 == NINDIR(ump->um_fs))
		goto nowork;
	freework = newfreework(ump, freeblks, NULL, lbn, blkno, 0, off + 1, 0);
	/*
	 * Link the freework into the indirdep. This will prevent any new
	 * allocations from proceeding until we are finished with the
	 * truncate and the block is written.
	 */
	ACQUIRE_LOCK(ump);
	indirdep = indirdep_lookup(mp, ip, bp);
	if (indirdep->ir_freeblks)
		panic("setup_trunc_indir: indirdep already truncated.");
	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
	freework->fw_indir = indirdep;
	/*
	 * Cancel any allocindirs that will not make it to disk.
	 * We have to do this for all copies of the indirdep that
	 * live on this newblk.
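	 * The newblk's nb_indirdeps list links every indirdep backed
	 * by this block, so when the DEPCOMPLETE check below shows the
	 * block is still pending we walk that list rather than just
	 * the copy attached to this buffer.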
	 */
	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
		if (newblk_lookup(mp, dbtofsb(ump->um_fs, bp->b_blkno), 0,
		    &newblk) == 0)
			panic("setup_trunc_indir: lost block");
		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
			trunc_indirdep(indirn, freeblks, bp, off);
	} else
		trunc_indirdep(indirdep, freeblks, bp, off);
	FREE_LOCK(ump);
	/*
	 * Creation is protected by the buf lock. The saveddata is only
	 * needed if a full truncation follows a partial truncation but it
	 * is difficult to allocate in that case so we fetch it anyway.
	 */
	if (indirdep->ir_saveddata == NULL)
		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
		    M_SOFTDEP_FLAGS);
nowork:
	/* Fetch the blkno of the child and the zero start offset. */
	if (I_IS_UFS1(ip)) {
		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
	} else {
		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
	}
	if (freework) {
		/* Zero the truncated pointers. */
		end = bp->b_data + bp->b_bcount;
		bzero(start, end - start);
		bdwrite(bp);
	} else
		bqrelse(bp);
	if (level == 0)
		return (0);
	lbn++;	/* adjust level */
	lbn -= (off * lbnadd);
	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
}

/*
 * Complete the partial truncation of an indirect block setup by
 * setup_trunc_indir(). This zeros the truncated pointers in the saved
 * copy and writes them to disk before the freeblks is allowed to complete.
 */
static void
complete_trunc_indir(freework)
	struct freework *freework;
{
	struct freework *fwn;
	struct indirdep *indirdep;
	struct ufsmount *ump;
	struct buf *bp;
	uintptr_t start;
	int count;

	ump = VFSTOUFS(freework->fw_list.wk_mp);
	LOCK_OWNED(ump);
	indirdep = freework->fw_indir;
	for (;;) {
		bp = indirdep->ir_bp;
		/* See if the block was discarded. */
		if (bp == NULL)
			break;
		/* Inline part of getdirtybuf(). We don't want bremfree. */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
			break;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
		    LOCK_PTR(ump)) == 0)
			BUF_UNLOCK(bp);
		ACQUIRE_LOCK(ump);
	}
	freework->fw_state |= DEPCOMPLETE;
	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
	/*
	 * Zero the pointers in the saved copy.
	 */
	if (indirdep->ir_state & UFS1FMT)
		start = sizeof(ufs1_daddr_t);
	else
		start = sizeof(ufs2_daddr_t);
	start *= freework->fw_start;
	count = indirdep->ir_savebp->b_bcount - start;
	start += (uintptr_t)indirdep->ir_savebp->b_data;
	bzero((char *)start, count);
	/*
	 * We need to start the next truncation in the list if it has not
	 * been started yet.
	 */
	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
	if (fwn != NULL) {
		if (fwn->fw_freeblks == indirdep->ir_freeblks)
			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
		if ((fwn->fw_state & ONWORKLIST) == 0)
			freework_enqueue(fwn);
	}
	/*
	 * If bp is NULL the block was fully truncated, so restore the
	 * saved block list; otherwise free it if it is no longer
	 * needed.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
		if (bp == NULL)
			bcopy(indirdep->ir_saveddata,
			    indirdep->ir_savebp->b_data,
			    indirdep->ir_savebp->b_bcount);
		free(indirdep->ir_saveddata, M_INDIRDEP);
		indirdep->ir_saveddata = NULL;
	}
	/*
	 * When bp is NULL there is a full truncation pending. We
	 * must wait for this full truncation to be journaled before
	 * we can release this freework because the disk pointers will
	 * never be written as zero.
	 */
	if (bp == NULL) {
		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
			handle_written_freework(freework);
		else
			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
			    &freework->fw_list);
		if (fwn == NULL) {
			freework->fw_indir = (void *)0x0000deadbeef0000;
			bp = indirdep->ir_savebp;
			indirdep->ir_savebp = NULL;
			free_indirdep(indirdep);
			FREE_LOCK(ump);
			brelse(bp);
			ACQUIRE_LOCK(ump);
		}
	} else {
		/* Complete when the real copy is written. */
		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
		BUF_UNLOCK(bp);
	}
}

/*
 * Calculate the number of blocks we are going to release where datablocks
 * is the current total and length is the new file size.
 */
static ufs2_daddr_t
blkcount(fs, datablocks, length)
	struct fs *fs;
	ufs2_daddr_t datablocks;
	off_t length;
{
	off_t totblks, numblks;

	totblks = 0;
	numblks = howmany(length, fs->fs_bsize);
	if (numblks <= UFS_NDADDR) {
		totblks = howmany(length, fs->fs_fsize);
		goto out;
	}
	totblks = blkstofrags(fs, numblks);
	numblks -= UFS_NDADDR;
	/*
	 * Count all single, then double, then triple indirects required.
	 * Subtracting one indirect's worth of blocks for each pass
	 * acknowledges one of each pointed to by the inode.
	 */
	for (;;) {
		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
		numblks -= NINDIR(fs);
		if (numblks <= 0)
			break;
		numblks = howmany(numblks, NINDIR(fs));
	}
out:
	totblks = fsbtodb(fs, totblks);
	/*
	 * Handle sparse files. We can't reclaim more blocks than the inode
	 * references. We will correct it later in handle_complete_freeblks()
	 * when we know the real count.
	 */
	if (totblks > datablocks)
		return (0);
	return (datablocks - totblks);
}

/*
 * Handle freeblocks for journaled softupdate filesystems.
 *
 * Contrary to normal softupdates, we must preserve the block pointers in
 * indirects until their subordinates are free. This is to avoid journaling
 * every block that is freed which may consume more space than the journal
 * itself. The recovery program will see the free block journals at the
 * base of the truncated area and traverse them to reclaim space. The
 * pointers in the inode may be cleared immediately after the journal
 * records are written because each direct and indirect pointer in the
 * inode is recorded in a journal. This permits full truncation to proceed
 * asynchronously. The write order is journal -> inode -> cgs -> indirects.
 *
 * The algorithm is as follows:
 * 1) Traverse the in-memory state and create journal entries to release
 *    the relevant blocks and full indirect trees.
 * 2) Traverse the indirect block chain adding partial truncation freework
 *    records to indirects in the path to lastlbn. The freework will
 *    prevent new allocation dependencies from being satisfied in this
 *    indirect until the truncation completes.
 * 3) Read and lock the inode block, performing an update with the new size
 *    and pointers. This prevents truncated data from becoming valid on
 *    disk through step 4.
 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
 *    and eliminate journal work for those records that do not require it.
 * 5) Schedule the journal records to be written followed by the inode block.
 * 6) Allocate any necessary frags for the end of file.
 * 7) Zero any partially truncated blocks.
 *
 * From this point truncation proceeds asynchronously using the freework and
 * indir_trunc machinery. The file will not be extended again into a
 * partially truncated indirect block until all work is completed, but
 * the normal dependency mechanism ensures that it is rolled back/forward
 * as appropriate. Further truncation may occur without delay and is
 * serialized in indir_trunc().
 */
void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;	/* The inode whose length is to be reduced */
	struct ucred *cred;
	off_t length;		/* The new length for the file */
	int flags;		/* IO_EXT and/or IO_NORMAL */
{
	struct freeblks *freeblks, *fbn;
	struct worklist *wk, *wkn;
	struct inodedep *inodedep;
	struct jblkdep *jblkdep;
	struct allocdirect *adp, *adpn;
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
	struct vnode *vp;
	struct mount *mp;
	daddr_t dbn;
	ufs2_daddr_t extblocks, datablocks;
	ufs_lbn_t tmpval, lbn, lastlbn;
	int frags, lastoff, iboff, allocblock, needj, error, i;

	ump = ITOUMP(ip);
	mp = UFSTOVFS(ump);
	fs = ump->um_fs;
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
	vp = ITOV(ip);
	needj = 1;
	iboff = -1;
	allocblock = 0;
	extblocks = 0;
	datablocks = 0;
	frags = 0;
	freeblks = newfreeblks(mp, ip);
	ACQUIRE_LOCK(ump);
	/*
	 * If we're truncating a removed file that will never be written
	 * we don't need to journal the block frees. The canceled journals
	 * for the allocations will suffice.
	 */
	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
	    length == 0)
		needj = 0;
	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
	    ip->i_number, length, needj);
	FREE_LOCK(ump);
	/*
	 * Calculate the lbn that we are truncating to. This results in -1
	 * if we're truncating to 0 bytes. So it is the last lbn we want
	 * to keep, not the first lbn we want to truncate. For example,
	 * with 32K blocks a length of 0 gives lastlbn -1 (keep nothing),
	 * while any length within the first block gives lastlbn 0 (keep
	 * only block 0).
	 */
	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastoff = blkoff(fs, length);
	/*
	 * Compute frags we are keeping in lastlbn. 0 means all.
	 */
	if (lastlbn >= 0 && lastlbn < UFS_NDADDR) {
		frags = fragroundup(fs, lastoff);
		/* adp offset of last valid allocdirect. */
		iboff = lastlbn;
	} else if (lastlbn > 0)
		iboff = UFS_NDADDR;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
	/*
	 * Handle normal data blocks and indirects. This section saves
	 * values used after the inode update to complete frag and indirect
	 * truncation.
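	 * As an illustrative sketch, with 32K blocks and UFS2 pointers
	 * NINDIR(fs) is 4096, so the indirect loop below visits lbn 12
	 * (UFS_NDADDR) for the single indirect, 12 + 4096 for the
	 * double, and 12 + 4096 + 4096 * 4096 for the triple, with
	 * tmpval holding the count of lbns mapped at each level.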
6656 */ 6657 if ((flags & IO_NORMAL) != 0) { 6658 /* 6659 * Handle truncation of whole direct and indirect blocks. 6660 */ 6661 for (i = iboff + 1; i < UFS_NDADDR; i++) 6662 setup_freedirect(freeblks, ip, i, needj); 6663 for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; 6664 i < UFS_NIADDR; 6665 i++, lbn += tmpval, tmpval *= NINDIR(fs)) { 6666 /* Release a whole indirect tree. */ 6667 if (lbn > lastlbn) { 6668 setup_freeindir(freeblks, ip, i, -lbn -i, 6669 needj); 6670 continue; 6671 } 6672 iboff = i + UFS_NDADDR; 6673 /* 6674 * Traverse partially truncated indirect tree. 6675 */ 6676 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) 6677 setup_trunc_indir(freeblks, ip, -lbn - i, 6678 lastlbn, DIP(ip, i_ib[i])); 6679 } 6680 /* 6681 * Handle partial truncation to a frag boundary. 6682 */ 6683 if (frags) { 6684 ufs2_daddr_t blkno; 6685 long oldfrags; 6686 6687 oldfrags = blksize(fs, ip, lastlbn); 6688 blkno = DIP(ip, i_db[lastlbn]); 6689 if (blkno && oldfrags != frags) { 6690 oldfrags -= frags; 6691 oldfrags = numfrags(fs, oldfrags); 6692 blkno += numfrags(fs, frags); 6693 newfreework(ump, freeblks, NULL, lastlbn, 6694 blkno, oldfrags, 0, needj); 6695 if (needj) 6696 adjust_newfreework(freeblks, 6697 numfrags(fs, frags)); 6698 } else if (blkno == 0) 6699 allocblock = 1; 6700 } 6701 /* 6702 * Add a journal record for partial truncate if we are 6703 * handling indirect blocks. Non-indirects need no extra 6704 * journaling. 6705 */ 6706 if (length != 0 && lastlbn >= UFS_NDADDR) { 6707 UFS_INODE_SET_FLAG(ip, IN_TRUNCATED); 6708 newjtrunc(freeblks, length, 0); 6709 } 6710 ip->i_size = length; 6711 DIP_SET(ip, i_size, ip->i_size); 6712 datablocks = DIP(ip, i_blocks) - extblocks; 6713 if (length != 0) 6714 datablocks = blkcount(fs, datablocks, length); 6715 freeblks->fb_len = length; 6716 } 6717 if ((flags & IO_EXT) != 0) { 6718 for (i = 0; i < UFS_NXADDR; i++) 6719 setup_freeext(freeblks, ip, i, needj); 6720 ip->i_din2->di_extsize = 0; 6721 datablocks += extblocks; 6722 } 6723 #ifdef QUOTA 6724 /* Reference the quotas in case the block count is wrong in the end. */ 6725 quotaref(vp, freeblks->fb_quota); 6726 (void) chkdq(ip, -datablocks, NOCRED, FORCE); 6727 #endif 6728 freeblks->fb_chkcnt = -datablocks; 6729 UFS_LOCK(ump); 6730 fs->fs_pendingblocks += datablocks; 6731 UFS_UNLOCK(ump); 6732 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6733 /* 6734 * Handle truncation of incomplete alloc direct dependencies. We 6735 * hold the inode block locked to prevent incomplete dependencies 6736 * from reaching the disk while we are eliminating those that 6737 * have been truncated. This is a partially inlined ffs_update(). 
	 */
	ufs_itimes(vp);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	dbn = fsbtodb(fs, ino_to_fsba(fs, ip->i_number));
	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
	    NULL, NULL, 0, cred, 0, NULL, &bp);
	if (error) {
		softdep_error("softdep_journal_freeblocks", error);
		return;
	}
	if (bp->b_bufsize == fs->fs_bsize)
		bp->b_flags |= B_CLUSTEROK;
	softdep_update_inodeblock(ip, bp, 0);
	if (ump->um_fstype == UFS1) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
	} else {
		ffs_update_dinode_ckhash(fs, ip->i_din2);
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	}
	ACQUIRE_LOCK(ump);
	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
	if ((inodedep->id_state & IOSTARTED) != 0)
		panic("softdep_journal_freeblocks: inode busy");
	/*
	 * Add the freeblks structure to the list of operations that
	 * must await the zero'ed inode being written to disk. If we
	 * still have a bitmap dependency (needj), then the inode
	 * has never been written to disk, so we can process the
	 * freeblks below once we have deleted the dependencies.
	 */
	if (needj)
		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
	else
		freeblks->fb_state |= COMPLETE;
	if ((flags & IO_NORMAL) != 0) {
		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
			if (adp->ad_offset > iboff)
				cancel_allocdirect(&inodedep->id_inoupdt, adp,
				    freeblks);
			/*
			 * Truncate the allocdirect. We could eliminate
			 * or modify journal records as well.
			 */
			else if (adp->ad_offset == iboff && frags)
				adp->ad_newsize = frags;
		}
	}
	if ((flags & IO_EXT) != 0)
		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
			cancel_allocdirect(&inodedep->id_extupdt, adp,
			    freeblks);
	/*
	 * Scan the bufwait list for newblock dependencies that will never
	 * make it to disk.
	 */
	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
		if (wk->wk_type != D_ALLOCDIRECT)
			continue;
		adp = WK_ALLOCDIRECT(wk);
		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
			cancel_jfreeblk(freeblks, adp->ad_newblkno);
			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
		}
	}
	/*
	 * Add journal work.
	 */
	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
		add_to_journal(&jblkdep->jb_list);
	FREE_LOCK(ump);
	bdwrite(bp);
	/*
	 * Truncate dependency structures beyond length.
	 */
	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
	/*
	 * This is only set when we need to allocate a fragment because
	 * none existed at the end of a frag-sized file. It handles only
	 * allocating a new, zero-filled block.
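	 * For example, when the truncation point falls in a hole (the
	 * DIP(ip, i_db[lastlbn]) check above found no block), the
	 * UFS_BALLOC() call below supplies the zero-filled fragment
	 * that must back the new end of file.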
6821 */ 6822 if (allocblock) { 6823 ip->i_size = length - lastoff; 6824 DIP_SET(ip, i_size, ip->i_size); 6825 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); 6826 if (error != 0) { 6827 softdep_error("softdep_journal_freeblks", error); 6828 return; 6829 } 6830 ip->i_size = length; 6831 DIP_SET(ip, i_size, length); 6832 UFS_INODE_SET_FLAG(ip, IN_CHANGE | IN_UPDATE); 6833 allocbuf(bp, frags); 6834 ffs_update(vp, 0); 6835 bawrite(bp); 6836 } else if (lastoff != 0 && vp->v_type != VDIR) { 6837 int size; 6838 6839 /* 6840 * Zero the end of a truncated frag or block. 6841 */ 6842 size = sblksize(fs, length, lastlbn); 6843 error = bread(vp, lastlbn, size, cred, &bp); 6844 if (error == 0) { 6845 bzero((char *)bp->b_data + lastoff, size - lastoff); 6846 bawrite(bp); 6847 } else if (!ffs_fsfail_cleanup(ump, error)) { 6848 softdep_error("softdep_journal_freeblks", error); 6849 return; 6850 } 6851 } 6852 ACQUIRE_LOCK(ump); 6853 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 6854 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); 6855 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; 6856 /* 6857 * We zero earlier truncations so they don't erroneously 6858 * update i_blocks. 6859 */ 6860 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) 6861 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) 6862 fbn->fb_len = 0; 6863 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && 6864 LIST_EMPTY(&freeblks->fb_jblkdephd)) 6865 freeblks->fb_state |= INPROGRESS; 6866 else 6867 freeblks = NULL; 6868 FREE_LOCK(ump); 6869 if (freeblks) 6870 handle_workitem_freeblocks(freeblks, 0); 6871 trunc_pages(ip, length, extblocks, flags); 6872 6873 } 6874 6875 /* 6876 * Flush a JOP_SYNC to the journal. 6877 */ 6878 void 6879 softdep_journal_fsync(ip) 6880 struct inode *ip; 6881 { 6882 struct jfsync *jfsync; 6883 struct ufsmount *ump; 6884 6885 ump = ITOUMP(ip); 6886 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 6887 ("softdep_journal_fsync called on non-softdep filesystem")); 6888 if ((ip->i_flag & IN_TRUNCATED) == 0) 6889 return; 6890 ip->i_flag &= ~IN_TRUNCATED; 6891 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); 6892 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ump)); 6893 jfsync->jfs_size = ip->i_size; 6894 jfsync->jfs_ino = ip->i_number; 6895 ACQUIRE_LOCK(ump); 6896 add_to_journal(&jfsync->jfs_list); 6897 jwait(&jfsync->jfs_list, MNT_WAIT); 6898 FREE_LOCK(ump); 6899 } 6900 6901 /* 6902 * Block de-allocation dependencies. 6903 * 6904 * When blocks are de-allocated, the on-disk pointers must be nullified before 6905 * the blocks are made available for use by other files. (The true 6906 * requirement is that old pointers must be nullified before new on-disk 6907 * pointers are set. We chose this slightly more stringent requirement to 6908 * reduce complexity.) Our implementation handles this dependency by updating 6909 * the inode (or indirect block) appropriately but delaying the actual block 6910 * de-allocation (i.e., freemap and free space count manipulation) until 6911 * after the updated versions reach stable storage. After the disk is 6912 * updated, the blocks can be safely de-allocated whenever it is convenient. 6913 * This implementation handles only the common case of reducing a file's 6914 * length to zero. Other cases are handled by the conventional synchronous 6915 * write approach. 
6916 * 6917 * The ffs implementation with which we worked double-checks 6918 * the state of the block pointers and file size as it reduces 6919 * a file's length. Some of this code is replicated here in our 6920 * soft updates implementation. The freeblks->fb_chkcnt field is 6921 * used to transfer a part of this information to the procedure 6922 * that eventually de-allocates the blocks. 6923 * 6924 * This routine should be called from the routine that shortens 6925 * a file's length, before the inode's size or block pointers 6926 * are modified. It will save the block pointer information for 6927 * later release and zero the inode so that the calling routine 6928 * can release it. 6929 */ 6930 void 6931 softdep_setup_freeblocks(ip, length, flags) 6932 struct inode *ip; /* The inode whose length is to be reduced */ 6933 off_t length; /* The new length for the file */ 6934 int flags; /* IO_EXT and/or IO_NORMAL */ 6935 { 6936 struct ufs1_dinode *dp1; 6937 struct ufs2_dinode *dp2; 6938 struct freeblks *freeblks; 6939 struct inodedep *inodedep; 6940 struct allocdirect *adp; 6941 struct ufsmount *ump; 6942 struct buf *bp; 6943 struct fs *fs; 6944 ufs2_daddr_t extblocks, datablocks; 6945 struct mount *mp; 6946 int i, delay, error; 6947 ufs_lbn_t tmpval; 6948 ufs_lbn_t lbn; 6949 6950 ump = ITOUMP(ip); 6951 mp = UFSTOVFS(ump); 6952 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 6953 ("softdep_setup_freeblocks called on non-softdep filesystem")); 6954 CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", 6955 ip->i_number, length); 6956 KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length")); 6957 fs = ump->um_fs; 6958 if ((error = bread(ump->um_devvp, 6959 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6960 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 6961 if (!ffs_fsfail_cleanup(ump, error)) 6962 softdep_error("softdep_setup_freeblocks", error); 6963 return; 6964 } 6965 freeblks = newfreeblks(mp, ip); 6966 extblocks = 0; 6967 datablocks = 0; 6968 if (fs->fs_magic == FS_UFS2_MAGIC) 6969 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6970 if ((flags & IO_NORMAL) != 0) { 6971 for (i = 0; i < UFS_NDADDR; i++) 6972 setup_freedirect(freeblks, ip, i, 0); 6973 for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR; 6974 i < UFS_NIADDR; 6975 i++, lbn += tmpval, tmpval *= NINDIR(fs)) 6976 setup_freeindir(freeblks, ip, i, -lbn -i, 0); 6977 ip->i_size = 0; 6978 DIP_SET(ip, i_size, 0); 6979 datablocks = DIP(ip, i_blocks) - extblocks; 6980 } 6981 if ((flags & IO_EXT) != 0) { 6982 for (i = 0; i < UFS_NXADDR; i++) 6983 setup_freeext(freeblks, ip, i, 0); 6984 ip->i_din2->di_extsize = 0; 6985 datablocks += extblocks; 6986 } 6987 #ifdef QUOTA 6988 /* Reference the quotas in case the block count is wrong in the end. */ 6989 quotaref(ITOV(ip), freeblks->fb_quota); 6990 (void) chkdq(ip, -datablocks, NOCRED, FORCE); 6991 #endif 6992 freeblks->fb_chkcnt = -datablocks; 6993 UFS_LOCK(ump); 6994 fs->fs_pendingblocks += datablocks; 6995 UFS_UNLOCK(ump); 6996 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6997 /* 6998 * Push the zero'ed inode to its disk buffer so that we are free 6999 * to delete its dependencies below. Once the dependencies are gone 7000 * the buffer can be safely released. 
7001 */ 7002 if (ump->um_fstype == UFS1) { 7003 dp1 = ((struct ufs1_dinode *)bp->b_data + 7004 ino_to_fsbo(fs, ip->i_number)); 7005 ip->i_din1->di_freelink = dp1->di_freelink; 7006 *dp1 = *ip->i_din1; 7007 } else { 7008 dp2 = ((struct ufs2_dinode *)bp->b_data + 7009 ino_to_fsbo(fs, ip->i_number)); 7010 ip->i_din2->di_freelink = dp2->di_freelink; 7011 ffs_update_dinode_ckhash(fs, ip->i_din2); 7012 *dp2 = *ip->i_din2; 7013 } 7014 /* 7015 * Find and eliminate any inode dependencies. 7016 */ 7017 ACQUIRE_LOCK(ump); 7018 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 7019 if ((inodedep->id_state & IOSTARTED) != 0) 7020 panic("softdep_setup_freeblocks: inode busy"); 7021 /* 7022 * Add the freeblks structure to the list of operations that 7023 * must await the zero'ed inode being written to disk. If we 7024 * still have a bitmap dependency (delay == 0), then the inode 7025 * has never been written to disk, so we can process the 7026 * freeblks below once we have deleted the dependencies. 7027 */ 7028 delay = (inodedep->id_state & DEPCOMPLETE); 7029 if (delay) 7030 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 7031 else 7032 freeblks->fb_state |= COMPLETE; 7033 /* 7034 * Because the file length has been truncated to zero, any 7035 * pending block allocation dependency structures associated 7036 * with this inode are obsolete and can simply be de-allocated. 7037 * We must first merge the two dependency lists to get rid of 7038 * any duplicate freefrag structures, then purge the merged list. 7039 * If we still have a bitmap dependency, then the inode has never 7040 * been written to disk, so we can free any fragments without delay. 7041 */ 7042 if (flags & IO_NORMAL) { 7043 merge_inode_lists(&inodedep->id_newinoupdt, 7044 &inodedep->id_inoupdt); 7045 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 7046 cancel_allocdirect(&inodedep->id_inoupdt, adp, 7047 freeblks); 7048 } 7049 if (flags & IO_EXT) { 7050 merge_inode_lists(&inodedep->id_newextupdt, 7051 &inodedep->id_extupdt); 7052 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 7053 cancel_allocdirect(&inodedep->id_extupdt, adp, 7054 freeblks); 7055 } 7056 FREE_LOCK(ump); 7057 bdwrite(bp); 7058 trunc_dependencies(ip, freeblks, -1, 0, flags); 7059 ACQUIRE_LOCK(ump); 7060 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) 7061 (void) free_inodedep(inodedep); 7062 freeblks->fb_state |= DEPCOMPLETE; 7063 /* 7064 * If the inode with zeroed block pointers is now on disk 7065 * we can start freeing blocks. 7066 */ 7067 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 7068 freeblks->fb_state |= INPROGRESS; 7069 else 7070 freeblks = NULL; 7071 FREE_LOCK(ump); 7072 if (freeblks) 7073 handle_workitem_freeblocks(freeblks, 0); 7074 trunc_pages(ip, length, extblocks, flags); 7075 } 7076 7077 /* 7078 * Eliminate pages from the page cache that back parts of this inode and 7079 * adjust the vnode pager's idea of our size. This prevents stale data 7080 * from hanging around in the page cache. 
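 * As a note on the arithmetic below, the extended attribute area is
 * backed at negative lbns (see setup_freeext() above), so its page
 * range is derived from -extblocks; the indirect blocks we prune are
 * likewise addressed by the negative of their lbn.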
 */
static void
trunc_pages(ip, length, extblocks, flags)
	struct inode *ip;
	off_t length;
	ufs2_daddr_t extblocks;
	int flags;
{
	struct vnode *vp;
	struct fs *fs;
	ufs_lbn_t lbn;
	off_t end, extend;

	vp = ITOV(ip);
	fs = ITOFS(ip);
	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
	if ((flags & IO_EXT) != 0)
		vn_pages_remove(vp, extend, 0);
	if ((flags & IO_NORMAL) == 0)
		return;
	BO_LOCK(&vp->v_bufobj);
	drain_output(vp);
	BO_UNLOCK(&vp->v_bufobj);
	/*
	 * The vnode pager eliminates file pages; we eliminate indirects
	 * below.
	 */
	vnode_pager_setsize(vp, length);
	/*
	 * Calculate the end based on the last indirect we want to keep. If
	 * the block extends into indirects we can just use the negative of
	 * its lbn. Doubles and triples exist at lower numbers so we must
	 * be careful not to remove those, if they exist. Double and triple
	 * indirect lbns do not overlap with others so it is not important
	 * to verify how many levels are required.
	 */
	lbn = lblkno(fs, length);
	if (lbn >= UFS_NDADDR) {
		/* Calculate the virtual lbn of the triple indirect. */
		lbn = -lbn - (UFS_NIADDR - 1);
		end = OFF_TO_IDX(lblktosize(fs, lbn));
	} else
		end = extend;
	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
}

/*
 * See if the buf bp is in the range eliminated by truncation.
 */
static int
trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
	struct buf *bp;
	int *blkoffp;
	ufs_lbn_t lastlbn;
	int lastoff;
	int flags;
{
	ufs_lbn_t lbn;

	*blkoffp = 0;
	/* Only match ext/normal blocks as appropriate. */
	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
		return (0);
	/* ALTDATA is always a full truncation. */
	if ((bp->b_xflags & BX_ALTDATA) != 0)
		return (1);
	/* -1 is full truncation. */
	if (lastlbn == -1)
		return (1);
	/*
	 * If this is a partial truncate we only want those
	 * blocks and indirect blocks that cover the range
	 * we're after.
	 */
	lbn = bp->b_lblkno;
	if (lbn < 0)
		lbn = -(lbn + lbn_level(lbn));
	if (lbn < lastlbn)
		return (0);
	/* Here we only truncate lblkno if it's partial. */
	if (lbn == lastlbn) {
		if (lastoff == 0)
			return (0);
		*blkoffp = lastoff;
	}
	return (1);
}

/*
 * Eliminate any dependencies that exist in memory beyond lblkno:off.
 */
static void
trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
	struct inode *ip;
	struct freeblks *freeblks;
	ufs_lbn_t lastlbn;
	int lastoff;
	int flags;
{
	struct bufobj *bo;
	struct vnode *vp;
	struct buf *bp;
	int blkoff;

	/*
	 * We must wait for any I/O in progress to finish so that
	 * all potential buffers on the dirty list will be visible.
	 * Once they are all there, walk the list and get rid of
	 * any dependencies.
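	 * The BV_SCANNED flag marks buffers already visited so that
	 * each pass of the restart loops below makes forward progress
	 * after the bufobj lock has been dropped and reacquired.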
7191 */ 7192 vp = ITOV(ip); 7193 bo = &vp->v_bufobj; 7194 BO_LOCK(bo); 7195 drain_output(vp); 7196 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 7197 bp->b_vflags &= ~BV_SCANNED; 7198 restart: 7199 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 7200 if (bp->b_vflags & BV_SCANNED) 7201 continue; 7202 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7203 bp->b_vflags |= BV_SCANNED; 7204 continue; 7205 } 7206 KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); 7207 if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) 7208 goto restart; 7209 BO_UNLOCK(bo); 7210 if (deallocate_dependencies(bp, freeblks, blkoff)) 7211 bqrelse(bp); 7212 else 7213 brelse(bp); 7214 BO_LOCK(bo); 7215 goto restart; 7216 } 7217 /* 7218 * Now do the work of vtruncbuf while also matching indirect blocks. 7219 */ 7220 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) 7221 bp->b_vflags &= ~BV_SCANNED; 7222 cleanrestart: 7223 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { 7224 if (bp->b_vflags & BV_SCANNED) 7225 continue; 7226 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7227 bp->b_vflags |= BV_SCANNED; 7228 continue; 7229 } 7230 if (BUF_LOCK(bp, 7231 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 7232 BO_LOCKPTR(bo)) == ENOLCK) { 7233 BO_LOCK(bo); 7234 goto cleanrestart; 7235 } 7236 bp->b_vflags |= BV_SCANNED; 7237 bremfree(bp); 7238 if (blkoff != 0) { 7239 allocbuf(bp, blkoff); 7240 bqrelse(bp); 7241 } else { 7242 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; 7243 brelse(bp); 7244 } 7245 BO_LOCK(bo); 7246 goto cleanrestart; 7247 } 7248 drain_output(vp); 7249 BO_UNLOCK(bo); 7250 } 7251 7252 static int 7253 cancel_pagedep(pagedep, freeblks, blkoff) 7254 struct pagedep *pagedep; 7255 struct freeblks *freeblks; 7256 int blkoff; 7257 { 7258 struct jremref *jremref; 7259 struct jmvref *jmvref; 7260 struct dirrem *dirrem, *tmp; 7261 int i; 7262 7263 /* 7264 * Copy any directory remove dependencies to the list 7265 * to be processed after the freeblks proceeds. If the 7266 * directory entries never made it to disk they 7267 * can be dumped directly onto the work list. 7268 */ 7269 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { 7270 /* Skip this directory removal if it is intended to remain. */ 7271 if (dirrem->dm_offset < blkoff) 7272 continue; 7273 /* 7274 * If there are any dirrems, we wait for the journal write 7275 * to complete and then restart the buf scan as the lock 7276 * has been dropped. 7277 */ 7278 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { 7279 jwait(&jremref->jr_list, MNT_WAIT); 7280 return (ERESTART); 7281 } 7282 LIST_REMOVE(dirrem, dm_next); 7283 dirrem->dm_dirinum = pagedep->pd_ino; 7284 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); 7285 } 7286 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { 7287 jwait(&jmvref->jm_list, MNT_WAIT); 7288 return (ERESTART); 7289 } 7290 /* 7291 * When we're partially truncating a pagedep we just want to flush 7292 * journal entries and return. There cannot be any adds in the 7293 * truncated portion of the directory and the newblk must remain if 7294 * part of the block remains.
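 * The diradd checks in the partial-truncation case below are
 * diagnostics only: a diradd found past the truncation point would
 * mean an entry was created in space that is being released.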
7295 */ 7296 if (blkoff != 0) { 7297 struct diradd *dap; 7298 7299 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 7300 if (dap->da_offset > blkoff) 7301 panic("cancel_pagedep: diradd %p off %d > %d", 7302 dap, dap->da_offset, blkoff); 7303 for (i = 0; i < DAHASHSZ; i++) 7304 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) 7305 if (dap->da_offset > blkoff) 7306 panic("cancel_pagedep: diradd %p off %d > %d", 7307 dap, dap->da_offset, blkoff); 7308 return (0); 7309 } 7310 /* 7311 * There should be no directory add dependencies present 7312 * as the directory could not be truncated until all 7313 * children were removed. 7314 */ 7315 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, 7316 ("deallocate_dependencies: pendinghd != NULL")); 7317 for (i = 0; i < DAHASHSZ; i++) 7318 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, 7319 ("deallocate_dependencies: diraddhd != NULL")); 7320 if ((pagedep->pd_state & NEWBLOCK) != 0) 7321 free_newdirblk(pagedep->pd_newdirblk); 7322 if (free_pagedep(pagedep) == 0) 7323 panic("Failed to free pagedep %p", pagedep); 7324 return (0); 7325 } 7326 7327 /* 7328 * Reclaim any dependency structures from a buffer that is about to 7329 * be reallocated to a new vnode. The buffer must be locked, thus, 7330 * no I/O completion operations can occur while we are manipulating 7331 * its associated dependencies. The mutex is held so that other I/O's 7332 * associated with related dependencies do not occur. 7333 */ 7334 static int 7335 deallocate_dependencies(bp, freeblks, off) 7336 struct buf *bp; 7337 struct freeblks *freeblks; 7338 int off; 7339 { 7340 struct indirdep *indirdep; 7341 struct pagedep *pagedep; 7342 struct worklist *wk, *wkn; 7343 struct ufsmount *ump; 7344 7345 ump = softdep_bp_to_mp(bp); 7346 if (ump == NULL) 7347 goto done; 7348 ACQUIRE_LOCK(ump); 7349 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { 7350 switch (wk->wk_type) { 7351 case D_INDIRDEP: 7352 indirdep = WK_INDIRDEP(wk); 7353 if (bp->b_lblkno >= 0 || 7354 bp->b_blkno != indirdep->ir_savebp->b_lblkno) 7355 panic("deallocate_dependencies: not indir"); 7356 cancel_indirdep(indirdep, bp, freeblks); 7357 continue; 7358 7359 case D_PAGEDEP: 7360 pagedep = WK_PAGEDEP(wk); 7361 if (cancel_pagedep(pagedep, freeblks, off)) { 7362 FREE_LOCK(ump); 7363 return (ERESTART); 7364 } 7365 continue; 7366 7367 case D_ALLOCINDIR: 7368 /* 7369 * Simply remove the allocindir, we'll find it via 7370 * the indirdep where we can clear pointers if 7371 * needed. 7372 */ 7373 WORKLIST_REMOVE(wk); 7374 continue; 7375 7376 case D_FREEWORK: 7377 /* 7378 * A truncation is waiting for the zero'd pointers 7379 * to be written. It can be freed when the freeblks 7380 * is journaled. 7381 */ 7382 WORKLIST_REMOVE(wk); 7383 wk->wk_state |= ONDEPLIST; 7384 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 7385 break; 7386 7387 case D_ALLOCDIRECT: 7388 if (off != 0) 7389 continue; 7390 /* FALLTHROUGH */ 7391 default: 7392 panic("deallocate_dependencies: Unexpected type %s", 7393 TYPENAME(wk->wk_type)); 7394 /* NOTREACHED */ 7395 } 7396 } 7397 FREE_LOCK(ump); 7398 done: 7399 /* 7400 * Don't throw away this buf, we were partially truncating and 7401 * some deps may always remain. 7402 */ 7403 if (off) { 7404 allocbuf(bp, off); 7405 bp->b_vflags |= BV_SCANNED; 7406 return (EBUSY); 7407 } 7408 bp->b_flags |= B_INVAL | B_NOCACHE; 7409 7410 return (0); 7411 } 7412 7413 /* 7414 * An allocdirect is being canceled due to a truncate. 
We must make sure 7415 * the journal entry is released in concert with the blkfree that releases 7416 * the storage. Completed journal entries must not be released until the 7417 * space is no longer pointed to by the inode or in the bitmap. 7418 */ 7419 static void 7420 cancel_allocdirect(adphead, adp, freeblks) 7421 struct allocdirectlst *adphead; 7422 struct allocdirect *adp; 7423 struct freeblks *freeblks; 7424 { 7425 struct freework *freework; 7426 struct newblk *newblk; 7427 struct worklist *wk; 7428 7429 TAILQ_REMOVE(adphead, adp, ad_next); 7430 newblk = (struct newblk *)adp; 7431 freework = NULL; 7432 /* 7433 * Find the correct freework structure. 7434 */ 7435 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { 7436 if (wk->wk_type != D_FREEWORK) 7437 continue; 7438 freework = WK_FREEWORK(wk); 7439 if (freework->fw_blkno == newblk->nb_newblkno) 7440 break; 7441 } 7442 if (freework == NULL) 7443 panic("cancel_allocdirect: Freework not found"); 7444 /* 7445 * If a newblk exists at all we still have the journal entry that 7446 * initiated the allocation so we do not need to journal the free. 7447 */ 7448 cancel_jfreeblk(freeblks, freework->fw_blkno); 7449 /* 7450 * If the journal hasn't been written the jnewblk must be passed 7451 * to the call to ffs_blkfree that reclaims the space. We accomplish 7452 * this by linking the journal dependency into the freework to be 7453 * freed when freework_freeblock() is called. If the journal has 7454 * been written we can simply reclaim the journal space when the 7455 * freeblks work is complete. 7456 */ 7457 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, 7458 &freeblks->fb_jwork); 7459 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 7460 } 7461 7462 7463 /* 7464 * Cancel a new block allocation. May be an indirect or direct block. We 7465 * remove it from various lists and return any journal record that needs to 7466 * be resolved by the caller. 7467 * 7468 * A special consideration is made for indirects which were never pointed 7469 * at on disk and will never be found once this block is released. 7470 */ 7471 static struct jnewblk * 7472 cancel_newblk(newblk, wk, wkhd) 7473 struct newblk *newblk; 7474 struct worklist *wk; 7475 struct workhead *wkhd; 7476 { 7477 struct jnewblk *jnewblk; 7478 7479 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); 7480 7481 newblk->nb_state |= GOINGAWAY; 7482 /* 7483 * Previously we traversed the completedhd on each indirdep 7484 * attached to this newblk to cancel them and gather journal 7485 * work. Since we need only the oldest journal segment and 7486 * the lowest point on the tree will always have the oldest 7487 * journal segment we are free to release the segments 7488 * of any subordinates and may leave the indirdep list to 7489 * indirdep_complete() when this newblk is freed. 7490 */ 7491 if (newblk->nb_state & ONDEPLIST) { 7492 newblk->nb_state &= ~ONDEPLIST; 7493 LIST_REMOVE(newblk, nb_deps); 7494 } 7495 if (newblk->nb_state & ONWORKLIST) 7496 WORKLIST_REMOVE(&newblk->nb_list); 7497 /* 7498 * If the journal entry hasn't been written we save a pointer to 7499 * the dependency that frees it until it is written or the 7500 * superseding operation completes. 
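 * The pointer is recorded in jn_dep below, so that whichever of the
 * journal write or the canceling operation finishes last can find
 * and resolve the jnewblk.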
7501 */ 7502 jnewblk = newblk->nb_jnewblk; 7503 if (jnewblk != NULL && wk != NULL) { 7504 newblk->nb_jnewblk = NULL; 7505 jnewblk->jn_dep = wk; 7506 } 7507 if (!LIST_EMPTY(&newblk->nb_jwork)) 7508 jwork_move(wkhd, &newblk->nb_jwork); 7509 /* 7510 * When truncating we must free the newdirblk early to remove 7511 * the pagedep from the hash before returning. 7512 */ 7513 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7514 free_newdirblk(WK_NEWDIRBLK(wk)); 7515 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7516 panic("cancel_newblk: extra newdirblk"); 7517 7518 return (jnewblk); 7519 } 7520 7521 /* 7522 * Schedule the freefrag associated with a newblk to be released once 7523 * the pointers are written and the previous block is no longer needed. 7524 */ 7525 static void 7526 newblk_freefrag(newblk) 7527 struct newblk *newblk; 7528 { 7529 struct freefrag *freefrag; 7530 7531 if (newblk->nb_freefrag == NULL) 7532 return; 7533 freefrag = newblk->nb_freefrag; 7534 newblk->nb_freefrag = NULL; 7535 freefrag->ff_state |= COMPLETE; 7536 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 7537 add_to_worklist(&freefrag->ff_list, 0); 7538 } 7539 7540 /* 7541 * Free a newblk. Generate a new freefrag work request if appropriate. 7542 * This must be called after the inode pointer and any direct block pointers 7543 * are valid or fully removed via truncate or frag extension. 7544 */ 7545 static void 7546 free_newblk(newblk) 7547 struct newblk *newblk; 7548 { 7549 struct indirdep *indirdep; 7550 struct worklist *wk; 7551 7552 KASSERT(newblk->nb_jnewblk == NULL, 7553 ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); 7554 KASSERT(newblk->nb_list.wk_type != D_NEWBLK, 7555 ("free_newblk: unclaimed newblk")); 7556 LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); 7557 newblk_freefrag(newblk); 7558 if (newblk->nb_state & ONDEPLIST) 7559 LIST_REMOVE(newblk, nb_deps); 7560 if (newblk->nb_state & ONWORKLIST) 7561 WORKLIST_REMOVE(&newblk->nb_list); 7562 LIST_REMOVE(newblk, nb_hash); 7563 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7564 free_newdirblk(WK_NEWDIRBLK(wk)); 7565 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7566 panic("free_newblk: extra newdirblk"); 7567 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) 7568 indirdep_complete(indirdep); 7569 handle_jwork(&newblk->nb_jwork); 7570 WORKITEM_FREE(newblk, D_NEWBLK); 7571 } 7572 7573 /* 7574 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 7575 */ 7576 static void 7577 free_newdirblk(newdirblk) 7578 struct newdirblk *newdirblk; 7579 { 7580 struct pagedep *pagedep; 7581 struct diradd *dap; 7582 struct worklist *wk; 7583 7584 LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); 7585 WORKLIST_REMOVE(&newdirblk->db_list); 7586 /* 7587 * If the pagedep is still linked onto the directory buffer 7588 * dependency chain, then some of the entries on the 7589 * pd_pendinghd list may not be committed to disk yet. In 7590 * this case, we will simply clear the NEWBLOCK flag and 7591 * let the pd_pendinghd list be processed when the pagedep 7592 * is next written. If the pagedep is no longer on the buffer 7593 * dependency chain, then all the entries on the pd_pending 7594 * list are committed to disk and we can free them here. 7595 */ 7596 pagedep = newdirblk->db_pagedep; 7597 pagedep->pd_state &= ~NEWBLOCK; 7598 if ((pagedep->pd_state & ONWORKLIST) == 0) { 7599 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 7600 free_diradd(dap, NULL); 7601 /* 7602 * If no dependencies remain, the pagedep will be freed. 
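 * free_pagedep() verifies that all of its dependency lists are empty
 * and leaves the structure in place if anything is still outstanding.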
7603 */ 7604 free_pagedep(pagedep); 7605 } 7606 /* Should only ever be one item in the list. */ 7607 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) { 7608 WORKLIST_REMOVE(wk); 7609 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 7610 } 7611 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 7612 } 7613 7614 /* 7615 * Prepare an inode to be freed. The actual free operation is not 7616 * done until the zero'ed inode has been written to disk. 7617 */ 7618 void 7619 softdep_freefile(pvp, ino, mode) 7620 struct vnode *pvp; 7621 ino_t ino; 7622 int mode; 7623 { 7624 struct inode *ip = VTOI(pvp); 7625 struct inodedep *inodedep; 7626 struct freefile *freefile; 7627 struct freeblks *freeblks; 7628 struct ufsmount *ump; 7629 7630 ump = ITOUMP(ip); 7631 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 7632 ("softdep_freefile called on non-softdep filesystem")); 7633 /* 7634 * This sets up the inode de-allocation dependency. 7635 */ 7636 freefile = malloc(sizeof(struct freefile), 7637 M_FREEFILE, M_SOFTDEP_FLAGS); 7638 workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount); 7639 freefile->fx_mode = mode; 7640 freefile->fx_oldinum = ino; 7641 freefile->fx_devvp = ump->um_devvp; 7642 LIST_INIT(&freefile->fx_jwork); 7643 UFS_LOCK(ump); 7644 ump->um_fs->fs_pendinginodes += 1; 7645 UFS_UNLOCK(ump); 7646 7647 /* 7648 * If the inodedep does not exist, then the zero'ed inode has 7649 * been written to disk. If the allocated inode has never been 7650 * written to disk, then the on-disk inode is zero'ed. In either 7651 * case we can free the file immediately. If the journal was 7652 * canceled before being written, the inode will never make it to 7653 * disk and we must send the canceled journal entries to 7654 * ffs_freefile() to be cleared in conjunction with the bitmap. 7655 * Any blocks waiting on the inode write can be safely freed 7656 * here as it will never be written. 7657 */ 7658 ACQUIRE_LOCK(ump); 7659 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); 7660 if (inodedep) { 7661 /* 7662 * Clear out freeblks that no longer need to reference 7663 * this inode. 7664 */ 7665 while ((freeblks = 7666 TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) { 7667 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, 7668 fb_next); 7669 freeblks->fb_state &= ~ONDEPLIST; 7670 } 7671 /* 7672 * Remove this inode from the unlinked list. 7673 */ 7674 if (inodedep->id_state & UNLINKED) { 7675 /* 7676 * Save the journal work to be freed with the bitmap 7677 * before we clear UNLINKED. Otherwise it can be lost 7678 * if the inode block is written. 7679 */ 7680 handle_bufwait(inodedep, &freefile->fx_jwork); 7681 clear_unlinked_inodedep(inodedep); 7682 /* 7683 * Re-acquire inodedep as we've dropped the 7684 * per-filesystem lock in clear_unlinked_inodedep(). 7685 */ 7686 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep); 7687 } 7688 } 7689 if (inodedep == NULL || check_inode_unwritten(inodedep)) { 7690 FREE_LOCK(ump); 7691 handle_workitem_freefile(freefile); 7692 return; 7693 } 7694 if ((inodedep->id_state & DEPCOMPLETE) == 0) 7695 inodedep->id_state |= GOINGAWAY; 7696 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 7697 FREE_LOCK(ump); 7698 if (ip->i_number == ino) 7699 UFS_INODE_SET_FLAG(ip, IN_MODIFIED); 7700 } 7701 7702 /* 7703 * Check to see if an inode has never been written to disk. If 7704 * so, free the inodedep and return success; otherwise return failure. 7705 * 7706 * If we still have a bitmap dependency, then the inode has never 7707 * been written to disk.
Drop the dependency as it is no longer 7708 * necessary since the inode is being deallocated. We set the 7709 * ALLCOMPLETE flags since the bitmap now properly shows that the 7710 * inode is not allocated. Even if the inode is actively being 7711 * written, it has been rolled back to its zero'ed state, so we 7712 * are ensured that a zero inode is what is on the disk. For short 7713 * lived files, this change will usually result in removing all the 7714 * dependencies from the inode so that it can be freed immediately. 7715 */ 7716 static int 7717 check_inode_unwritten(inodedep) 7718 struct inodedep *inodedep; 7719 { 7720 7721 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7722 7723 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || 7724 !LIST_EMPTY(&inodedep->id_dirremhd) || 7725 !LIST_EMPTY(&inodedep->id_pendinghd) || 7726 !LIST_EMPTY(&inodedep->id_bufwait) || 7727 !LIST_EMPTY(&inodedep->id_inowait) || 7728 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7729 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7730 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7731 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7732 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7733 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7734 inodedep->id_mkdiradd != NULL || 7735 inodedep->id_nlinkdelta != 0) 7736 return (0); 7737 /* 7738 * Another process might be in initiate_write_inodeblock_ufs[12] 7739 * trying to allocate memory without holding "Softdep Lock". 7740 */ 7741 if ((inodedep->id_state & IOSTARTED) != 0 && 7742 inodedep->id_savedino1 == NULL) 7743 return (0); 7744 7745 if (inodedep->id_state & ONDEPLIST) 7746 LIST_REMOVE(inodedep, id_deps); 7747 inodedep->id_state &= ~ONDEPLIST; 7748 inodedep->id_state |= ALLCOMPLETE; 7749 inodedep->id_bmsafemap = NULL; 7750 if (inodedep->id_state & ONWORKLIST) 7751 WORKLIST_REMOVE(&inodedep->id_list); 7752 if (inodedep->id_savedino1 != NULL) { 7753 free(inodedep->id_savedino1, M_SAVEDINO); 7754 inodedep->id_savedino1 = NULL; 7755 } 7756 if (free_inodedep(inodedep) == 0) 7757 panic("check_inode_unwritten: busy inode"); 7758 return (1); 7759 } 7760 7761 static int 7762 check_inodedep_free(inodedep) 7763 struct inodedep *inodedep; 7764 { 7765 7766 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7767 if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 7768 !LIST_EMPTY(&inodedep->id_dirremhd) || 7769 !LIST_EMPTY(&inodedep->id_pendinghd) || 7770 !LIST_EMPTY(&inodedep->id_bufwait) || 7771 !LIST_EMPTY(&inodedep->id_inowait) || 7772 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7773 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7774 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7775 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7776 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7777 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7778 inodedep->id_mkdiradd != NULL || 7779 inodedep->id_nlinkdelta != 0 || 7780 inodedep->id_savedino1 != NULL) 7781 return (0); 7782 return (1); 7783 } 7784 7785 /* 7786 * Try to free an inodedep structure. Return 1 if it could be freed. 7787 */ 7788 static int 7789 free_inodedep(inodedep) 7790 struct inodedep *inodedep; 7791 { 7792 7793 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7794 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || 7795 !check_inodedep_free(inodedep)) 7796 return (0); 7797 if (inodedep->id_state & ONDEPLIST) 7798 LIST_REMOVE(inodedep, id_deps); 7799 LIST_REMOVE(inodedep, id_hash); 7800 WORKITEM_FREE(inodedep, D_INODEDEP); 7801 return (1); 7802 } 7803 7804 /* 7805 * Free the block referenced by a freework structure. 
The parent freeblks 7806 * structure is released and completed when the final cg bitmap reaches 7807 * the disk. This routine may be freeing a jnewblk which never made it to 7808 * disk in which case we do not have to wait as the operation is undone 7809 * in memory immediately. 7810 */ 7811 static void 7812 freework_freeblock(freework, key) 7813 struct freework *freework; 7814 u_long key; 7815 { 7816 struct freeblks *freeblks; 7817 struct jnewblk *jnewblk; 7818 struct ufsmount *ump; 7819 struct workhead wkhd; 7820 struct fs *fs; 7821 int bsize; 7822 int needj; 7823 7824 ump = VFSTOUFS(freework->fw_list.wk_mp); 7825 LOCK_OWNED(ump); 7826 /* 7827 * Handle partial truncate separately. 7828 */ 7829 if (freework->fw_indir) { 7830 complete_trunc_indir(freework); 7831 return; 7832 } 7833 freeblks = freework->fw_freeblks; 7834 fs = ump->um_fs; 7835 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; 7836 bsize = lfragtosize(fs, freework->fw_frags); 7837 LIST_INIT(&wkhd); 7838 /* 7839 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives 7840 * on the indirblk hashtable and prevents premature freeing. 7841 */ 7842 freework->fw_state |= DEPCOMPLETE; 7843 /* 7844 * SUJ needs to wait for the segment referencing freed indirect 7845 * blocks to expire so that we know the checker will not confuse 7846 * a re-allocated indirect block with its old contents. 7847 */ 7848 if (needj && freework->fw_lbn <= -UFS_NDADDR) 7849 indirblk_insert(freework); 7850 /* 7851 * If we are canceling an existing jnewblk pass it to the free 7852 * routine, otherwise pass the freeblk which will ultimately 7853 * release the freeblks. If we're not journaling, we can just 7854 * free the freeblks immediately. 7855 */ 7856 jnewblk = freework->fw_jnewblk; 7857 if (jnewblk != NULL) { 7858 cancel_jnewblk(jnewblk, &wkhd); 7859 needj = 0; 7860 } else if (needj) { 7861 freework->fw_state |= DELAYEDFREE; 7862 freeblks->fb_cgwait++; 7863 WORKLIST_INSERT(&wkhd, &freework->fw_list); 7864 } 7865 FREE_LOCK(ump); 7866 freeblks_free(ump, freeblks, btodb(bsize)); 7867 CTR4(KTR_SUJ, 7868 "freework_freeblock: ino %jd blkno %jd lbn %jd size %d", 7869 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); 7870 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, 7871 freeblks->fb_inum, freeblks->fb_vtype, &wkhd, key); 7872 ACQUIRE_LOCK(ump); 7873 /* 7874 * The jnewblk will be discarded and the bits in the map never 7875 * made it to disk. We can immediately free the freeblk. 7876 */ 7877 if (needj == 0) 7878 handle_written_freework(freework); 7879 } 7880 7881 /* 7882 * We enqueue freework items that need processing back on the freeblks and 7883 * add the freeblks to the worklist. This makes it easier to find all work 7884 * required to flush a truncation in process_truncates(). 7885 */ 7886 static void 7887 freework_enqueue(freework) 7888 struct freework *freework; 7889 { 7890 struct freeblks *freeblks; 7891 7892 freeblks = freework->fw_freeblks; 7893 if ((freework->fw_state & INPROGRESS) == 0) 7894 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 7895 if ((freeblks->fb_state & 7896 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && 7897 LIST_EMPTY(&freeblks->fb_jblkdephd)) 7898 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7899 } 7900 7901 /* 7902 * Start, continue, or finish the process of freeing an indirect block tree. 7903 * The free operation may be paused at any point with fw_off containing the 7904 * offset to restart from. 
This enables us to implement some flow control 7905 * for large truncates which may fan out and generate a huge number of 7906 * dependencies. 7907 */ 7908 static void 7909 handle_workitem_indirblk(freework) 7910 struct freework *freework; 7911 { 7912 struct freeblks *freeblks; 7913 struct ufsmount *ump; 7914 struct fs *fs; 7915 7916 freeblks = freework->fw_freeblks; 7917 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7918 fs = ump->um_fs; 7919 if (freework->fw_state & DEPCOMPLETE) { 7920 handle_written_freework(freework); 7921 return; 7922 } 7923 if (freework->fw_off == NINDIR(fs)) { 7924 freework_freeblock(freework, SINGLETON_KEY); 7925 return; 7926 } 7927 freework->fw_state |= INPROGRESS; 7928 FREE_LOCK(ump); 7929 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), 7930 freework->fw_lbn); 7931 ACQUIRE_LOCK(ump); 7932 } 7933 7934 /* 7935 * Called when a freework structure attached to a cg buf is written. The 7936 * ref on either the parent or the freeblks structure is released and 7937 * the freeblks is added back to the worklist if there is more work to do. 7938 */ 7939 static void 7940 handle_written_freework(freework) 7941 struct freework *freework; 7942 { 7943 struct freeblks *freeblks; 7944 struct freework *parent; 7945 7946 freeblks = freework->fw_freeblks; 7947 parent = freework->fw_parent; 7948 if (freework->fw_state & DELAYEDFREE) 7949 freeblks->fb_cgwait--; 7950 freework->fw_state |= COMPLETE; 7951 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 7952 WORKITEM_FREE(freework, D_FREEWORK); 7953 if (parent) { 7954 if (--parent->fw_ref == 0) 7955 freework_enqueue(parent); 7956 return; 7957 } 7958 if (--freeblks->fb_ref != 0) 7959 return; 7960 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == 7961 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) 7962 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7963 } 7964 7965 /* 7966 * This workitem routine performs the block de-allocation. 7967 * The workitem is added to the pending list after the updated 7968 * inode block has been written to disk. As mentioned above, 7969 * checks regarding the number of blocks de-allocated (compared 7970 * to the number of blocks allocated for the file) are also 7971 * performed in this function. 
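 * Each item on fb_freeworkhd below is dispatched by type: dirrems are
 * moved to the worklist for directory-removal processing, allocdirects
 * and allocindirs have their newblk state released, and freeworks
 * either free a data block or continue the indirect truncation.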
7972 */ 7973 static int 7974 handle_workitem_freeblocks(freeblks, flags) 7975 struct freeblks *freeblks; 7976 int flags; 7977 { 7978 struct freework *freework; 7979 struct newblk *newblk; 7980 struct allocindir *aip; 7981 struct ufsmount *ump; 7982 struct worklist *wk; 7983 u_long key; 7984 7985 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), 7986 ("handle_workitem_freeblocks: Journal entries not written.")); 7987 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7988 key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum); 7989 ACQUIRE_LOCK(ump); 7990 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { 7991 WORKLIST_REMOVE(wk); 7992 switch (wk->wk_type) { 7993 case D_DIRREM: 7994 wk->wk_state |= COMPLETE; 7995 add_to_worklist(wk, 0); 7996 continue; 7997 7998 case D_ALLOCDIRECT: 7999 free_newblk(WK_NEWBLK(wk)); 8000 continue; 8001 8002 case D_ALLOCINDIR: 8003 aip = WK_ALLOCINDIR(wk); 8004 freework = NULL; 8005 if (aip->ai_state & DELAYEDFREE) { 8006 FREE_LOCK(ump); 8007 freework = newfreework(ump, freeblks, NULL, 8008 aip->ai_lbn, aip->ai_newblkno, 8009 ump->um_fs->fs_frag, 0, 0); 8010 ACQUIRE_LOCK(ump); 8011 } 8012 newblk = WK_NEWBLK(wk); 8013 if (newblk->nb_jnewblk) { 8014 freework->fw_jnewblk = newblk->nb_jnewblk; 8015 newblk->nb_jnewblk->jn_dep = &freework->fw_list; 8016 newblk->nb_jnewblk = NULL; 8017 } 8018 free_newblk(newblk); 8019 continue; 8020 8021 case D_FREEWORK: 8022 freework = WK_FREEWORK(wk); 8023 if (freework->fw_lbn <= -UFS_NDADDR) 8024 handle_workitem_indirblk(freework); 8025 else 8026 freework_freeblock(freework, key); 8027 continue; 8028 default: 8029 panic("handle_workitem_freeblocks: Unknown type %s", 8030 TYPENAME(wk->wk_type)); 8031 } 8032 } 8033 if (freeblks->fb_ref != 0) { 8034 freeblks->fb_state &= ~INPROGRESS; 8035 wake_worklist(&freeblks->fb_list); 8036 freeblks = NULL; 8037 } 8038 FREE_LOCK(ump); 8039 ffs_blkrelease_finish(ump, key); 8040 if (freeblks) 8041 return handle_complete_freeblocks(freeblks, flags); 8042 return (0); 8043 } 8044 8045 /* 8046 * Handle completion of block free via truncate. This allows fs_pending 8047 * to track the actual free block count more closely than if we only updated 8048 * it at the end. We must be careful to handle cases where the block count 8049 * on free was incorrect. 8050 */ 8051 static void 8052 freeblks_free(ump, freeblks, blocks) 8053 struct ufsmount *ump; 8054 struct freeblks *freeblks; 8055 int blocks; 8056 { 8057 struct fs *fs; 8058 ufs2_daddr_t remain; 8059 8060 UFS_LOCK(ump); 8061 remain = -freeblks->fb_chkcnt; 8062 freeblks->fb_chkcnt += blocks; 8063 if (remain > 0) { 8064 if (remain < blocks) 8065 blocks = remain; 8066 fs = ump->um_fs; 8067 fs->fs_pendingblocks -= blocks; 8068 } 8069 UFS_UNLOCK(ump); 8070 } 8071 8072 /* 8073 * Once all of the freework workitems are complete we can retire the 8074 * freeblocks dependency and any journal work awaiting completion. This 8075 * can not be called until all other dependencies are stable on disk. 8076 */ 8077 static int 8078 handle_complete_freeblocks(freeblks, flags) 8079 struct freeblks *freeblks; 8080 int flags; 8081 { 8082 struct inodedep *inodedep; 8083 struct inode *ip; 8084 struct vnode *vp; 8085 struct fs *fs; 8086 struct ufsmount *ump; 8087 ufs2_daddr_t spare; 8088 8089 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 8090 fs = ump->um_fs; 8091 flags = LK_EXCLUSIVE | flags; 8092 spare = freeblks->fb_chkcnt; 8093 8094 /* 8095 * If we did not release the expected number of blocks we may have 8096 * to adjust the inode block count here. 
Only do so if it wasn't 8097 * a truncation to zero and the modrev still matches. 8098 */ 8099 if (spare && freeblks->fb_len != 0) { 8100 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, 8101 flags, &vp, FFSV_FORCEINSMQ) != 0) 8102 return (EBUSY); 8103 ip = VTOI(vp); 8104 if (ip->i_mode == 0) { 8105 vgone(vp); 8106 } else if (DIP(ip, i_modrev) == freeblks->fb_modrev) { 8107 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); 8108 UFS_INODE_SET_FLAG(ip, IN_CHANGE); 8109 /* 8110 * We must wait so this happens before the 8111 * journal is reclaimed. 8112 */ 8113 ffs_update(vp, 1); 8114 } 8115 vput(vp); 8116 } 8117 if (spare < 0) { 8118 UFS_LOCK(ump); 8119 fs->fs_pendingblocks += spare; 8120 UFS_UNLOCK(ump); 8121 } 8122 #ifdef QUOTA 8123 /* Handle spare. */ 8124 if (spare) 8125 quotaadj(freeblks->fb_quota, ump, -spare); 8126 quotarele(freeblks->fb_quota); 8127 #endif 8128 ACQUIRE_LOCK(ump); 8129 if (freeblks->fb_state & ONDEPLIST) { 8130 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 8131 0, &inodedep); 8132 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); 8133 freeblks->fb_state &= ~ONDEPLIST; 8134 if (TAILQ_EMPTY(&inodedep->id_freeblklst)) 8135 free_inodedep(inodedep); 8136 } 8137 /* 8138 * All of the freeblock deps must be complete prior to this call 8139 * so it's now safe to complete earlier outstanding journal entries. 8140 */ 8141 handle_jwork(&freeblks->fb_jwork); 8142 WORKITEM_FREE(freeblks, D_FREEBLKS); 8143 FREE_LOCK(ump); 8144 return (0); 8145 } 8146 8147 /* 8148 * Release blocks associated with the freeblks and stored in the indirect 8149 * block dbn. If level is greater than SINGLE, the block is an indirect block 8150 * and recursive calls to indirtrunc must be used to cleanse other indirect 8151 * blocks. 8152 * 8153 * This handles partial and complete truncation of blocks. Partial is noted 8154 * with goingaway == 0. In this case the freework is completed after the 8155 * zero'd indirects are written to disk. For full truncation the freework 8156 * is completed after the block is freed. 8157 */ 8158 static void 8159 indir_trunc(freework, dbn, lbn) 8160 struct freework *freework; 8161 ufs2_daddr_t dbn; 8162 ufs_lbn_t lbn; 8163 { 8164 struct freework *nfreework; 8165 struct workhead wkhd; 8166 struct freeblks *freeblks; 8167 struct buf *bp; 8168 struct fs *fs; 8169 struct indirdep *indirdep; 8170 struct mount *mp; 8171 struct ufsmount *ump; 8172 ufs1_daddr_t *bap1; 8173 ufs2_daddr_t nb, nnb, *bap2; 8174 ufs_lbn_t lbnadd, nlbn; 8175 u_long key; 8176 int nblocks, ufs1fmt, freedblocks; 8177 int goingaway, freedeps, needj, level, cnt, i, error; 8178 8179 freeblks = freework->fw_freeblks; 8180 mp = freeblks->fb_list.wk_mp; 8181 ump = VFSTOUFS(mp); 8182 fs = ump->um_fs; 8183 /* 8184 * Get buffer of block pointers to be freed. There are three cases: 8185 * 8186 * 1) Partial truncate caches the indirdep pointer in the freework 8187 * which provides us a back copy to the save bp which holds the 8188 * pointers we want to clear. When this completes the zero 8189 * pointers are written to the real copy. 8190 * 2) The indirect is being completely truncated, cancel_indirdep() 8191 * eliminated the real copy and placed the indirdep on the saved 8192 * copy. The indirdep and buf are discarded when this completes. 8193 * 3) The indirect was not in memory, we read a copy off of the disk 8194 * using the devvp and drop and invalidate the buffer when we're 8195 * done. 
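 * In cases 2 and 3 the buffer is going away: it is invalidated and
 * released once the pointers have been reclaimed. In case 1 the saved
 * buffer must persist until the zero'ed copy of the indirect is
 * written.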
8196 */ 8197 goingaway = 1; 8198 indirdep = NULL; 8199 if (freework->fw_indir != NULL) { 8200 goingaway = 0; 8201 indirdep = freework->fw_indir; 8202 bp = indirdep->ir_savebp; 8203 if (bp == NULL || bp->b_blkno != dbn) 8204 panic("indir_trunc: Bad saved buf %p blkno %jd", 8205 bp, (intmax_t)dbn); 8206 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { 8207 /* 8208 * The lock prevents the buf dep list from changing and 8209 * indirects on devvp should only ever have one dependency. 8210 */ 8211 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); 8212 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) 8213 panic("indir_trunc: Bad indirdep %p from buf %p", 8214 indirdep, bp); 8215 } else { 8216 error = ffs_breadz(ump, freeblks->fb_devvp, dbn, dbn, 8217 (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp); 8218 if (error) 8219 return; 8220 } 8221 ACQUIRE_LOCK(ump); 8222 /* Protects against a race with complete_trunc_indir(). */ 8223 freework->fw_state &= ~INPROGRESS; 8224 /* 8225 * If we have an indirdep we need to enforce the truncation order 8226 * and discard it when it is complete. 8227 */ 8228 if (indirdep) { 8229 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && 8230 !TAILQ_EMPTY(&indirdep->ir_trunc)) { 8231 /* 8232 * Add the complete truncate to the list on the 8233 * indirdep to enforce in-order processing. 8234 */ 8235 if (freework->fw_indir == NULL) 8236 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, 8237 freework, fw_next); 8238 FREE_LOCK(ump); 8239 return; 8240 } 8241 /* 8242 * If we're goingaway, free the indirdep. Otherwise it will 8243 * linger until the write completes. 8244 */ 8245 if (goingaway) { 8246 KASSERT(indirdep->ir_savebp == bp, 8247 ("indir_trunc: losing ir_savebp %p", 8248 indirdep->ir_savebp)); 8249 indirdep->ir_savebp = NULL; 8250 free_indirdep(indirdep); 8251 } 8252 } 8253 FREE_LOCK(ump); 8254 /* Initialize pointers depending on block size. */ 8255 if (ump->um_fstype == UFS1) { 8256 bap1 = (ufs1_daddr_t *)bp->b_data; 8257 nb = bap1[freework->fw_off]; 8258 ufs1fmt = 1; 8259 bap2 = NULL; 8260 } else { 8261 bap2 = (ufs2_daddr_t *)bp->b_data; 8262 nb = bap2[freework->fw_off]; 8263 ufs1fmt = 0; 8264 bap1 = NULL; 8265 } 8266 level = lbn_level(lbn); 8267 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; 8268 lbnadd = lbn_offset(fs, level); 8269 nblocks = btodb(fs->fs_bsize); 8270 nfreework = freework; 8271 freedeps = 0; 8272 cnt = 0; 8273 /* 8274 * Reclaim blocks. Traverses into nested indirect levels and 8275 * arranges for the current level to be freed when subordinates 8276 * are free when journaling. 8277 */ 8278 key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum); 8279 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { 8280 if (UFS_CHECK_BLKNO(mp, freeblks->fb_inum, nb, 8281 fs->fs_bsize) != 0) 8282 nb = 0; 8283 if (i != NINDIR(fs) - 1) { 8284 if (ufs1fmt) 8285 nnb = bap1[i+1]; 8286 else 8287 nnb = bap2[i+1]; 8288 } else 8289 nnb = 0; 8290 if (nb == 0) 8291 continue; 8292 cnt++; 8293 if (level != 0) { 8294 nlbn = (lbn + 1) - (i * lbnadd); 8295 if (needj != 0) { 8296 nfreework = newfreework(ump, freeblks, freework, 8297 nlbn, nb, fs->fs_frag, 0, 0); 8298 freedeps++; 8299 } 8300 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); 8301 } else { 8302 struct freedep *freedep; 8303 8304 /* 8305 * Attempt to aggregate freedep dependencies for 8306 * all blocks being released to the same CG. 
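 * A single freedep then stands in for every block of the run that
 * falls within one cylinder group, so the cg bitmap write retires the
 * whole run together.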
8307 */ 8308 LIST_INIT(&wkhd); 8309 if (needj != 0 && 8310 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { 8311 freedep = newfreedep(freework); 8312 WORKLIST_INSERT_UNLOCKED(&wkhd, 8313 &freedep->fd_list); 8314 freedeps++; 8315 } 8316 CTR3(KTR_SUJ, 8317 "indir_trunc: ino %jd blkno %jd size %d", 8318 freeblks->fb_inum, nb, fs->fs_bsize); 8319 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, 8320 fs->fs_bsize, freeblks->fb_inum, 8321 freeblks->fb_vtype, &wkhd, key); 8322 } 8323 } 8324 ffs_blkrelease_finish(ump, key); 8325 if (goingaway) { 8326 bp->b_flags |= B_INVAL | B_NOCACHE; 8327 brelse(bp); 8328 } 8329 freedblocks = 0; 8330 if (level == 0) 8331 freedblocks = (nblocks * cnt); 8332 if (needj == 0) 8333 freedblocks += nblocks; 8334 freeblks_free(ump, freeblks, freedblocks); 8335 /* 8336 * If we are journaling set up the ref counts and offset so this 8337 * indirect can be completed when its children are free. 8338 */ 8339 if (needj) { 8340 ACQUIRE_LOCK(ump); 8341 freework->fw_off = i; 8342 freework->fw_ref += freedeps; 8343 freework->fw_ref -= NINDIR(fs) + 1; 8344 if (level == 0) 8345 freeblks->fb_cgwait += freedeps; 8346 if (freework->fw_ref == 0) 8347 freework_freeblock(freework, SINGLETON_KEY); 8348 FREE_LOCK(ump); 8349 return; 8350 } 8351 /* 8352 * If we're not journaling we can free the indirect now. 8353 */ 8354 dbn = dbtofsb(fs, dbn); 8355 CTR3(KTR_SUJ, 8356 "indir_trunc 2: ino %jd blkno %jd size %d", 8357 freeblks->fb_inum, dbn, fs->fs_bsize); 8358 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, 8359 freeblks->fb_inum, freeblks->fb_vtype, NULL, SINGLETON_KEY); 8360 /* Non SUJ softdep does single-threaded truncations. */ 8361 if (freework->fw_blkno == dbn) { 8362 freework->fw_state |= ALLCOMPLETE; 8363 ACQUIRE_LOCK(ump); 8364 handle_written_freework(freework); 8365 FREE_LOCK(ump); 8366 } 8367 return; 8368 } 8369 8370 /* 8371 * Cancel an allocindir when it is removed via truncation. When bp is not 8372 * NULL the indirect never appeared on disk and is scheduled to be freed 8373 * independently of the indir so we can more easily track journal work. 8374 */ 8375 static void 8376 cancel_allocindir(aip, bp, freeblks, trunc) 8377 struct allocindir *aip; 8378 struct buf *bp; 8379 struct freeblks *freeblks; 8380 int trunc; 8381 { 8382 struct indirdep *indirdep; 8383 struct freefrag *freefrag; 8384 struct newblk *newblk; 8385 8386 newblk = (struct newblk *)aip; 8387 LIST_REMOVE(aip, ai_next); 8388 /* 8389 * We must eliminate the pointer in bp if it must be freed on its 8390 * own due to partial truncate or pending journal work. 8391 */ 8392 if (bp && (trunc || newblk->nb_jnewblk)) { 8393 /* 8394 * Clear the pointer and mark the aip to be freed 8395 * directly if it never existed on disk. 8396 */ 8397 aip->ai_state |= DELAYEDFREE; 8398 indirdep = aip->ai_indirdep; 8399 if (indirdep->ir_state & UFS1FMT) 8400 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8401 else 8402 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8403 } 8404 /* 8405 * When truncating the previous pointer will be freed via 8406 * savedbp. Eliminate the freefrag which would dup free. 8407 */ 8408 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { 8409 newblk->nb_freefrag = NULL; 8410 if (freefrag->ff_jdep) 8411 cancel_jfreefrag( 8412 WK_JFREEFRAG(freefrag->ff_jdep)); 8413 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); 8414 WORKITEM_FREE(freefrag, D_FREEFRAG); 8415 } 8416 /* 8417 * If the journal hasn't been written the jnewblk must be passed 8418 * to the call to ffs_blkfree that reclaims the space. 
We accomplish 8419 * this by leaving the journal dependency on the newblk to be freed 8420 * when a freework is created in handle_workitem_freeblocks(). 8421 */ 8422 cancel_newblk(newblk, NULL, &freeblks->fb_jwork); 8423 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 8424 } 8425 8426 /* 8427 * Create the mkdir dependencies for . and .. in a new directory. Link them 8428 * in to a newdirblk so any subsequent additions are tracked properly. The 8429 * caller is responsible for adding the mkdir1 dependency to the journal 8430 * and updating id_mkdiradd. This function returns with the per-filesystem 8431 * lock held. 8432 */ 8433 static struct mkdir * 8434 setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) 8435 struct diradd *dap; 8436 ino_t newinum; 8437 ino_t dinum; 8438 struct buf *newdirbp; 8439 struct mkdir **mkdirp; 8440 { 8441 struct newblk *newblk; 8442 struct pagedep *pagedep; 8443 struct inodedep *inodedep; 8444 struct newdirblk *newdirblk; 8445 struct mkdir *mkdir1, *mkdir2; 8446 struct worklist *wk; 8447 struct jaddref *jaddref; 8448 struct ufsmount *ump; 8449 struct mount *mp; 8450 8451 mp = dap->da_list.wk_mp; 8452 ump = VFSTOUFS(mp); 8453 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, 8454 M_SOFTDEP_FLAGS); 8455 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8456 LIST_INIT(&newdirblk->db_mkdir); 8457 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8458 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); 8459 mkdir1->md_state = ATTACHED | MKDIR_BODY; 8460 mkdir1->md_diradd = dap; 8461 mkdir1->md_jaddref = NULL; 8462 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8463 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); 8464 mkdir2->md_state = ATTACHED | MKDIR_PARENT; 8465 mkdir2->md_diradd = dap; 8466 mkdir2->md_jaddref = NULL; 8467 if (MOUNTEDSUJ(mp) == 0) { 8468 mkdir1->md_state |= DEPCOMPLETE; 8469 mkdir2->md_state |= DEPCOMPLETE; 8470 } 8471 /* 8472 * Dependency on "." and ".." being written to disk. 8473 */ 8474 mkdir1->md_buf = newdirbp; 8475 ACQUIRE_LOCK(VFSTOUFS(mp)); 8476 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); 8477 /* 8478 * We must link the pagedep, allocdirect, and newdirblk for 8479 * the initial file page so the pointer to the new directory 8480 * is not written until the directory contents are live and 8481 * any subsequent additions are not marked live until the 8482 * block is reachable via the inode. 8483 */ 8484 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) 8485 panic("setup_newdir: lost pagedep"); 8486 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) 8487 if (wk->wk_type == D_ALLOCDIRECT) 8488 break; 8489 if (wk == NULL) 8490 panic("setup_newdir: lost allocdirect"); 8491 if (pagedep->pd_state & NEWBLOCK) 8492 panic("setup_newdir: NEWBLOCK already set"); 8493 newblk = WK_NEWBLK(wk); 8494 pagedep->pd_state |= NEWBLOCK; 8495 pagedep->pd_newdirblk = newdirblk; 8496 newdirblk->db_pagedep = pagedep; 8497 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8498 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); 8499 /* 8500 * Look up the inodedep for the parent directory so that we 8501 * can link mkdir2 into the pending dotdot jaddref or 8502 * the inode write if there is none. If the inode is 8503 * ALLCOMPLETE and no jaddref is present all dependencies have 8504 * been satisfied and mkdir2 can be freed. 
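 * (mkdir1, marked MKDIR_BODY, waits on the write of the new
 * directory's first block holding the dot and dotdot entries; mkdir2,
 * marked MKDIR_PARENT, waits on the parent directory's inode, whose
 * link count was bumped by the new dotdot reference.)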
8505 */ 8506 inodedep_lookup(mp, dinum, 0, &inodedep); 8507 if (MOUNTEDSUJ(mp)) { 8508 if (inodedep == NULL) 8509 panic("setup_newdir: Lost parent."); 8510 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8511 inoreflst); 8512 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && 8513 (jaddref->ja_state & MKDIR_PARENT), 8514 ("setup_newdir: bad dotdot jaddref %p", jaddref)); 8515 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8516 mkdir2->md_jaddref = jaddref; 8517 jaddref->ja_mkdir = mkdir2; 8518 } else if (inodedep == NULL || 8519 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 8520 dap->da_state &= ~MKDIR_PARENT; 8521 WORKITEM_FREE(mkdir2, D_MKDIR); 8522 mkdir2 = NULL; 8523 } else { 8524 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8525 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); 8526 } 8527 *mkdirp = mkdir2; 8528 8529 return (mkdir1); 8530 } 8531 8532 /* 8533 * Directory entry addition dependencies. 8534 * 8535 * When adding a new directory entry, the inode (with its incremented link 8536 * count) must be written to disk before the directory entry's pointer to it. 8537 * Also, if the inode is newly allocated, the corresponding freemap must be 8538 * updated (on disk) before the directory entry's pointer. These requirements 8539 * are met via undo/redo on the directory entry's pointer, which consists 8540 * simply of the inode number. 8541 * 8542 * As directory entries are added and deleted, the free space within a 8543 * directory block can become fragmented. The ufs filesystem will compact 8544 * a fragmented directory block to make space for a new entry. When this 8545 * occurs, the offsets of previously added entries change. Any "diradd" 8546 * dependency structures corresponding to these entries must be updated with 8547 * the new offsets. 8548 */ 8549 8550 /* 8551 * This routine is called after the in-memory inode's link 8552 * count has been incremented, but before the directory entry's 8553 * pointer to the inode has been set. 8554 */ 8555 int 8556 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 8557 struct buf *bp; /* buffer containing directory block */ 8558 struct inode *dp; /* inode for directory */ 8559 off_t diroffset; /* offset of new entry in directory */ 8560 ino_t newinum; /* inode referenced by new directory entry */ 8561 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 8562 int isnewblk; /* entry is in a newly allocated block */ 8563 { 8564 int offset; /* offset of new entry within directory block */ 8565 ufs_lbn_t lbn; /* block in directory containing new entry */ 8566 struct fs *fs; 8567 struct diradd *dap; 8568 struct newblk *newblk; 8569 struct pagedep *pagedep; 8570 struct inodedep *inodedep; 8571 struct newdirblk *newdirblk; 8572 struct mkdir *mkdir1, *mkdir2; 8573 struct jaddref *jaddref; 8574 struct ufsmount *ump; 8575 struct mount *mp; 8576 int isindir; 8577 8578 mp = ITOVFS(dp); 8579 ump = VFSTOUFS(mp); 8580 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8581 ("softdep_setup_directory_add called on non-softdep filesystem")); 8582 /* 8583 * Whiteouts have no dependencies. 
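 * A whiteout stores the reserved inode number UFS_WINO in the entry
 * and references no real inode, so there is no inode or bitmap write
 * to order the entry against.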
8584 */ 8585 if (newinum == UFS_WINO) { 8586 if (newdirbp != NULL) 8587 bdwrite(newdirbp); 8588 return (0); 8589 } 8590 jaddref = NULL; 8591 mkdir1 = mkdir2 = NULL; 8592 fs = ump->um_fs; 8593 lbn = lblkno(fs, diroffset); 8594 offset = blkoff(fs, diroffset); 8595 dap = malloc(sizeof(struct diradd), M_DIRADD, 8596 M_SOFTDEP_FLAGS|M_ZERO); 8597 workitem_alloc(&dap->da_list, D_DIRADD, mp); 8598 dap->da_offset = offset; 8599 dap->da_newinum = newinum; 8600 dap->da_state = ATTACHED; 8601 LIST_INIT(&dap->da_jwork); 8602 isindir = bp->b_lblkno >= UFS_NDADDR; 8603 newdirblk = NULL; 8604 if (isnewblk && 8605 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { 8606 newdirblk = malloc(sizeof(struct newdirblk), 8607 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 8608 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8609 LIST_INIT(&newdirblk->db_mkdir); 8610 } 8611 /* 8612 * If we're creating a new directory setup the dependencies and set 8613 * the dap state to wait for them. Otherwise it's COMPLETE and 8614 * we can move on. 8615 */ 8616 if (newdirbp == NULL) { 8617 dap->da_state |= DEPCOMPLETE; 8618 ACQUIRE_LOCK(ump); 8619 } else { 8620 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 8621 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, 8622 &mkdir2); 8623 } 8624 /* 8625 * Link into parent directory pagedep to await its being written. 8626 */ 8627 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); 8628 #ifdef INVARIANTS 8629 if (diradd_lookup(pagedep, offset) != NULL) 8630 panic("softdep_setup_directory_add: %p already at off %d\n", 8631 diradd_lookup(pagedep, offset), offset); 8632 #endif 8633 dap->da_pagedep = pagedep; 8634 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 8635 da_pdlist); 8636 inodedep_lookup(mp, newinum, DEPALLOC, &inodedep); 8637 /* 8638 * If we're journaling, link the diradd into the jaddref so it 8639 * may be completed after the journal entry is written. Otherwise, 8640 * link the diradd into its inodedep. If the inode is not yet 8641 * written place it on the bufwait list, otherwise do the post-inode 8642 * write processing to put it on the id_pendinghd list. 8643 */ 8644 if (MOUNTEDSUJ(mp)) { 8645 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8646 inoreflst); 8647 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 8648 ("softdep_setup_directory_add: bad jaddref %p", jaddref)); 8649 jaddref->ja_diroff = diroffset; 8650 jaddref->ja_diradd = dap; 8651 add_to_journal(&jaddref->ja_list); 8652 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 8653 diradd_inode_written(dap, inodedep); 8654 else 8655 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 8656 /* 8657 * Add the journal entries for . and .. links now that the primary 8658 * link is written. 8659 */ 8660 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { 8661 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 8662 inoreflst, if_deps); 8663 KASSERT(jaddref != NULL && 8664 jaddref->ja_ino == jaddref->ja_parent && 8665 (jaddref->ja_state & MKDIR_BODY), 8666 ("softdep_setup_directory_add: bad dot jaddref %p", 8667 jaddref)); 8668 mkdir1->md_jaddref = jaddref; 8669 jaddref->ja_mkdir = mkdir1; 8670 /* 8671 * It is important that the dotdot journal entry 8672 * is added prior to the dot entry since dot writes 8673 * both the dot and dotdot links. These both must 8674 * be added after the primary link for the journal 8675 * to remain consistent. 
8676 */ 8677 add_to_journal(&mkdir2->md_jaddref->ja_list); 8678 add_to_journal(&jaddref->ja_list); 8679 } 8680 /* 8681 * If we are adding a new directory remember this diradd so that if 8682 * we rename it we can keep the dot and dotdot dependencies. If 8683 * we are adding a new name for an inode that has a mkdiradd we 8684 * must be in rename and we have to move the dot and dotdot 8685 * dependencies to this new name. The old name is being orphaned 8686 * soon. 8687 */ 8688 if (mkdir1 != NULL) { 8689 if (inodedep->id_mkdiradd != NULL) 8690 panic("softdep_setup_directory_add: Existing mkdir"); 8691 inodedep->id_mkdiradd = dap; 8692 } else if (inodedep->id_mkdiradd) 8693 merge_diradd(inodedep, dap); 8694 if (newdirblk != NULL) { 8695 /* 8696 * There is nothing to do if we are already tracking 8697 * this block. 8698 */ 8699 if ((pagedep->pd_state & NEWBLOCK) != 0) { 8700 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 8701 FREE_LOCK(ump); 8702 return (0); 8703 } 8704 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) 8705 == 0) 8706 panic("softdep_setup_directory_add: lost entry"); 8707 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8708 pagedep->pd_state |= NEWBLOCK; 8709 pagedep->pd_newdirblk = newdirblk; 8710 newdirblk->db_pagedep = pagedep; 8711 FREE_LOCK(ump); 8712 /* 8713 * If we extended into an indirect signal direnter to sync. 8714 */ 8715 if (isindir) 8716 return (1); 8717 return (0); 8718 } 8719 FREE_LOCK(ump); 8720 return (0); 8721 } 8722 8723 /* 8724 * This procedure is called to change the offset of a directory 8725 * entry when compacting a directory block which must be owned 8726 * exclusively by the caller. Note that the actual entry movement 8727 * must be done in this procedure to ensure that no I/O completions 8728 * occur while the move is in progress. 8729 */ 8730 void 8731 softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) 8732 struct buf *bp; /* Buffer holding directory block. */ 8733 struct inode *dp; /* inode for directory */ 8734 caddr_t base; /* address of dp->i_offset */ 8735 caddr_t oldloc; /* address of old directory location */ 8736 caddr_t newloc; /* address of new directory location */ 8737 int entrysize; /* size of directory entry */ 8738 { 8739 int offset, oldoffset, newoffset; 8740 struct pagedep *pagedep; 8741 struct jmvref *jmvref; 8742 struct diradd *dap; 8743 struct direct *de; 8744 struct mount *mp; 8745 struct ufsmount *ump; 8746 ufs_lbn_t lbn; 8747 int flags; 8748 8749 mp = ITOVFS(dp); 8750 ump = VFSTOUFS(mp); 8751 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8752 ("softdep_change_directoryentry_offset called on " 8753 "non-softdep filesystem")); 8754 de = (struct direct *)oldloc; 8755 jmvref = NULL; 8756 flags = 0; 8757 /* 8758 * Moves are always journaled as it would be too complex to 8759 * determine if any affected adds or removes are present in the 8760 * journal. 
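 * The jmvref created below records both the old and new offsets of
 * the entry so that journal recovery can account for the entry at
 * either location.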
8761 */ 8762 if (MOUNTEDSUJ(mp)) { 8763 flags = DEPALLOC; 8764 jmvref = newjmvref(dp, de->d_ino, 8765 dp->i_offset + (oldloc - base), 8766 dp->i_offset + (newloc - base)); 8767 } 8768 lbn = lblkno(ump->um_fs, dp->i_offset); 8769 offset = blkoff(ump->um_fs, dp->i_offset); 8770 oldoffset = offset + (oldloc - base); 8771 newoffset = offset + (newloc - base); 8772 ACQUIRE_LOCK(ump); 8773 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0) 8774 goto done; 8775 dap = diradd_lookup(pagedep, oldoffset); 8776 if (dap) { 8777 dap->da_offset = newoffset; 8778 newoffset = DIRADDHASH(newoffset); 8779 oldoffset = DIRADDHASH(oldoffset); 8780 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE && 8781 newoffset != oldoffset) { 8782 LIST_REMOVE(dap, da_pdlist); 8783 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset], 8784 dap, da_pdlist); 8785 } 8786 } 8787 done: 8788 if (jmvref) { 8789 jmvref->jm_pagedep = pagedep; 8790 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps); 8791 add_to_journal(&jmvref->jm_list); 8792 } 8793 bcopy(oldloc, newloc, entrysize); 8794 FREE_LOCK(ump); 8795 } 8796 8797 /* 8798 * Move the mkdir dependencies and journal work from one diradd to another 8799 * when renaming a directory. The new name must depend on the mkdir deps 8800 * completing as the old name did. Directories can only have one valid link 8801 * at a time, so one must be canonical. 8802 */ 8803 static void 8804 merge_diradd(inodedep, newdap) 8805 struct inodedep *inodedep; 8806 struct diradd *newdap; 8807 { 8808 struct diradd *olddap; 8809 struct mkdir *mkdir, *nextmd; 8810 struct ufsmount *ump; 8811 short state; 8812 8813 olddap = inodedep->id_mkdiradd; 8814 inodedep->id_mkdiradd = newdap; 8815 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8816 newdap->da_state &= ~DEPCOMPLETE; 8817 ump = VFSTOUFS(inodedep->id_list.wk_mp); 8818 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8819 mkdir = nextmd) { 8820 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8821 if (mkdir->md_diradd != olddap) 8822 continue; 8823 mkdir->md_diradd = newdap; 8824 state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY); 8825 newdap->da_state |= state; 8826 olddap->da_state &= ~state; 8827 if ((olddap->da_state & 8828 (MKDIR_PARENT | MKDIR_BODY)) == 0) 8829 break; 8830 } 8831 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8832 panic("merge_diradd: unfound ref"); 8833 } 8834 /* 8835 * Any mkdir-related journal items are not safe to free until 8836 * the new name is stable. 8837 */ 8838 jwork_move(&newdap->da_jwork, &olddap->da_jwork); 8839 olddap->da_state |= DEPCOMPLETE; 8840 complete_diradd(olddap); 8841 } 8842 8843 /* 8844 * Move the diradd to the pending list when all diradd dependencies are 8845 * complete. 8846 */ 8847 static void 8848 complete_diradd(dap) 8849 struct diradd *dap; 8850 { 8851 struct pagedep *pagedep; 8852 8853 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 8854 if (dap->da_state & DIRCHG) 8855 pagedep = dap->da_previous->dm_pagedep; 8856 else 8857 pagedep = dap->da_pagedep; 8858 LIST_REMOVE(dap, da_pdlist); 8859 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 8860 } 8861 } 8862 8863 /* 8864 * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal 8865 * add entries and conditionally journal the remove.
8866 */ 8867 static void 8868 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref) 8869 struct diradd *dap; 8870 struct dirrem *dirrem; 8871 struct jremref *jremref; 8872 struct jremref *dotremref; 8873 struct jremref *dotdotremref; 8874 { 8875 struct inodedep *inodedep; 8876 struct jaddref *jaddref; 8877 struct inoref *inoref; 8878 struct ufsmount *ump; 8879 struct mkdir *mkdir; 8880 8881 /* 8882 * If no remove references were allocated, we're on a non-journaled 8883 * filesystem and can skip the cancel step. 8884 */ 8885 if (jremref == NULL) { 8886 free_diradd(dap, NULL); 8887 return; 8888 } 8889 /* 8890 * Cancel the primary name and free it if it does not require 8891 * journaling. 8892 */ 8893 if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum, 8894 0, &inodedep) != 0) { 8895 /* Abort the addref that references this diradd. */ 8896 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 8897 if (inoref->if_list.wk_type != D_JADDREF) 8898 continue; 8899 jaddref = (struct jaddref *)inoref; 8900 if (jaddref->ja_diradd != dap) 8901 continue; 8902 if (cancel_jaddref(jaddref, inodedep, 8903 &dirrem->dm_jwork) == 0) { 8904 free_jremref(jremref); 8905 jremref = NULL; 8906 } 8907 break; 8908 } 8909 } 8910 /* 8911 * Cancel subordinate names and free them if they do not require 8912 * journaling. 8913 */ 8914 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8915 ump = VFSTOUFS(dap->da_list.wk_mp); 8916 LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) { 8917 if (mkdir->md_diradd != dap) 8918 continue; 8919 if ((jaddref = mkdir->md_jaddref) == NULL) 8920 continue; 8921 mkdir->md_jaddref = NULL; 8922 if (mkdir->md_state & MKDIR_PARENT) { 8923 if (cancel_jaddref(jaddref, NULL, 8924 &dirrem->dm_jwork) == 0) { 8925 free_jremref(dotdotremref); 8926 dotdotremref = NULL; 8927 } 8928 } else { 8929 if (cancel_jaddref(jaddref, inodedep, 8930 &dirrem->dm_jwork) == 0) { 8931 free_jremref(dotremref); 8932 dotremref = NULL; 8933 } 8934 } 8935 } 8936 } 8937 8938 if (jremref) 8939 journal_jremref(dirrem, jremref, inodedep); 8940 if (dotremref) 8941 journal_jremref(dirrem, dotremref, inodedep); 8942 if (dotdotremref) 8943 journal_jremref(dirrem, dotdotremref, NULL); 8944 jwork_move(&dirrem->dm_jwork, &dap->da_jwork); 8945 free_diradd(dap, &dirrem->dm_jwork); 8946 } 8947 8948 /* 8949 * Free a diradd dependency structure.
 */
static void
free_diradd(dap, wkhd)
	struct diradd *dap;
	struct workhead *wkhd;
{
	struct dirrem *dirrem;
	struct pagedep *pagedep;
	struct inodedep *inodedep;
	struct mkdir *mkdir, *nextmd;
	struct ufsmount *ump;

	ump = VFSTOUFS(dap->da_list.wk_mp);
	LOCK_OWNED(ump);
	LIST_REMOVE(dap, da_pdlist);
	if (dap->da_state & ONWORKLIST)
		WORKLIST_REMOVE(&dap->da_list);
	if ((dap->da_state & DIRCHG) == 0) {
		pagedep = dap->da_pagedep;
	} else {
		dirrem = dap->da_previous;
		pagedep = dirrem->dm_pagedep;
		dirrem->dm_dirinum = pagedep->pd_ino;
		dirrem->dm_state |= COMPLETE;
		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
			add_to_worklist(&dirrem->dm_list, 0);
	}
	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
	    0, &inodedep) != 0)
		if (inodedep->id_mkdiradd == dap)
			inodedep->id_mkdiradd = NULL;
	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
		     mkdir = nextmd) {
			nextmd = LIST_NEXT(mkdir, md_mkdirs);
			if (mkdir->md_diradd != dap)
				continue;
			dap->da_state &=
			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
			LIST_REMOVE(mkdir, md_mkdirs);
			if (mkdir->md_state & ONWORKLIST)
				WORKLIST_REMOVE(&mkdir->md_list);
			if (mkdir->md_jaddref != NULL)
				panic("free_diradd: Unexpected jaddref");
			WORKITEM_FREE(mkdir, D_MKDIR);
			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
				break;
		}
		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
			panic("free_diradd: unfound ref");
	}
	if (inodedep)
		free_inodedep(inodedep);
	/*
	 * Free any journal segments waiting for the directory write.
	 */
	handle_jwork(&dap->da_jwork);
	WORKITEM_FREE(dap, D_DIRADD);
}

/*
 * Directory entry removal dependencies.
 *
 * When removing a directory entry, the entry's inode pointer must be
 * zero'ed on disk before the corresponding inode's link count is decremented
 * (possibly freeing the inode for re-use).  This dependency is handled by
 * updating the directory entry but delaying the inode count reduction until
 * after the directory block has been written to disk.  After this point, the
 * inode count can be decremented whenever it is convenient.
 */
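
/*
 * Editorial sketch (not part of the original source): the expected shape
 * of a removal in the UFS directory code is roughly
 *
 *	ip->i_effnlink--;		in-memory count drops immediately
 *	ep->d_ino = 0;			zero the entry in the dir block
 *	if (DOINGSOFTDEP(dvp))
 *		softdep_setup_remove(bp, dp, ip, isrmdir);
 *	else
 *		ip->i_nlink--;		on-disk count; only safe without
 *					soft updates
 *
 * with the on-disk i_nlink lowered later by handle_workitem_remove() once
 * the zeroed directory block is stable.  The names follow the ufs_lookup.c
 * conventions but are abbreviated for illustration.
 */
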
/*
 * This routine should be called immediately after removing
 * a directory entry.  The inode's link count should not be
 * decremented by the calling procedure -- the soft updates
 * code will do this task when it is safe.
 */
void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	int isrmdir;		/* indicates if doing RMDIR */
{
	struct dirrem *dirrem, *prevdirrem;
	struct inodedep *inodedep;
	struct ufsmount *ump;
	int direct;

	ump = ITOUMP(ip);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_remove called on non-softdep filesystem"));
	/*
	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
	 * newdirrem() to set up the full directory remove, which requires
	 * isrmdir > 1.
	 */
	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
	/*
	 * Add the dirrem to the inodedep's pending remove list for quick
	 * discovery later.
	 */
	if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0)
		panic("softdep_setup_remove: Lost inodedep.");
	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);

	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to a zeroed entry until
	 * the new inode is committed to disk.  If the COMPLETE flag is
	 * set then we have deleted an entry that never made it to
	 * disk.  If the entry we deleted resulted from a name change,
	 * then the old name still resides on disk.  We cannot delete
	 * its inode (returned to us in prevdirrem) until the zeroed
	 * directory entry gets to disk.  The new inode has never been
	 * referenced on the disk, so can be deleted immediately.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
		    dm_next);
		FREE_LOCK(ump);
	} else {
		if (prevdirrem != NULL)
			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
			    prevdirrem, dm_next);
		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
		FREE_LOCK(ump);
		if (direct)
			handle_workitem_remove(dirrem, 0);
	}
}

/*
 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
 * pd_pendinghd list of a pagedep.
 */
static struct diradd *
diradd_lookup(pagedep, offset)
	struct pagedep *pagedep;
	int offset;
{
	struct diradd *dap;

	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	return (NULL);
}

/*
 * Search for a .. diradd dependency in a directory that is being removed.
 * If the directory was renamed to a new parent we have a diradd rather
 * than a mkdir for the .. entry.  We need to cancel it now before
 * it is found in truncate().
 */
static struct jremref *
cancel_diradd_dotdot(ip, dirrem, jremref)
	struct inode *ip;
	struct dirrem *dirrem;
	struct jremref *jremref;
{
	struct pagedep *pagedep;
	struct diradd *dap;
	struct worklist *wk;

	if (pagedep_lookup(ITOVFS(ip), NULL, ip->i_number, 0, 0, &pagedep) == 0)
		return (jremref);
	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
	if (dap == NULL)
		return (jremref);
	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
	/*
	 * Mark any journal work as belonging to the parent so it is freed
	 * with the .. reference.
	 */
	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
		wk->wk_state |= MKDIR_PARENT;
	return (NULL);
}

/*
 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
 * replace it with a dirrem/diradd pair as a result of re-parenting a
 * directory.  This ensures that we don't simultaneously have a mkdir and
 * a diradd for the same .. entry.
 */
static struct jremref *
cancel_mkdir_dotdot(ip, dirrem, jremref)
	struct inode *ip;
	struct dirrem *dirrem;
	struct jremref *jremref;
{
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct ufsmount *ump;
	struct mkdir *mkdir;
	struct diradd *dap;
	struct mount *mp;

	mp = ITOVFS(ip);
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
		return (jremref);
	dap = inodedep->id_mkdiradd;
	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
		return (jremref);
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
			break;
	if (mkdir == NULL)
		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
	if ((jaddref = mkdir->md_jaddref) != NULL) {
		mkdir->md_jaddref = NULL;
		jaddref->ja_state &= ~MKDIR_PARENT;
		if (inodedep_lookup(mp, jaddref->ja_ino, 0, &inodedep) == 0)
			panic("cancel_mkdir_dotdot: Lost parent inodedep");
		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
			journal_jremref(dirrem, jremref, inodedep);
			jremref = NULL;
		}
	}
	if (mkdir->md_state & ONWORKLIST)
		WORKLIST_REMOVE(&mkdir->md_list);
	mkdir->md_state |= ALLCOMPLETE;
	complete_mkdir(mkdir);
	return (jremref);
}

static void
journal_jremref(dirrem, jremref, inodedep)
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct inodedep *inodedep;
{

	if (inodedep == NULL)
		if (inodedep_lookup(jremref->jr_list.wk_mp,
		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
			panic("journal_jremref: Lost inodedep");
	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
	add_to_journal(&jremref->jr_list);
}

static void
dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
{
	struct inodedep *inodedep;

	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
	    &inodedep) == 0)
		panic("dirrem_journal: Lost inodedep");
	journal_jremref(dirrem, jremref, inodedep);
	if (dotremref)
		journal_jremref(dirrem, dotremref, inodedep);
	if (dotdotremref)
		journal_jremref(dirrem, dotdotremref, NULL);
}
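
/*
 * Editorial note (sketch, not in the original source): for a journaled
 * rmdir, newdirrem() below allocates one remove reference per on-disk
 * name that goes away.  For a directory "D" inside parent "P":
 *
 *	jremref		P's entry for "D"	nlink ip->i_effnlink + 2
 *	dotremref	D's "." entry		nlink ip->i_effnlink + 1
 *	dotdotremref	D's ".." entry		nlink dp->i_effnlink + 1
 *
 * The +1/+2 adjustments appear to reconstruct the link counts as they
 * stood before each successive remove, since the caller has already
 * decremented the in-memory i_effnlink fields.
 */
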
/*
 * Allocate a new dirrem if appropriate and return it along with
 * its associated pagedep.  Called without a lock, returns with lock.
 */
static struct dirrem *
newdirrem(bp, dp, ip, isrmdir, prevdirremp)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	int isrmdir;		/* indicates if doing RMDIR */
	struct dirrem **prevdirremp; /* previously referenced inode, if any */
{
	int offset;
	ufs_lbn_t lbn;
	struct diradd *dap;
	struct dirrem *dirrem;
	struct pagedep *pagedep;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
	struct vnode *dvp;
	struct ufsmount *ump;

	/*
	 * Whiteouts have no deletion dependencies.
	 */
	if (ip == NULL)
		panic("newdirrem: whiteout");
	dvp = ITOV(dp);
	ump = ITOUMP(dp);

	/*
	 * If the system is over its limit and our filesystem is
	 * responsible for more than our share of that usage and
	 * we are not a snapshot, request some inodedep cleanup.
	 * Limiting the number of dirrem structures will also limit
	 * the number of freefile and freeblks structures.
	 */
	ACQUIRE_LOCK(ump);
	if (!IS_SNAPSHOT(ip) && softdep_excess_items(ump, D_DIRREM))
		schedule_cleanup(UFSTOVFS(ump));
	else
		FREE_LOCK(ump);
	dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS |
	    M_ZERO);
	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
	LIST_INIT(&dirrem->dm_jremrefhd);
	LIST_INIT(&dirrem->dm_jwork);
	dirrem->dm_state = isrmdir ? RMDIR : 0;
	dirrem->dm_oldinum = ip->i_number;
	*prevdirremp = NULL;
	/*
	 * Allocate remove reference structures to track journal write
	 * dependencies.  We will always have one for the link and
	 * when doing directories we will always have one more for dot.
	 * When renaming a directory we skip the dotdot link change so
	 * this is not needed.
	 */
	jremref = dotremref = dotdotremref = NULL;
	if (DOINGSUJ(dvp)) {
		if (isrmdir) {
			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
			    ip->i_effnlink + 2);
			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
			    ip->i_effnlink + 1);
			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
			    dp->i_effnlink + 1);
			dotdotremref->jr_state |= MKDIR_PARENT;
		} else
			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
			    ip->i_effnlink + 1);
	}
	ACQUIRE_LOCK(ump);
	lbn = lblkno(ump->um_fs, dp->i_offset);
	offset = blkoff(ump->um_fs, dp->i_offset);
	pagedep_lookup(UFSTOVFS(ump), bp, dp->i_number, lbn, DEPALLOC,
	    &pagedep);
	dirrem->dm_pagedep = pagedep;
	dirrem->dm_offset = offset;
	/*
	 * If we're renaming a .. link to a new directory, cancel any
	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
	 * the jremref is preserved for any potential diradd in this
	 * location.  This cannot coincide with an rmdir.
	 */
	if (dp->i_offset == DOTDOT_OFFSET) {
		if (isrmdir)
			panic("newdirrem: .. directory change during remove?");
		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
	}
	/*
	 * If we're removing a directory search for the .. dependency now and
	 * cancel it.  Any pending journal work will be added to the dirrem
	 * to be completed when the workitem remove completes.
	 */
	if (isrmdir)
		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
	/*
	 * Check for a diradd dependency for the same directory entry.
	 * If present, then both dependencies become obsolete and can
	 * be de-allocated.
	 */
	dap = diradd_lookup(pagedep, offset);
	if (dap == NULL) {
		/*
		 * Link the jremref structures into the dirrem so they are
		 * written prior to the pagedep.
		 */
		if (jremref)
			dirrem_journal(dirrem, jremref, dotremref,
			    dotdotremref);
		return (dirrem);
	}
	/*
	 * Must be ATTACHED at this point.
	 */
	if ((dap->da_state & ATTACHED) == 0)
		panic("newdirrem: not ATTACHED");
	if (dap->da_newinum != ip->i_number)
		panic("newdirrem: inum %ju should be %ju",
		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
	/*
	 * If we are deleting a changed name that never made it to disk,
	 * then return the dirrem describing the previous inode (which
	 * represents the inode currently referenced from this entry on disk).
	 */
	if ((dap->da_state & DIRCHG) != 0) {
		*prevdirremp = dap->da_previous;
		dap->da_state &= ~DIRCHG;
		dap->da_pagedep = pagedep;
	}
	/*
	 * We are deleting an entry that never made it to disk.
	 * Mark it COMPLETE so we can delete its inode immediately.
	 */
	dirrem->dm_state |= COMPLETE;
	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
#ifdef INVARIANTS
	if (isrmdir == 0) {
		struct worklist *wk;

		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
	}
#endif

	return (dirrem);
}

/*
 * Directory entry change dependencies.
 *
 * Changing an existing directory entry requires that an add operation
 * be completed first followed by a deletion.  The semantics for the addition
 * are identical to the description of adding a new entry above except
 * that the rollback is to the old inode number rather than zero.  Once
 * the addition dependency is completed, the removal is done as described
 * in the removal routine above.
 */
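
/*
 * Editorial sketch (not part of the original source): for an overwriting
 * rename such as "mv new old" where "old" already exists, the sequence is
 * conceptually
 *
 *	1. a diradd is created for the entry with DIRCHG set; until the
 *	   new inode is committed, any disk copy of the entry is rolled
 *	   back to the old inode number rather than to zero;
 *	2. once the add commits, the dirrem created alongside it retires
 *	   the old inode's link as in an ordinary remove.
 *
 * softdep_setup_directory_change() below builds both halves of this pair.
 */
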
/*
 * This routine should be called immediately after changing
 * a directory entry.  The inode's link count should not be
 * decremented by the calling procedure -- the soft updates
 * code will perform this task when it is safe.
 */
void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;		/* buffer containing directory block */
	struct inode *dp;	/* inode for the directory being modified */
	struct inode *ip;	/* inode for directory entry being removed */
	ino_t newinum;		/* new inode number for changed entry */
	int isrmdir;		/* indicates if doing RMDIR */
{
	int offset;
	struct diradd *dap = NULL;
	struct dirrem *dirrem, *prevdirrem;
	struct pagedep *pagedep;
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct mount *mp;
	struct ufsmount *ump;

	mp = ITOVFS(dp);
	ump = VFSTOUFS(mp);
	offset = blkoff(ump->um_fs, dp->i_offset);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_directory_change called on non-softdep filesystem"));

	/*
	 * Whiteouts do not need diradd dependencies.
	 */
	if (newinum != UFS_WINO) {
		dap = malloc(sizeof(struct diradd),
		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
		workitem_alloc(&dap->da_list, D_DIRADD, mp);
		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
		dap->da_offset = offset;
		dap->da_newinum = newinum;
		LIST_INIT(&dap->da_jwork);
	}

	/*
	 * Allocate a new dirrem and ACQUIRE_LOCK.
	 */
	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
	pagedep = dirrem->dm_pagedep;
	/*
	 * The possible values for isrmdir:
	 *	0 - non-directory file rename
	 *	1 - directory rename within same directory
	 *   inum - directory rename to new directory of given inode number
	 * When renaming to a new directory, we are both deleting and
	 * creating a new directory entry, so the link count on the new
	 * directory should not change.  Thus we do not need the followup
	 * dirrem which is usually done in handle_workitem_remove.  We set
	 * the DIRCHG flag to tell handle_workitem_remove to skip the
	 * followup dirrem.
	 */
	if (isrmdir > 1)
		dirrem->dm_state |= DIRCHG;

	/*
	 * Whiteouts have no additional dependencies,
	 * so just put the dirrem on the correct list.
	 */
	if (newinum == UFS_WINO) {
		if ((dirrem->dm_state & COMPLETE) == 0) {
			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
			    dm_next);
		} else {
			dirrem->dm_dirinum = pagedep->pd_ino;
			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
				add_to_worklist(&dirrem->dm_list, 0);
		}
		FREE_LOCK(ump);
		return;
	}
	/*
	 * Add the dirrem to the inodedep's pending remove list for quick
	 * discovery later.  A valid nlinkdelta ensures that this lookup
	 * will not fail.
	 */
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
		panic("softdep_setup_directory_change: Lost inodedep.");
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);

	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to the previous inode until
	 * the new inode is committed to disk.  If the COMPLETE flag is
	 * set, then we have deleted an entry that never made it to disk.
	 * If the entry we deleted resulted from a name change, then the old
	 * inode reference still resides on disk.  Any rollback that we do
	 * needs to be to that old inode (returned to us in prevdirrem).  If
	 * the entry we deleted resulted from a create, then there is
	 * no entry on the disk, so we want to roll back to zero rather
	 * than the uncommitted inode.  In either of the COMPLETE cases we
	 * want to immediately free the unwritten and unreferenced inode.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		dap->da_previous = dirrem;
	} else {
		if (prevdirrem != NULL) {
			dap->da_previous = prevdirrem;
		} else {
			dap->da_state &= ~DIRCHG;
			dap->da_pagedep = pagedep;
		}
		dirrem->dm_dirinum = pagedep->pd_ino;
		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
			add_to_worklist(&dirrem->dm_list, 0);
	}
	/*
	 * Look up the jaddref for this journal entry.  We must finish
	 * initializing it and make the diradd write dependent on it.
	 * If we're not journaling, put it on the id_bufwait list if the
	 * inode is not yet written.  If it is written, do the post-inode
	 * write processing to put it on the id_pendinghd list.
	 */
	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
	if (MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
		    inoreflst);
		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
		    ("softdep_setup_directory_change: bad jaddref %p",
		    jaddref));
		jaddref->ja_diroff = dp->i_offset;
		jaddref->ja_diradd = dap;
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		add_to_journal(&jaddref->ja_list);
	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
		dap->da_state |= COMPLETE;
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
	} else {
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
	}
	/*
	 * If we're making a new name for a directory that has not been
	 * committed, we need to move the dot and dotdot references to
	 * this new name.
	 */
	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
		merge_diradd(inodedep, dap);
	FREE_LOCK(ump);
}

/*
 * Called whenever the link count on an inode is changed.
 * It creates an inode dependency so that the new reference(s)
 * to the inode cannot be committed to disk until the updated
 * inode has been written.
 */
void
softdep_change_linkcnt(ip)
	struct inode *ip;	/* the inode with the increased link count */
{
	struct inodedep *inodedep;
	struct ufsmount *ump;

	ump = ITOUMP(ip);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_change_linkcnt called on non-softdep filesystem"));
	ACQUIRE_LOCK(ump);
	inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
	if (ip->i_nlink < ip->i_effnlink)
		panic("softdep_change_linkcnt: bad delta");
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(ump);
}
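
/*
 * Editorial example (not in the original source): i_nlink tracks the
 * on-disk link count while i_effnlink tracks the count implied by all
 * completed directory operations.  After "unlink(f)" on a file with two
 * names, and before the zeroed directory block is written:
 *
 *	i_effnlink = 1		the name is already gone in memory
 *	i_nlink    = 2		the on-disk entry still exists
 *	id_nlinkdelta = 1	recorded here so later updates know a
 *				decrement is still pending
 *
 * The panic above fires if i_nlink ever drops below i_effnlink, since the
 * disk may never claim fewer references than memory does.
 */
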
/*
 * Attach a sbdep dependency to the superblock buf so that we can keep
 * track of the head of the linked list of referenced but unlinked inodes.
 */
void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
	struct sbdep *sbdep;
	struct worklist *wk;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_SBDEP)
			break;
	if (wk != NULL)
		return;
	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
	sbdep->sb_fs = fs;
	sbdep->sb_ump = ump;
	ACQUIRE_LOCK(ump);
	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
	FREE_LOCK(ump);
}

/*
 * Return the first unlinked inodedep which is ready to be the head of the
 * list.  The inodedep and all those after it must have valid next pointers.
 */
static struct inodedep *
first_unlinked_inodedep(ump)
	struct ufsmount *ump;
{
	struct inodedep *inodedep;
	struct inodedep *idp;

	LOCK_OWNED(ump);
	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
	    inodedep; inodedep = idp) {
		if ((inodedep->id_state & UNLINKNEXT) == 0)
			return (NULL);
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
			break;
		if ((inodedep->id_state & UNLINKPREV) == 0)
			break;
	}
	return (inodedep);
}

/*
 * Set the sujfree unlinked head pointer prior to writing a superblock.
 */
static void
initiate_write_sbdep(sbdep)
	struct sbdep *sbdep;
{
	struct inodedep *inodedep;
	struct fs *bpfs;
	struct fs *fs;

	bpfs = sbdep->sb_fs;
	fs = sbdep->sb_ump->um_fs;
	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
	if (inodedep) {
		fs->fs_sujfree = inodedep->id_ino;
		inodedep->id_state |= UNLINKPREV;
	} else
		fs->fs_sujfree = 0;
	bpfs->fs_sujfree = fs->fs_sujfree;
	/*
	 * Because we have made changes to the superblock, we need to
	 * recompute its check-hash.
	 */
	bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
}

/*
 * After a superblock is written determine whether it must be written again
 * due to a changing unlinked list head.
 */
static int
handle_written_sbdep(sbdep, bp)
	struct sbdep *sbdep;
	struct buf *bp;
{
	struct inodedep *inodedep;
	struct fs *fs;

	LOCK_OWNED(sbdep->sb_ump);
	fs = sbdep->sb_fs;
	/*
	 * If the superblock doesn't match the in-memory list start over.
	 */
	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
	    (inodedep == NULL && fs->fs_sujfree != 0)) {
		bdirty(bp);
		return (1);
	}
	WORKITEM_FREE(sbdep, D_SBDEP);
	if (fs->fs_sujfree == 0)
		return (0);
	/*
	 * Now that we have a record of this inode in stable store allow it
	 * to be written to free up pending work.  Inodes may see a lot of
	 * write activity after they are unlinked which we must not hold up.
	 */
	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
			    inodedep, inodedep->id_state);
		if (inodedep->id_state & UNLINKONLIST)
			break;
		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
	}

	return (0);
}

/*
 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
 */
static void
unlinked_inodedep(mp, inodedep)
	struct mount *mp;
	struct inodedep *inodedep;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if (MOUNTEDSUJ(mp) == 0)
		return;
	ump->um_fs->fs_fmod = 1;
	if (inodedep->id_state & UNLINKED)
		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
	inodedep->id_state |= UNLINKED;
	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
}
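
/*
 * Editorial sketch (not part of the original source): with journaling the
 * on-disk unlinked-but-referenced inodes form a singly linked chain that
 * recovery can walk:
 *
 *	superblock fs_sujfree -> dinode(A).di_freelink
 *	                            -> dinode(B).di_freelink -> ... -> 0
 *
 * The UNLINKNEXT and UNLINKPREV flags on an inodedep record which of the
 * element's neighboring links have made it to disk.  Removing an element
 * in the middle of the chain, as done below, therefore requires rewriting
 * the predecessor (a dinode, or the superblock for the head) before the
 * element may be forgotten.
 */
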
/*
 * Remove an inodedep from the unlinked inodedep list.  This may require
 * disk writes if the inode has made it that far.
 */
static void
clear_unlinked_inodedep(inodedep)
	struct inodedep *inodedep;
{
	struct ufs2_dinode *dip;
	struct ufsmount *ump;
	struct inodedep *idp;
	struct inodedep *idn;
	struct fs *fs, *bpfs;
	struct buf *bp;
	daddr_t dbn;
	ino_t ino;
	ino_t nino;
	ino_t pino;
	int error;

	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	fs = ump->um_fs;
	ino = inodedep->id_ino;
	error = 0;
	for (;;) {
		LOCK_OWNED(ump);
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		/*
		 * If nothing has yet been written simply remove us from
		 * the in-memory list and return.  This is the most common
		 * case where handle_workitem_remove() loses the final
		 * reference.
		 */
		if ((inodedep->id_state & UNLINKLINKS) == 0)
			break;
		/*
		 * If we have a NEXT pointer and no PREV pointer we can simply
		 * clear NEXT's PREV and remove ourselves from the list.  Be
		 * careful not to clear PREV if the superblock points at
		 * next as well.
		 */
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
			if (idn && fs->fs_sujfree != idn->id_ino)
				idn->id_state &= ~UNLINKPREV;
			break;
		}
		/*
		 * Here we have an inodedep which is actually linked into
		 * the list.  We must remove it by forcing a write to the
		 * link before us, whether it be the superblock or an inode.
		 * Unfortunately the list may change while we're waiting
		 * on the buf lock for either resource so we must loop until
		 * we lock the right one.  If both the superblock and an
		 * inode point to this inode we must clear the inode first
		 * followed by the superblock.
		 */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		pino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			pino = idp->id_ino;
		FREE_LOCK(ump);
		if (pino == 0) {
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
		} else {
			dbn = fsbtodb(fs, ino_to_fsba(fs, pino));
			error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
			    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL,
			    &bp);
		}
		ACQUIRE_LOCK(ump);
		if (error)
			break;
		/* If the list has changed restart the loop. */
		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
		nino = 0;
		if (idp && (idp->id_state & UNLINKNEXT))
			nino = idp->id_ino;
		if (nino != pino ||
		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
			FREE_LOCK(ump);
			brelse(bp);
			ACQUIRE_LOCK(ump);
			continue;
		}
		nino = 0;
		idn = TAILQ_NEXT(inodedep, id_unlinked);
		if (idn)
			nino = idn->id_ino;
		/*
		 * Remove us from the in-memory list.  After this we cannot
		 * access the inodedep.
		 */
		KASSERT((inodedep->id_state & UNLINKED) != 0,
		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
		    inodedep));
		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
		FREE_LOCK(ump);
		/*
		 * The predecessor's next pointer is manually updated here
		 * so that the NEXT flag is never cleared for an element
		 * that is in the list.
		 */
		if (pino == 0) {
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			bpfs = (struct fs *)bp->b_data;
			ffs_oldfscompat_write(bpfs, ump);
			softdep_setup_sbupdate(ump, bpfs, bp);
			/*
			 * Because we may have made changes to the superblock,
			 * we need to recompute its check-hash.
			 */
			bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
		} else if (fs->fs_magic == FS_UFS1_MAGIC) {
			((struct ufs1_dinode *)bp->b_data +
			    ino_to_fsbo(fs, pino))->di_freelink = nino;
		} else {
			dip = (struct ufs2_dinode *)bp->b_data +
			    ino_to_fsbo(fs, pino);
			dip->di_freelink = nino;
			ffs_update_dinode_ckhash(fs, dip);
		}
		/*
		 * If the bwrite fails we have no recourse to recover.  The
		 * filesystem is corrupted already.
		 */
		bwrite(bp);
		ACQUIRE_LOCK(ump);
		/*
		 * If the superblock pointer still needs to be cleared force
		 * a write here.
		 */
		if (fs->fs_sujfree == ino) {
			FREE_LOCK(ump);
			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
			    (int)fs->fs_sbsize, 0, 0, 0);
			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
			bpfs = (struct fs *)bp->b_data;
			ffs_oldfscompat_write(bpfs, ump);
			softdep_setup_sbupdate(ump, bpfs, bp);
			/*
			 * Because we may have made changes to the superblock,
			 * we need to recompute its check-hash.
			 */
			bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
			bwrite(bp);
			ACQUIRE_LOCK(ump);
		}

		if (fs->fs_sujfree != ino)
			return;
		panic("clear_unlinked_inodedep: Failed to clear free head");
	}
	if (inodedep->id_ino == fs->fs_sujfree)
		panic("clear_unlinked_inodedep: Freeing head of free list");
	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
	return;
}

/*
 * This workitem decrements the inode's link count.
 * If the link count reaches zero, the file is removed.
 */
static int
handle_workitem_remove(dirrem, flags)
	struct dirrem *dirrem;
	int flags;
{
	struct inodedep *inodedep;
	struct workhead dotdotwk;
	struct worklist *wk;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *vp;
	struct inode *ip;
	ino_t oldinum;

	if (dirrem->dm_state & ONWORKLIST)
		panic("handle_workitem_remove: dirrem %p still on worklist",
		    dirrem);
	oldinum = dirrem->dm_oldinum;
	mp = dirrem->dm_list.wk_mp;
	ump = VFSTOUFS(mp);
	flags |= LK_EXCLUSIVE;
	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
		return (EBUSY);
	ip = VTOI(vp);
	MPASS(ip->i_mode != 0);
	ACQUIRE_LOCK(ump);
	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
		panic("handle_workitem_remove: lost inodedep");
	if (dirrem->dm_state & ONDEPLIST)
		LIST_REMOVE(dirrem, dm_inonext);
	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
	    ("handle_workitem_remove: Journal entries not written."));

	/*
	 * Move all dependencies waiting on the remove to complete
	 * from the dirrem to the inode inowait list to be completed
	 * after the inode has been updated and written to disk.
	 *
	 * Any marked MKDIR_PARENT are saved to be completed when the
	 * dotdot ref is removed unless DIRCHG is specified.  For
	 * directory change operations there will be no further
	 * directory writes and the jsegdeps need to be moved along
	 * with the rest to be completed when the inode is free or
	 * stable in the inode free list.
	 */
	LIST_INIT(&dotdotwk);
	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
		WORKLIST_REMOVE(wk);
		if ((dirrem->dm_state & DIRCHG) == 0 &&
		    wk->wk_state & MKDIR_PARENT) {
			wk->wk_state &= ~MKDIR_PARENT;
			WORKLIST_INSERT(&dotdotwk, wk);
			continue;
		}
		WORKLIST_INSERT(&inodedep->id_inowait, wk);
	}
	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
	/*
	 * Normal file deletion.
	 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: file ino "
		    "%ju negative i_nlink %d", (intmax_t)ip->i_number,
		    ip->i_nlink));
		DIP_SET(ip, i_nlink, ip->i_nlink);
		UFS_INODE_SET_FLAG(ip, IN_CHANGE);
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion.  Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: directory ino "
	    "%ju negative i_nlink %d", (intmax_t)ip->i_number, ip->i_nlink));
	DIP_SET(ip, i_nlink, ip->i_nlink);
	UFS_INODE_SET_FLAG(ip, IN_CHANGE);
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent.  Since we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change.  Thus we skip the followup dirrem.
	 */
	if (dirrem->dm_state & DIRCHG) {
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	dirrem->dm_state = ONDEPLIST;
	dirrem->dm_oldinum = dirrem->dm_dirinum;
	/*
	 * Place the dirrem on the parent's dirremhd list.
	 */
	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
		panic("handle_workitem_remove: lost dir inodedep");
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
	/*
	 * If the allocated inode has never been written to disk, then
	 * the on-disk inode is zero'ed and we can remove the file
	 * immediately.  When journaling if the inode has been marked
	 * unlinked and not DEPCOMPLETE we know it can never be written.
	 */
	inodedep_lookup(mp, oldinum, 0, &inodedep);
	if (inodedep == NULL ||
	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
	    check_inode_unwritten(inodedep)) {
		FREE_LOCK(ump);
		vput(vp);
		return handle_workitem_remove(dirrem, flags);
	}
	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
	FREE_LOCK(ump);
	UFS_INODE_SET_FLAG(ip, IN_CHANGE);
out:
	ffs_update(vp, 0);
	vput(vp);
	return (0);
}

/*
 * Inode de-allocation dependencies.
 *
 * When an inode's link count is reduced to zero, it can be de-allocated.  We
 * found it convenient to postpone de-allocation until after the inode is
 * written to disk with its new link count (zero).  At this point, all of the
 * on-disk inode's block pointers are nullified and, with careful dependency
 * list ordering, all dependencies related to the inode will be satisfied and
 * the corresponding dependency structures de-allocated.  So, if/when the
 * inode is reused, there will be no mixing of old dependencies with new
 * ones.  This artificial dependency is set up by the block de-allocation
 * procedure above (softdep_setup_freeblocks) and completed by the
 * following procedure.
 */
static void
handle_workitem_freefile(freefile)
	struct freefile *freefile;
{
	struct workhead wkhd;
	struct fs *fs;
	struct ufsmount *ump;
	int error;
#ifdef INVARIANTS
	struct inodedep *idp;
#endif

	ump = VFSTOUFS(freefile->fx_list.wk_mp);
	fs = ump->um_fs;
#ifdef INVARIANTS
	ACQUIRE_LOCK(ump);
	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
	FREE_LOCK(ump);
	if (error)
		panic("handle_workitem_freefile: inodedep %p survived", idp);
#endif
	UFS_LOCK(ump);
	fs->fs_pendinginodes -= 1;
	UFS_UNLOCK(ump);
	LIST_INIT(&wkhd);
	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
		softdep_error("handle_workitem_freefile", error);
	ACQUIRE_LOCK(ump);
	WORKITEM_FREE(freefile, D_FREEFILE);
	FREE_LOCK(ump);
}

/*
 * Helper function which unlinks marker element from work list and returns
 * the next element on the list.
 */
static __inline struct worklist *
markernext(struct worklist *marker)
{
	struct worklist *next;

	next = LIST_NEXT(marker, wk_list);
	LIST_REMOVE(marker, wk_list);
	return next;
}
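
/*
 * Editorial sketch (not in the original source): markernext() supports the
 * marker-based traversal used by softdep_disk_io_initiation() below, which
 * must survive list surgery while the per-mount lock is dropped:
 *
 *	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
 *	     wk = markernext(&marker)) {
 *		LIST_INSERT_AFTER(wk, &marker, wk_list);
 *		... process wk; may sleep in jwait() ...
 *	}
 *
 * Because the marker is re-inserted after the current element on each
 * pass, the walk resumes correctly even if wk itself is removed from the
 * list while we sleep.
 */
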
/*
 * Disk writes.
 *
 * The dependency structures constructed above are most actively used when file
 * system blocks are written to disk.  No constraints are placed on when a
 * block can be written, but unsatisfied update dependencies are made safe by
 * modifying (or replacing) the source memory for the duration of the disk
 * write.  When the disk write completes, the memory block is again brought
 * up-to-date.
 *
 * In-core inode structure reclamation.
 *
 * Because there are a finite number of "in-core" inode structures, they are
 * reused regularly.  By transferring all inode-related dependencies to the
 * in-memory inode block and indexing them separately (via "inodedep"s), we
 * can allow "in-core" inode structures to be reused at any time and avoid
 * any increase in contention.
 *
 * Called just before entering the device driver to initiate a new disk I/O.
 * The buffer must be locked, thus, no I/O completion operations can occur
 * while we are manipulating its associated dependencies.
 */
static void
softdep_disk_io_initiation(bp)
	struct buf *bp;		/* structure describing disk write to occur */
{
	struct worklist *wk;
	struct worklist marker;
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct jblkdep *jblkdep;
	struct newblk *newblk;
	struct ufsmount *ump;

	/*
	 * We only care about write operations.  There should never
	 * be dependencies for reads.
	 */
	if (bp->b_iocmd != BIO_WRITE)
		panic("softdep_disk_io_initiation: not write");

	if (bp->b_vflags & BV_BKGRDINPROG)
		panic("softdep_disk_io_initiation: Writing buffer with "
		    "background write in progress: %p", bp);

	ump = softdep_bp_to_mp(bp);
	if (ump == NULL)
		return;

	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
	PHOLD(curproc);			/* Don't swap out kernel stack */
	ACQUIRE_LOCK(ump);
	/*
	 * Do any necessary pre-I/O processing.
	 */
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	     wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		switch (wk->wk_type) {
		case D_PAGEDEP:
			initiate_write_filepage(WK_PAGEDEP(wk), bp);
			continue;

		case D_INODEDEP:
			inodedep = WK_INODEDEP(wk);
			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
				initiate_write_inodeblock_ufs1(inodedep, bp);
			else
				initiate_write_inodeblock_ufs2(inodedep, bp);
			continue;

		case D_INDIRDEP:
			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
			continue;

		case D_BMSAFEMAP:
			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
			continue;

		case D_JSEG:
			WK_JSEG(wk)->js_buf = NULL;
			continue;

		case D_FREEBLKS:
			freeblks = WK_FREEBLKS(wk);
			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
			/*
			 * We have to wait for the freeblks to be journaled
			 * before we can write an inodeblock with updated
			 * pointers.  Be careful to arrange the marker so
			 * we revisit the freeblks if it's not removed by
			 * the first jwait().
			 */
			if (jblkdep != NULL) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&jblkdep->jb_list, MNT_WAIT);
			}
			continue;

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			/*
			 * We have to wait for the jnewblk to be journaled
			 * before we can write to a block if the contents
			 * may be confused with an earlier file's indirect
			 * at recovery time.  Handle the marker as described
			 * above.
			 */
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk != NULL &&
			    indirblk_lookup(newblk->nb_list.wk_mp,
			    newblk->nb_newblkno)) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
			}
			continue;

		case D_SBDEP:
			initiate_write_sbdep(WK_SBDEP(wk));
			continue;

		case D_MKDIR:
		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
			continue;

		default:
			panic("handle_disk_io_initiation: Unexpected type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	FREE_LOCK(ump);
	PRELE(curproc);		/* Allow swapout of kernel stack */
}
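
/*
 * Editorial note (sketch, not in the original source): the initiate_write_*
 * routines that follow all use the same rollback idiom.  Before the buffer
 * goes to the driver, any pointer or count that the dependency rules do
 * not yet permit on disk is reverted to its last safe value and the
 * dependency is flipped from ATTACHED to UNDONE, e.g.
 *
 *	dap->da_state &= ~ATTACHED;
 *	dap->da_state |= UNDONE;
 *
 * The matching handle_written_*() completion routines restore the
 * up-to-date values and mark the dependency ATTACHED again, so the in-core
 * buffer is only ever stale for the duration of the write.
 */
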
/*
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in a directory.  The buffer must be locked,
 * thus, no I/O completion operations can occur while we are
 * manipulating its associated dependencies.
 */
static void
initiate_write_filepage(pagedep, bp)
	struct pagedep *pagedep;
	struct buf *bp;
{
	struct jremref *jremref;
	struct jmvref *jmvref;
	struct dirrem *dirrem;
	struct diradd *dap;
	struct direct *ep;
	int i;

	if (pagedep->pd_state & IOSTARTED) {
		/*
		 * This can only happen if there is a driver that does not
		 * understand chaining.  Here biodone will reissue the call
		 * to strategy for the incomplete buffers.
		 */
		printf("initiate_write_filepage: already started\n");
		return;
	}
	pagedep->pd_state |= IOSTARTED;
	/*
	 * Wait for all journal remove dependencies to hit the disk.
	 * We cannot allow any potentially conflicting directory adds
	 * to be visible before removes, and rollback is too difficult.
	 * The per-filesystem lock may be dropped and re-acquired, however
	 * we hold the buf locked so the dependency cannot go away.
	 */
	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
			jwait(&jremref->jr_list, MNT_WAIT);
	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
		jwait(&jmvref->jm_list, MNT_WAIT);
	for (i = 0; i < DAHASHSZ; i++) {
		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			if (ep->d_ino != dap->da_newinum)
				panic("%s: dir inum %ju != new %ju",
				    "initiate_write_filepage",
				    (uintmax_t)ep->d_ino,
				    (uintmax_t)dap->da_newinum);
			if (dap->da_state & DIRCHG)
				ep->d_ino = dap->da_previous->dm_oldinum;
			else
				ep->d_ino = 0;
			dap->da_state &= ~ATTACHED;
			dap->da_state |= UNDONE;
		}
	}
}

/*
 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
 * Note that any bug fixes made to this routine must be done in the
 * version found below.
 *
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in an inodeblock.  The buffer must be
 * locked, thus, no I/O completion operations can occur while we
 * are manipulating its associated dependencies.
 */
static void
initiate_write_inodeblock_ufs1(inodedep, bp)
	struct inodedep *inodedep;
	struct buf *bp;			/* The inode block */
{
	struct allocdirect *adp, *lastadp;
	struct ufs1_dinode *dp;
	struct ufs1_dinode *sip;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct fs *fs;
	ufs_lbn_t i;
#ifdef INVARIANTS
	ufs_lbn_t prevlbn = 0;
#endif
	int deplist;

	if (inodedep->id_state & IOSTARTED)
		panic("initiate_write_inodeblock_ufs1: already started");
	inodedep->id_state |= IOSTARTED;
	fs = inodedep->id_fs;
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	LOCK_OWNED(ump);
	dp = (struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, inodedep->id_ino);

	/*
	 * If we're on the unlinked list but have not yet written our
	 * next pointer initialize it here.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
		struct inodedep *inon;

		inon = TAILQ_NEXT(inodedep, id_unlinked);
		dp->di_freelink = inon ? inon->id_ino : 0;
	}
	/*
	 * If the bitmap is not yet written, then the allocated
	 * inode cannot be written to disk.
	 */
	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
		if (inodedep->id_savedino1 != NULL)
			panic("initiate_write_inodeblock_ufs1: I/O underway");
		FREE_LOCK(ump);
		sip = malloc(sizeof(struct ufs1_dinode),
		    M_SAVEDINO, M_SOFTDEP_FLAGS);
		ACQUIRE_LOCK(ump);
		inodedep->id_savedino1 = sip;
		*inodedep->id_savedino1 = *dp;
		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
		dp->di_gen = inodedep->id_savedino1->di_gen;
		dp->di_freelink = inodedep->id_savedino1->di_freelink;
		return;
	}
	/*
	 * If no dependencies, then there is nothing to roll back.
	 */
	inodedep->id_savedsize = dp->di_size;
	inodedep->id_savedextsize = 0;
	inodedep->id_savednlink = dp->di_nlink;
	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	    TAILQ_EMPTY(&inodedep->id_inoreflst))
		return;
	/*
	 * Revert the link count to that of the first unwritten journal entry.
	 */
	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
	if (inoref)
		dp->di_nlink = inoref->if_nlink;
	/*
	 * Set the dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("softdep_write_inodeblock: lbn order");
		prevlbn = adp->ad_offset;
		if (adp->ad_offset < UFS_NDADDR &&
		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
			panic("initiate_write_inodeblock_ufs1: "
			    "direct pointer #%jd mismatch %d != %jd",
			    (intmax_t)adp->ad_offset,
			    dp->di_db[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		if (adp->ad_offset >= UFS_NDADDR &&
		    dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
			panic("initiate_write_inodeblock_ufs1: "
			    "indirect pointer #%jd mismatch %d != %jd",
			    (intmax_t)adp->ad_offset - UFS_NDADDR,
			    dp->di_ib[adp->ad_offset - UFS_NDADDR],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("initiate_write_inodeblock_ufs1: "
			    "Unknown state 0x%x", adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		if (adp->ad_offset >= UFS_NDADDR)
			break;
		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
				panic("initiate_write_inodeblock_ufs1: "
				    "lost dep1");
#endif /* INVARIANTS */
			dp->di_db[i] = 0;
		}
		for (i = 0; i < UFS_NIADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_ib[i] != 0 &&
			    (deplist & ((1 << UFS_NDADDR) << i)) == 0)
				panic("initiate_write_inodeblock_ufs1: "
				    "lost dep2");
#endif /* INVARIANTS */
			dp->di_ib[i] = 0;
		}
		return;
	}
	/*
	 * If we have zero'ed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_db[i] != 0)
				break;
		dp->di_size = (i + 1) * fs->fs_bsize;
	}
	/*
	 * The only dependencies are for indirect blocks.
	 *
	 * The file size for indirect block additions is not guaranteed.
	 * Such a guarantee would be non-trivial to achieve.  The conventional
	 * synchronous write implementation also does not make this guarantee.
	 * Fsck should catch and fix discrepancies.  Arguably, the file size
	 * can be over-estimated without destroying integrity when the file
	 * moves into the indirect blocks (i.e., is large).  If we want to
	 * postpone fsck, we are stuck with this argument.
	 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
}
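
/*
 * Editorial worked example (not in the original source) of the size
 * rollback performed above and repeated for UFS2 below.  Assume an 8K
 * block size and a file whose last write extended it from a 2K fragment
 * at logical block 2 to a full block plus a new fragment at block 3.
 * Until the new blocks are safe, the inode is written with
 *
 *	di_db[2] = adp->ad_oldblkno	the old 2K fragment
 *	di_size  = 2 * 8192 + 2048	fs_bsize * ad_offset + ad_oldsize
 *	di_db[3] = 0			the never-written block is elided
 *
 * so fsck after a crash sees a consistent file ending in its last stable
 * fragment rather than a size that implies unwritten blocks.
 */
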
/*
 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
 * Note that any bug fixes made to this routine must be done in the
 * version found above.
 *
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in an inodeblock.  The buffer must be
 * locked, thus, no I/O completion operations can occur while we
 * are manipulating its associated dependencies.
 */
static void
initiate_write_inodeblock_ufs2(inodedep, bp)
	struct inodedep *inodedep;
	struct buf *bp;			/* The inode block */
{
	struct allocdirect *adp, *lastadp;
	struct ufs2_dinode *dp;
	struct ufs2_dinode *sip;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct fs *fs;
	ufs_lbn_t i;
#ifdef INVARIANTS
	ufs_lbn_t prevlbn = 0;
#endif
	int deplist;

	if (inodedep->id_state & IOSTARTED)
		panic("initiate_write_inodeblock_ufs2: already started");
	inodedep->id_state |= IOSTARTED;
	fs = inodedep->id_fs;
	ump = VFSTOUFS(inodedep->id_list.wk_mp);
	LOCK_OWNED(ump);
	dp = (struct ufs2_dinode *)bp->b_data +
	    ino_to_fsbo(fs, inodedep->id_ino);

	/*
	 * If we're on the unlinked list but have not yet written our
	 * next pointer initialize it here.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
		struct inodedep *inon;

		inon = TAILQ_NEXT(inodedep, id_unlinked);
		dp->di_freelink = inon ? inon->id_ino : 0;
		ffs_update_dinode_ckhash(fs, dp);
	}
	/*
	 * If the bitmap is not yet written, then the allocated
	 * inode cannot be written to disk.
	 */
	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
		if (inodedep->id_savedino2 != NULL)
			panic("initiate_write_inodeblock_ufs2: I/O underway");
		FREE_LOCK(ump);
		sip = malloc(sizeof(struct ufs2_dinode),
		    M_SAVEDINO, M_SOFTDEP_FLAGS);
		ACQUIRE_LOCK(ump);
		inodedep->id_savedino2 = sip;
		*inodedep->id_savedino2 = *dp;
		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
		dp->di_gen = inodedep->id_savedino2->di_gen;
		dp->di_freelink = inodedep->id_savedino2->di_freelink;
		return;
	}
	/*
	 * If no dependencies, then there is nothing to roll back.
	 */
	inodedep->id_savedsize = dp->di_size;
	inodedep->id_savedextsize = dp->di_extsize;
	inodedep->id_savednlink = dp->di_nlink;
	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
	    TAILQ_EMPTY(&inodedep->id_inoreflst))
		return;
	/*
	 * Revert the link count to that of the first unwritten journal entry.
	 */
	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
	if (inoref)
		dp->di_nlink = inoref->if_nlink;

	/*
	 * Set the ext data dependencies to busy.
	 */
	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
	     adp = TAILQ_NEXT(adp, ad_next)) {
#ifdef INVARIANTS
		if (deplist != 0 && prevlbn >= adp->ad_offset)
			panic("initiate_write_inodeblock_ufs2: lbn order");
		prevlbn = adp->ad_offset;
		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
			panic("initiate_write_inodeblock_ufs2: "
			    "ext pointer #%jd mismatch %jd != %jd",
			    (intmax_t)adp->ad_offset,
			    (intmax_t)dp->di_extb[adp->ad_offset],
			    (intmax_t)adp->ad_newblkno);
		deplist |= 1 << adp->ad_offset;
		if ((adp->ad_state & ATTACHED) == 0)
			panic("initiate_write_inodeblock_ufs2: Unknown "
			    "state 0x%x", adp->ad_state);
#endif /* INVARIANTS */
		adp->ad_state &= ~ATTACHED;
		adp->ad_state |= UNDONE;
	}
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written.  Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the ext
	 * data which would corrupt the filesystem.
	 */
	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
		/* keep going until hitting a rollback to a frag */
		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
			continue;
		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
		for (i = adp->ad_offset + 1; i < UFS_NXADDR; i++) {
#ifdef INVARIANTS
			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
				panic("initiate_write_inodeblock_ufs2: "
				    "lost dep1");
#endif /* INVARIANTS */
			dp->di_extb[i] = 0;
		}
		lastadp = NULL;
		break;
	}
	/*
	 * If we have zero'ed out the last allocated block of the ext
	 * data, roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized as
	 * we already checked for fragments in the loop above.
	 */
	if (lastadp != NULL &&
	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
		for (i = lastadp->ad_offset; i >= 0; i--)
			if (dp->di_extb[i] != 0)
				break;
		dp->di_extsize = (i + 1) * fs->fs_bsize;
	}
	/*
	 * Set the file data dependencies to busy.
10586 */ 10587 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10588 adp = TAILQ_NEXT(adp, ad_next)) { 10589 #ifdef INVARIANTS 10590 if (deplist != 0 && prevlbn >= adp->ad_offset) 10591 panic("initiate_write_inodeblock_ufs2: lbn order"); 10592 if ((adp->ad_state & ATTACHED) == 0) 10593 panic("inodedep %p and adp %p not attached", inodedep, adp); 10594 prevlbn = adp->ad_offset; 10595 if (!ffs_fsfail_cleanup(ump, 0) && 10596 adp->ad_offset < UFS_NDADDR && 10597 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10598 panic("initiate_write_inodeblock_ufs2: " 10599 "direct pointer #%jd mismatch %jd != %jd", 10600 (intmax_t)adp->ad_offset, 10601 (intmax_t)dp->di_db[adp->ad_offset], 10602 (intmax_t)adp->ad_newblkno); 10603 if (!ffs_fsfail_cleanup(ump, 0) && 10604 adp->ad_offset >= UFS_NDADDR && 10605 dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno) 10606 panic("initiate_write_inodeblock_ufs2: " 10607 "indirect pointer #%jd mismatch %jd != %jd", 10608 (intmax_t)adp->ad_offset - UFS_NDADDR, 10609 (intmax_t)dp->di_ib[adp->ad_offset - UFS_NDADDR], 10610 (intmax_t)adp->ad_newblkno); 10611 deplist |= 1 << adp->ad_offset; 10612 if ((adp->ad_state & ATTACHED) == 0) 10613 panic("initiate_write_inodeblock_ufs2: Unknown " 10614 "state 0x%x", adp->ad_state); 10615 #endif /* INVARIANTS */ 10616 adp->ad_state &= ~ATTACHED; 10617 adp->ad_state |= UNDONE; 10618 } 10619 /* 10620 * The on-disk inode cannot claim to be any larger than the last 10621 * fragment that has been written. Otherwise, the on-disk inode 10622 * might have fragments that were not the last block in the file 10623 * which would corrupt the filesystem. 10624 */ 10625 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10626 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10627 if (adp->ad_offset >= UFS_NDADDR) 10628 break; 10629 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10630 /* keep going until hitting a rollback to a frag */ 10631 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10632 continue; 10633 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10634 for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) { 10635 #ifdef INVARIANTS 10636 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10637 panic("initiate_write_inodeblock_ufs2: " 10638 "lost dep2"); 10639 #endif /* INVARIANTS */ 10640 dp->di_db[i] = 0; 10641 } 10642 for (i = 0; i < UFS_NIADDR; i++) { 10643 #ifdef INVARIANTS 10644 if (dp->di_ib[i] != 0 && 10645 (deplist & ((1 << UFS_NDADDR) << i)) == 0) 10646 panic("initiate_write_inodeblock_ufs2: " 10647 "lost dep3"); 10648 #endif /* INVARIANTS */ 10649 dp->di_ib[i] = 0; 10650 } 10651 ffs_update_dinode_ckhash(fs, dp); 10652 return; 10653 } 10654 /* 10655 * If we have zero'ed out the last allocated block of the file, 10656 * roll back the size to the last currently allocated block. 10657 * We know that this last allocated block is full-sized, as 10658 * we already checked for fragments in the loop above. 10659 */ 10660 if (lastadp != NULL && 10661 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10662 for (i = lastadp->ad_offset; i >= 0; i--) 10663 if (dp->di_db[i] != 0) 10664 break; 10665 dp->di_size = (i + 1) * fs->fs_bsize; 10666 } 10667 /* 10668 * The only dependencies are for indirect blocks. 10669 * 10670 * The file size for indirect block additions is not guaranteed. 10671 * Such a guarantee would be non-trivial to achieve. The conventional 10672 * synchronous write implementation also does not make this guarantee.
10673 * Fsck should catch and fix discrepancies. Arguably, the file size 10674 * can be over-estimated without destroying integrity when the file 10675 * moves into the indirect blocks (i.e., is large). If we want to 10676 * postpone fsck, we are stuck with this argument. 10677 */ 10678 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 10679 dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0; 10680 ffs_update_dinode_ckhash(fs, dp); 10681 } 10682 10683 /* 10684 * Cancel an indirdep as a result of truncation. Release all of the 10685 * children allocindirs and place their journal work on the appropriate 10686 * list. 10687 */ 10688 static void 10689 cancel_indirdep(indirdep, bp, freeblks) 10690 struct indirdep *indirdep; 10691 struct buf *bp; 10692 struct freeblks *freeblks; 10693 { 10694 struct allocindir *aip; 10695 10696 /* 10697 * None of the indirect pointers will ever be visible, 10698 * so they can simply be tossed. GOINGAWAY ensures 10699 * that allocated pointers will be saved in the buffer 10700 * cache until they are freed. Note that they will 10701 * only be able to be found by their physical address 10702 * since the inode mapping the logical address will 10703 * be gone. The save buffer used for the safe copy 10704 * was allocated in setup_allocindir_phase2 using 10705 * the physical address so it could be used for this 10706 * purpose. Hence we swap the safe copy with the real 10707 * copy, allowing the safe copy to be freed and holding 10708 * on to the real copy for later use in indir_trunc. 10709 */ 10710 if (indirdep->ir_state & GOINGAWAY) 10711 panic("cancel_indirdep: already gone"); 10712 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 10713 indirdep->ir_state |= DEPCOMPLETE; 10714 LIST_REMOVE(indirdep, ir_next); 10715 } 10716 indirdep->ir_state |= GOINGAWAY; 10717 /* 10718 * Pass in bp for blocks that still have journal writes 10719 * pending so we can cancel them on their own. 10720 */ 10721 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL) 10722 cancel_allocindir(aip, bp, freeblks, 0); 10723 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) 10724 cancel_allocindir(aip, NULL, freeblks, 0); 10725 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) 10726 cancel_allocindir(aip, NULL, freeblks, 0); 10727 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) 10728 cancel_allocindir(aip, NULL, freeblks, 0); 10729 /* 10730 * If there are pending partial truncations we need to keep the 10731 * old block copy around until they complete. This is because 10732 * the current b_data is not a perfect superset of the available 10733 * blocks. 10734 */ 10735 if (TAILQ_EMPTY(&indirdep->ir_trunc)) 10736 bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount); 10737 else 10738 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10739 WORKLIST_REMOVE(&indirdep->ir_list); 10740 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list); 10741 indirdep->ir_bp = NULL; 10742 indirdep->ir_freeblks = freeblks; 10743 } 10744 10745 /* 10746 * Free an indirdep once it no longer has new pointers to track.
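 *
 * For orientation, allocindirs migrate through the per-indirdep
 * lists roughly as follows in the common case:
 *
 *	ir_deplisthd	journal/bitmap writes still outstanding
 *	ir_donehd	complete, but the buffer is undergoing I/O
 *	ir_writehd	pointer copied into ir_savebp, write pending
 *	ir_completehd	written, awaiting the parent's pointer
 *
 * The KASSERTs below insist that all of these lists have drained
 * before the indirdep itself is freed.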
10747 */ 10748 static void 10749 free_indirdep(indirdep) 10750 struct indirdep *indirdep; 10751 { 10752 10753 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), 10754 ("free_indirdep: Indir trunc list not empty.")); 10755 KASSERT(LIST_EMPTY(&indirdep->ir_completehd), 10756 ("free_indirdep: Complete head not empty.")); 10757 KASSERT(LIST_EMPTY(&indirdep->ir_writehd), 10758 ("free_indirdep: write head not empty.")); 10759 KASSERT(LIST_EMPTY(&indirdep->ir_donehd), 10760 ("free_indirdep: done head not empty.")); 10761 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), 10762 ("free_indirdep: deplist head not empty.")); 10763 KASSERT((indirdep->ir_state & DEPCOMPLETE), 10764 ("free_indirdep: %p still on newblk list.", indirdep)); 10765 KASSERT(indirdep->ir_saveddata == NULL, 10766 ("free_indirdep: %p still has saved data.", indirdep)); 10767 KASSERT(indirdep->ir_savebp == NULL, 10768 ("free_indirdep: %p still has savebp buffer.", indirdep)); 10769 if (indirdep->ir_state & ONWORKLIST) 10770 WORKLIST_REMOVE(&indirdep->ir_list); 10771 WORKITEM_FREE(indirdep, D_INDIRDEP); 10772 } 10773 10774 /* 10775 * Called before a write to an indirdep. This routine is responsible for 10776 * rolling back pointers to a safe state which includes only those 10777 * allocindirs which have been completed. 10778 */ 10779 static void 10780 initiate_write_indirdep(indirdep, bp) 10781 struct indirdep *indirdep; 10782 struct buf *bp; 10783 { 10784 struct ufsmount *ump; 10785 10786 indirdep->ir_state |= IOSTARTED; 10787 if (indirdep->ir_state & GOINGAWAY) 10788 panic("disk_io_initiation: indirdep gone"); 10789 /* 10790 * If there are no remaining dependencies, this will be writing 10791 * the real pointers. 10792 */ 10793 if (LIST_EMPTY(&indirdep->ir_deplisthd) && 10794 TAILQ_EMPTY(&indirdep->ir_trunc)) 10795 return; 10796 /* 10797 * Replace up-to-date version with safe version. 10798 */ 10799 if (indirdep->ir_saveddata == NULL) { 10800 ump = VFSTOUFS(indirdep->ir_list.wk_mp); 10801 LOCK_OWNED(ump); 10802 FREE_LOCK(ump); 10803 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 10804 M_SOFTDEP_FLAGS); 10805 ACQUIRE_LOCK(ump); 10806 } 10807 indirdep->ir_state &= ~ATTACHED; 10808 indirdep->ir_state |= UNDONE; 10809 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10810 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 10811 bp->b_bcount); 10812 } 10813 10814 /* 10815 * Called when an inode has been cleared in a cg bitmap. 
This finally 10816 * eliminates any canceled jaddrefs 10817 */ 10818 void 10819 softdep_setup_inofree(mp, bp, ino, wkhd) 10820 struct mount *mp; 10821 struct buf *bp; 10822 ino_t ino; 10823 struct workhead *wkhd; 10824 { 10825 struct worklist *wk, *wkn; 10826 struct inodedep *inodedep; 10827 struct ufsmount *ump; 10828 uint8_t *inosused; 10829 struct cg *cgp; 10830 struct fs *fs; 10831 10832 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 10833 ("softdep_setup_inofree called on non-softdep filesystem")); 10834 ump = VFSTOUFS(mp); 10835 ACQUIRE_LOCK(ump); 10836 if (!ffs_fsfail_cleanup(ump, 0)) { 10837 fs = ump->um_fs; 10838 cgp = (struct cg *)bp->b_data; 10839 inosused = cg_inosused(cgp); 10840 if (isset(inosused, ino % fs->fs_ipg)) 10841 panic("softdep_setup_inofree: inode %ju not freed.", 10842 (uintmax_t)ino); 10843 } 10844 if (inodedep_lookup(mp, ino, 0, &inodedep)) 10845 panic("softdep_setup_inofree: ino %ju has existing inodedep %p", 10846 (uintmax_t)ino, inodedep); 10847 if (wkhd) { 10848 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { 10849 if (wk->wk_type != D_JADDREF) 10850 continue; 10851 WORKLIST_REMOVE(wk); 10852 /* 10853 * We can free immediately even if the jaddref 10854 * isn't attached in a background write as now 10855 * the bitmaps are reconciled. 10856 */ 10857 wk->wk_state |= COMPLETE | ATTACHED; 10858 free_jaddref(WK_JADDREF(wk)); 10859 } 10860 jwork_move(&bp->b_dep, wkhd); 10861 } 10862 FREE_LOCK(ump); 10863 } 10864 10865 /* 10866 * Called via ffs_blkfree() after a set of frags has been cleared from a cg 10867 * map. Any dependencies waiting for the write to clear are added to the 10868 * buf's list and any jnewblks that are being canceled are discarded 10869 * immediately. 10870 */ 10871 void 10872 softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 10873 struct mount *mp; 10874 struct buf *bp; 10875 ufs2_daddr_t blkno; 10876 int frags; 10877 struct workhead *wkhd; 10878 { 10879 struct bmsafemap *bmsafemap; 10880 struct jnewblk *jnewblk; 10881 struct ufsmount *ump; 10882 struct worklist *wk; 10883 struct fs *fs; 10884 #ifdef INVARIANTS 10885 uint8_t *blksfree; 10886 struct cg *cgp; 10887 ufs2_daddr_t jstart; 10888 ufs2_daddr_t jend; 10889 ufs2_daddr_t end; 10890 long bno; 10891 int i; 10892 #endif 10893 10894 CTR3(KTR_SUJ, 10895 "softdep_setup_blkfree: blkno %jd frags %d wk head %p", 10896 blkno, frags, wkhd); 10897 10898 ump = VFSTOUFS(mp); 10899 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 10900 ("softdep_setup_blkfree called on non-softdep filesystem")); 10901 ACQUIRE_LOCK(ump); 10902 /* Lookup the bmsafemap so we track when it is dirty. */ 10903 fs = ump->um_fs; 10904 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10905 /* 10906 * Detach any jnewblks which have been canceled. They must linger 10907 * until the bitmap is cleared again by ffs_blkfree() to prevent 10908 * an unjournaled allocation from hitting the disk. 10909 */ 10910 if (wkhd) { 10911 while ((wk = LIST_FIRST(wkhd)) != NULL) { 10912 CTR2(KTR_SUJ, 10913 "softdep_setup_blkfree: blkno %jd wk type %d", 10914 blkno, wk->wk_type); 10915 WORKLIST_REMOVE(wk); 10916 if (wk->wk_type != D_JNEWBLK) { 10917 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); 10918 continue; 10919 } 10920 jnewblk = WK_JNEWBLK(wk); 10921 KASSERT(jnewblk->jn_state & GOINGAWAY, 10922 ("softdep_setup_blkfree: jnewblk not canceled.")); 10923 #ifdef INVARIANTS 10924 /* 10925 * Assert that this block is free in the bitmap 10926 * before we discard the jnewblk. 
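 * Only the journaled range [jn_oldfrags, jn_frags) is checked;
 * fragments below jn_oldfrags belong to the pre-existing
 * allocation that this jnewblk extended and are not covered by
 * its journal record.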
10927 */ 10928 cgp = (struct cg *)bp->b_data; 10929 blksfree = cg_blksfree(cgp); 10930 bno = dtogd(fs, jnewblk->jn_blkno); 10931 for (i = jnewblk->jn_oldfrags; 10932 i < jnewblk->jn_frags; i++) { 10933 if (isset(blksfree, bno + i)) 10934 continue; 10935 panic("softdep_setup_blkfree: not free"); 10936 } 10937 #endif 10938 /* 10939 * Even if it's not attached we can free immediately 10940 * as the new bitmap is correct. 10941 */ 10942 wk->wk_state |= COMPLETE | ATTACHED; 10943 free_jnewblk(jnewblk); 10944 } 10945 } 10946 10947 #ifdef INVARIANTS 10948 /* 10949 * Assert that we are not freeing a block which has an outstanding 10950 * allocation dependency. 10951 */ 10952 fs = VFSTOUFS(mp)->um_fs; 10953 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10954 end = blkno + frags; 10955 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10956 /* 10957 * Don't match against blocks that will be freed when the 10958 * background write is done. 10959 */ 10960 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == 10961 (COMPLETE | DEPCOMPLETE)) 10962 continue; 10963 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; 10964 jend = jnewblk->jn_blkno + jnewblk->jn_frags; 10965 if ((blkno >= jstart && blkno < jend) || 10966 (end > jstart && end <= jend)) { 10967 printf("state 0x%X %jd - %d %d dep %p\n", 10968 jnewblk->jn_state, jnewblk->jn_blkno, 10969 jnewblk->jn_oldfrags, jnewblk->jn_frags, 10970 jnewblk->jn_dep); 10971 panic("softdep_setup_blkfree: " 10972 "%jd-%jd(%d) overlaps with %jd-%jd", 10973 blkno, end, frags, jstart, jend); 10974 } 10975 } 10976 #endif 10977 FREE_LOCK(ump); 10978 } 10979 10980 /* 10981 * Revert a block allocation when the journal record that describes it 10982 * is not yet written. 10983 */ 10984 static int 10985 jnewblk_rollback(jnewblk, fs, cgp, blksfree) 10986 struct jnewblk *jnewblk; 10987 struct fs *fs; 10988 struct cg *cgp; 10989 uint8_t *blksfree; 10990 { 10991 ufs1_daddr_t fragno; 10992 long cgbno, bbase; 10993 int frags, blk; 10994 int i; 10995 10996 frags = 0; 10997 cgbno = dtogd(fs, jnewblk->jn_blkno); 10998 /* 10999 * We have to test which frags need to be rolled back. We may 11000 * be operating on a stale copy when doing background writes. 11001 */ 11002 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) 11003 if (isclr(blksfree, cgbno + i)) 11004 frags++; 11005 if (frags == 0) 11006 return (0); 11007 /* 11008 * This is mostly ffs_blkfree() sans some validation and 11009 * superblock updates. 11010 */ 11011 if (frags == fs->fs_frag) { 11012 fragno = fragstoblks(fs, cgbno); 11013 ffs_setblock(fs, blksfree, fragno); 11014 ffs_clusteracct(fs, cgp, fragno, 1); 11015 cgp->cg_cs.cs_nbfree++; 11016 } else { 11017 cgbno += jnewblk->jn_oldfrags; 11018 bbase = cgbno - fragnum(fs, cgbno); 11019 /* Decrement the old frags. */ 11020 blk = blkmap(fs, blksfree, bbase); 11021 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 11022 /* Deallocate the fragment */ 11023 for (i = 0; i < frags; i++) 11024 setbit(blksfree, cgbno + i); 11025 cgp->cg_cs.cs_nffree += frags; 11026 /* Add back in counts associated with the new frags */ 11027 blk = blkmap(fs, blksfree, bbase); 11028 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 11029 /* If a complete block has been reassembled, account for it. 
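 * For example (assuming fs_frag == 8): rolling back a 3-fragment
 * jnewblk whose neighboring 5 fragments are already free leaves
 * all 8 bits set, so the block is reassembled: cs_nffree drops by
 * fs_frag and cs_nbfree gains one, matching the accounting done
 * by ffs_blkfree().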
*/ 11030 fragno = fragstoblks(fs, bbase); 11031 if (ffs_isblock(fs, blksfree, fragno)) { 11032 cgp->cg_cs.cs_nffree -= fs->fs_frag; 11033 ffs_clusteracct(fs, cgp, fragno, 1); 11034 cgp->cg_cs.cs_nbfree++; 11035 } 11036 } 11037 stat_jnewblk++; 11038 jnewblk->jn_state &= ~ATTACHED; 11039 jnewblk->jn_state |= UNDONE; 11040 11041 return (frags); 11042 } 11043 11044 static void 11045 initiate_write_bmsafemap(bmsafemap, bp) 11046 struct bmsafemap *bmsafemap; 11047 struct buf *bp; /* The cg block. */ 11048 { 11049 struct jaddref *jaddref; 11050 struct jnewblk *jnewblk; 11051 uint8_t *inosused; 11052 uint8_t *blksfree; 11053 struct cg *cgp; 11054 struct fs *fs; 11055 ino_t ino; 11056 11057 /* 11058 * If this is a background write, we did this at the time that 11059 * the copy was made, so we do not need to do it again. 11060 */ 11061 if (bmsafemap->sm_state & IOSTARTED) 11062 return; 11063 bmsafemap->sm_state |= IOSTARTED; 11064 /* 11065 * Clear any inode allocations which are pending journal writes. 11066 */ 11067 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { 11068 cgp = (struct cg *)bp->b_data; 11069 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11070 inosused = cg_inosused(cgp); 11071 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { 11072 ino = jaddref->ja_ino % fs->fs_ipg; 11073 if (isset(inosused, ino)) { 11074 if ((jaddref->ja_mode & IFMT) == IFDIR) 11075 cgp->cg_cs.cs_ndir--; 11076 cgp->cg_cs.cs_nifree++; 11077 clrbit(inosused, ino); 11078 jaddref->ja_state &= ~ATTACHED; 11079 jaddref->ja_state |= UNDONE; 11080 stat_jaddref++; 11081 } else 11082 panic("initiate_write_bmsafemap: inode %ju " 11083 "marked free", (uintmax_t)jaddref->ja_ino); 11084 } 11085 } 11086 /* 11087 * Clear any block allocations which are pending journal writes. 11088 */ 11089 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 11090 cgp = (struct cg *)bp->b_data; 11091 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11092 blksfree = cg_blksfree(cgp); 11093 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 11094 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) 11095 continue; 11096 panic("initiate_write_bmsafemap: block %jd " 11097 "marked free", (intmax_t)jnewblk->jn_blkno); 11098 } 11099 } 11100 /* 11101 * Move allocation lists to the written lists so they can be 11102 * cleared once the block write is complete. 11103 */ 11104 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, 11105 inodedep, id_deps); 11106 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 11107 newblk, nb_deps); 11108 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, 11109 wk_list); 11110 } 11111 11112 void 11113 softdep_handle_error(struct buf *bp) 11114 { 11115 struct ufsmount *ump; 11116 11117 ump = softdep_bp_to_mp(bp); 11118 if (ump == NULL) 11119 return; 11120 11121 if (ffs_fsfail_cleanup(ump, bp->b_error)) { 11122 /* 11123 * No future writes will succeed, so the on-disk image is safe. 11124 * Pretend that this write succeeded so that the softdep state 11125 * will be cleaned up naturally. 11126 */ 11127 bp->b_ioflags &= ~BIO_ERROR; 11128 bp->b_error = 0; 11129 } 11130 } 11131 11132 /* 11133 * This routine is called during the completion interrupt 11134 * service routine for a disk write (from the procedure called 11135 * by the device driver to inform the filesystem caches of 11136 * a request completion). It should be called early in this 11137 * procedure, before the block is made available to other 11138 * processes or other routines are called.
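 *
 * If the write failed (BIO_ERROR) and the buffer was not
 * invalidated, only the rollback-aware handlers (pagedep,
 * inodedep, bmsafemap, indirdep) are run, with WRITESUCCEEDED
 * clear: rolled-back contents are rolled forward again and the
 * buffer is left dirty for a rewrite, but no dependencies are
 * retired.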
11139 * 11140 */ 11141 static void 11142 softdep_disk_write_complete(bp) 11143 struct buf *bp; /* describes the completed disk write */ 11144 { 11145 struct worklist *wk; 11146 struct worklist *owk; 11147 struct ufsmount *ump; 11148 struct workhead reattach; 11149 struct freeblks *freeblks; 11150 struct buf *sbp; 11151 11152 ump = softdep_bp_to_mp(bp); 11153 KASSERT(LIST_EMPTY(&bp->b_dep) || ump != NULL, 11154 ("softdep_disk_write_complete: softdep_bp_to_mp returned NULL " 11155 "with outstanding dependencies for buffer %p", bp)); 11156 if (ump == NULL) 11157 return; 11158 if ((bp->b_ioflags & BIO_ERROR) != 0) 11159 softdep_handle_error(bp); 11160 /* 11161 * If an error occurred while doing the write, then the data 11162 * has not hit the disk and the dependencies cannot be processed. 11163 * But we do have to go through and roll forward any dependencies 11164 * that were rolled back before the disk write. 11165 */ 11166 sbp = NULL; 11167 ACQUIRE_LOCK(ump); 11168 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) { 11169 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 11170 switch (wk->wk_type) { 11171 11172 case D_PAGEDEP: 11173 handle_written_filepage(WK_PAGEDEP(wk), bp, 0); 11174 continue; 11175 11176 case D_INODEDEP: 11177 handle_written_inodeblock(WK_INODEDEP(wk), 11178 bp, 0); 11179 continue; 11180 11181 case D_BMSAFEMAP: 11182 handle_written_bmsafemap(WK_BMSAFEMAP(wk), 11183 bp, 0); 11184 continue; 11185 11186 case D_INDIRDEP: 11187 handle_written_indirdep(WK_INDIRDEP(wk), 11188 bp, &sbp, 0); 11189 continue; 11190 default: 11191 /* nothing to roll forward */ 11192 continue; 11193 } 11194 } 11195 FREE_LOCK(ump); 11196 if (sbp) 11197 brelse(sbp); 11198 return; 11199 } 11200 LIST_INIT(&reattach); 11201 11202 /* 11203 * Ump SU lock must not be released anywhere in this code segment. 
11204 */ 11205 owk = NULL; 11206 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 11207 WORKLIST_REMOVE(wk); 11208 atomic_add_long(&dep_write[wk->wk_type], 1); 11209 if (wk == owk) 11210 panic("duplicate worklist: %p\n", wk); 11211 owk = wk; 11212 switch (wk->wk_type) { 11213 11214 case D_PAGEDEP: 11215 if (handle_written_filepage(WK_PAGEDEP(wk), bp, 11216 WRITESUCCEEDED)) 11217 WORKLIST_INSERT(&reattach, wk); 11218 continue; 11219 11220 case D_INODEDEP: 11221 if (handle_written_inodeblock(WK_INODEDEP(wk), bp, 11222 WRITESUCCEEDED)) 11223 WORKLIST_INSERT(&reattach, wk); 11224 continue; 11225 11226 case D_BMSAFEMAP: 11227 if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp, 11228 WRITESUCCEEDED)) 11229 WORKLIST_INSERT(&reattach, wk); 11230 continue; 11231 11232 case D_MKDIR: 11233 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 11234 continue; 11235 11236 case D_ALLOCDIRECT: 11237 wk->wk_state |= COMPLETE; 11238 handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL); 11239 continue; 11240 11241 case D_ALLOCINDIR: 11242 wk->wk_state |= COMPLETE; 11243 handle_allocindir_partdone(WK_ALLOCINDIR(wk)); 11244 continue; 11245 11246 case D_INDIRDEP: 11247 if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp, 11248 WRITESUCCEEDED)) 11249 WORKLIST_INSERT(&reattach, wk); 11250 continue; 11251 11252 case D_FREEBLKS: 11253 wk->wk_state |= COMPLETE; 11254 freeblks = WK_FREEBLKS(wk); 11255 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE && 11256 LIST_EMPTY(&freeblks->fb_jblkdephd)) 11257 add_to_worklist(wk, WK_NODELAY); 11258 continue; 11259 11260 case D_FREEWORK: 11261 handle_written_freework(WK_FREEWORK(wk)); 11262 break; 11263 11264 case D_JSEGDEP: 11265 free_jsegdep(WK_JSEGDEP(wk)); 11266 continue; 11267 11268 case D_JSEG: 11269 handle_written_jseg(WK_JSEG(wk), bp); 11270 continue; 11271 11272 case D_SBDEP: 11273 if (handle_written_sbdep(WK_SBDEP(wk), bp)) 11274 WORKLIST_INSERT(&reattach, wk); 11275 continue; 11276 11277 case D_FREEDEP: 11278 free_freedep(WK_FREEDEP(wk)); 11279 continue; 11280 11281 default: 11282 panic("softdep_disk_write_complete: Unknown type %s", 11283 TYPENAME(wk->wk_type)); 11284 /* NOTREACHED */ 11285 } 11286 } 11287 /* 11288 * Reattach any requests that must be redone. 11289 */ 11290 while ((wk = LIST_FIRST(&reattach)) != NULL) { 11291 WORKLIST_REMOVE(wk); 11292 WORKLIST_INSERT(&bp->b_dep, wk); 11293 } 11294 FREE_LOCK(ump); 11295 if (sbp) 11296 brelse(sbp); 11297 } 11298 11299 /* 11300 * Called from within softdep_disk_write_complete above. 11301 */ 11302 static void 11303 handle_allocdirect_partdone(adp, wkhd) 11304 struct allocdirect *adp; /* the completed allocdirect */ 11305 struct workhead *wkhd; /* Work to do when inode is written. */ 11306 { 11307 struct allocdirectlst *listhead; 11308 struct allocdirect *listadp; 11309 struct inodedep *inodedep; 11310 long bsize; 11311 11312 LOCK_OWNED(VFSTOUFS(adp->ad_block.nb_list.wk_mp)); 11313 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 11314 return; 11315 /* 11316 * The on-disk inode cannot claim to be any larger than the last 11317 * fragment that has been written. Otherwise, the on-disk inode 11318 * might have fragments that were not the last block in the file 11319 * which would corrupt the filesystem. Thus, we cannot free any 11320 * allocdirects after one whose ad_oldblkno claims a fragment as 11321 * these blocks must be rolled back to zero before writing the inode. 11322 * We check the currently active set of allocdirects in id_inoupdt 11323 * or id_extupdt as appropriate.
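 *
 * For example, if the list holds allocdirects for lbns 3, 4, and
 * 5, and lbn 4's old allocation was a fragment, then even a
 * completed allocdirect for lbn 5 cannot be freed yet; otherwise
 * the inode could be written while di_size still has to be rolled
 * back to the fragment at lbn 4.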
11324 */ 11325 inodedep = adp->ad_inodedep; 11326 bsize = inodedep->id_fs->fs_bsize; 11327 if (adp->ad_state & EXTDATA) 11328 listhead = &inodedep->id_extupdt; 11329 else 11330 listhead = &inodedep->id_inoupdt; 11331 TAILQ_FOREACH(listadp, listhead, ad_next) { 11332 /* found our block */ 11333 if (listadp == adp) 11334 break; 11335 /* continue if the old block is not a fragment */ 11336 if (listadp->ad_oldsize == 0 || 11337 listadp->ad_oldsize == bsize) 11338 continue; 11339 /* hit a fragment */ 11340 return; 11341 } 11342 /* 11343 * If we have reached the end of the current list without 11344 * finding the just finished dependency, then it must be 11345 * on the future dependency list. Future dependencies cannot 11346 * be freed until they are moved to the current list. 11347 */ 11348 if (listadp == NULL) { 11349 #ifdef INVARIANTS 11350 if (adp->ad_state & EXTDATA) 11351 listhead = &inodedep->id_newextupdt; 11352 else 11353 listhead = &inodedep->id_newinoupdt; 11354 TAILQ_FOREACH(listadp, listhead, ad_next) 11355 /* found our block */ 11356 if (listadp == adp) 11357 break; 11358 if (listadp == NULL) 11359 panic("handle_allocdirect_partdone: lost dep"); 11360 #endif /* INVARIANTS */ 11361 return; 11362 } 11363 /* 11364 * If we have found the just finished dependency, then queue 11365 * it along with anything that follows it that is complete. 11366 * Since the pointer has not yet been written in the inode 11367 * as the dependency prevents it, place the allocdirect on the 11368 * bufwait list where it will be freed once the pointer is 11369 * valid. 11370 */ 11371 if (wkhd == NULL) 11372 wkhd = &inodedep->id_bufwait; 11373 for (; adp; adp = listadp) { 11374 listadp = TAILQ_NEXT(adp, ad_next); 11375 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 11376 return; 11377 TAILQ_REMOVE(listhead, adp, ad_next); 11378 WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list); 11379 } 11380 } 11381 11382 /* 11383 * Called from within softdep_disk_write_complete above. This routine 11384 * completes successfully written allocindirs. 11385 */ 11386 static void 11387 handle_allocindir_partdone(aip) 11388 struct allocindir *aip; /* the completed allocindir */ 11389 { 11390 struct indirdep *indirdep; 11391 11392 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 11393 return; 11394 indirdep = aip->ai_indirdep; 11395 LIST_REMOVE(aip, ai_next); 11396 /* 11397 * Don't set a pointer while the buffer is undergoing IO or while 11398 * we have active truncations. 11399 */ 11400 if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) { 11401 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 11402 return; 11403 } 11404 if (indirdep->ir_state & UFS1FMT) 11405 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11406 aip->ai_newblkno; 11407 else 11408 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11409 aip->ai_newblkno; 11410 /* 11411 * Await the pointer write before freeing the allocindir. 11412 */ 11413 LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next); 11414 } 11415 11416 /* 11417 * Release segments held on a jwork list.
11418 */ 11419 static void 11420 handle_jwork(wkhd) 11421 struct workhead *wkhd; 11422 { 11423 struct worklist *wk; 11424 11425 while ((wk = LIST_FIRST(wkhd)) != NULL) { 11426 WORKLIST_REMOVE(wk); 11427 switch (wk->wk_type) { 11428 case D_JSEGDEP: 11429 free_jsegdep(WK_JSEGDEP(wk)); 11430 continue; 11431 case D_FREEDEP: 11432 free_freedep(WK_FREEDEP(wk)); 11433 continue; 11434 case D_FREEFRAG: 11435 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); 11436 WORKITEM_FREE(wk, D_FREEFRAG); 11437 continue; 11438 case D_FREEWORK: 11439 handle_written_freework(WK_FREEWORK(wk)); 11440 continue; 11441 default: 11442 panic("handle_jwork: Unknown type %s\n", 11443 TYPENAME(wk->wk_type)); 11444 } 11445 } 11446 } 11447 11448 /* 11449 * Handle the bufwait list on an inode when it is safe to release items 11450 * held there. This normally happens after an inode block is written but 11451 * may be delayed and handled later if there are pending journal items that 11452 * are not yet safe to be released. 11453 */ 11454 static struct freefile * 11455 handle_bufwait(inodedep, refhd) 11456 struct inodedep *inodedep; 11457 struct workhead *refhd; 11458 { 11459 struct jaddref *jaddref; 11460 struct freefile *freefile; 11461 struct worklist *wk; 11462 11463 freefile = NULL; 11464 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 11465 WORKLIST_REMOVE(wk); 11466 switch (wk->wk_type) { 11467 case D_FREEFILE: 11468 /* 11469 * We defer adding freefile to the worklist 11470 * until all other additions have been made to 11471 * ensure that it will be done after all the 11472 * old blocks have been freed. 11473 */ 11474 if (freefile != NULL) 11475 panic("handle_bufwait: freefile"); 11476 freefile = WK_FREEFILE(wk); 11477 continue; 11478 11479 case D_MKDIR: 11480 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 11481 continue; 11482 11483 case D_DIRADD: 11484 diradd_inode_written(WK_DIRADD(wk), inodedep); 11485 continue; 11486 11487 case D_FREEFRAG: 11488 wk->wk_state |= COMPLETE; 11489 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) 11490 add_to_worklist(wk, 0); 11491 continue; 11492 11493 case D_DIRREM: 11494 wk->wk_state |= COMPLETE; 11495 add_to_worklist(wk, 0); 11496 continue; 11497 11498 case D_ALLOCDIRECT: 11499 case D_ALLOCINDIR: 11500 free_newblk(WK_NEWBLK(wk)); 11501 continue; 11502 11503 case D_JNEWBLK: 11504 wk->wk_state |= COMPLETE; 11505 free_jnewblk(WK_JNEWBLK(wk)); 11506 continue; 11507 11508 /* 11509 * Save freed journal segments and add references on 11510 * the supplied list which will delay their release 11511 * until the cg bitmap is cleared on disk. 11512 */ 11513 case D_JSEGDEP: 11514 if (refhd == NULL) 11515 free_jsegdep(WK_JSEGDEP(wk)); 11516 else 11517 WORKLIST_INSERT(refhd, wk); 11518 continue; 11519 11520 case D_JADDREF: 11521 jaddref = WK_JADDREF(wk); 11522 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 11523 if_deps); 11524 /* 11525 * Transfer any jaddrefs to the list to be freed with 11526 * the bitmap if we're handling a removed file. 11527 */ 11528 if (refhd == NULL) { 11529 wk->wk_state |= COMPLETE; 11530 free_jaddref(jaddref); 11531 } else 11532 WORKLIST_INSERT(refhd, wk); 11533 continue; 11534 11535 default: 11536 panic("handle_bufwait: Unknown type %p(%s)", 11537 wk, TYPENAME(wk->wk_type)); 11538 /* NOTREACHED */ 11539 } 11540 } 11541 return (freefile); 11542 } 11543 /* 11544 * Called from within softdep_disk_write_complete above to restore 11545 * in-memory inode block contents to their most up-to-date state. 
Note 11546 * that this routine is always called from interrupt level with further 11547 * interrupts from this device blocked. 11548 * 11549 * If the write did not succeed, we will do all the roll-forward 11550 * operations, but we will not take the actions that will allow its 11551 * dependencies to be processed. 11552 */ 11553 static int 11554 handle_written_inodeblock(inodedep, bp, flags) 11555 struct inodedep *inodedep; 11556 struct buf *bp; /* buffer containing the inode block */ 11557 int flags; 11558 { 11559 struct freefile *freefile; 11560 struct allocdirect *adp, *nextadp; 11561 struct ufs1_dinode *dp1 = NULL; 11562 struct ufs2_dinode *dp2 = NULL; 11563 struct workhead wkhd; 11564 int hadchanges, fstype; 11565 ino_t freelink; 11566 11567 LIST_INIT(&wkhd); 11568 hadchanges = 0; 11569 freefile = NULL; 11570 if ((inodedep->id_state & IOSTARTED) == 0) 11571 panic("handle_written_inodeblock: not started"); 11572 inodedep->id_state &= ~IOSTARTED; 11573 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 11574 fstype = UFS1; 11575 dp1 = (struct ufs1_dinode *)bp->b_data + 11576 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11577 freelink = dp1->di_freelink; 11578 } else { 11579 fstype = UFS2; 11580 dp2 = (struct ufs2_dinode *)bp->b_data + 11581 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11582 freelink = dp2->di_freelink; 11583 } 11584 /* 11585 * Leave this inodeblock dirty until it's in the list. 11586 */ 11587 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED && 11588 (flags & WRITESUCCEEDED)) { 11589 struct inodedep *inon; 11590 11591 inon = TAILQ_NEXT(inodedep, id_unlinked); 11592 if ((inon == NULL && freelink == 0) || 11593 (inon && inon->id_ino == freelink)) { 11594 if (inon) 11595 inon->id_state |= UNLINKPREV; 11596 inodedep->id_state |= UNLINKNEXT; 11597 } 11598 hadchanges = 1; 11599 } 11600 /* 11601 * If we had to rollback the inode allocation because of 11602 * bitmaps being incomplete, then simply restore it. 11603 * Keep the block dirty so that it will not be reclaimed until 11604 * all associated dependencies have been cleared and the 11605 * corresponding updates written to disk. 11606 */ 11607 if (inodedep->id_savedino1 != NULL) { 11608 hadchanges = 1; 11609 if (fstype == UFS1) 11610 *dp1 = *inodedep->id_savedino1; 11611 else 11612 *dp2 = *inodedep->id_savedino2; 11613 free(inodedep->id_savedino1, M_SAVEDINO); 11614 inodedep->id_savedino1 = NULL; 11615 if ((bp->b_flags & B_DELWRI) == 0) 11616 stat_inode_bitmap++; 11617 bdirty(bp); 11618 /* 11619 * If the inode is clear here and GOINGAWAY it will never 11620 * be written. Process the bufwait and clear any pending 11621 * work which may include the freefile. 11622 */ 11623 if (inodedep->id_state & GOINGAWAY) 11624 goto bufwait; 11625 return (1); 11626 } 11627 if (flags & WRITESUCCEEDED) 11628 inodedep->id_state |= COMPLETE; 11629 /* 11630 * Roll forward anything that had to be rolled back before 11631 * the inode could be updated. 
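 *
 * This is the mirror image of initiate_write_inodeblock_ufs1 and
 * _ufs2: each UNDONE allocdirect has the safe ad_oldblkno in the
 * dinode replaced with ad_newblkno and is flipped back to
 * ATTACHED.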
11632 */ 11633 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 11634 nextadp = TAILQ_NEXT(adp, ad_next); 11635 if (adp->ad_state & ATTACHED) 11636 panic("handle_written_inodeblock: new entry"); 11637 if (fstype == UFS1) { 11638 if (adp->ad_offset < UFS_NDADDR) { 11639 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11640 panic("%s %s #%jd mismatch %d != %jd", 11641 "handle_written_inodeblock:", 11642 "direct pointer", 11643 (intmax_t)adp->ad_offset, 11644 dp1->di_db[adp->ad_offset], 11645 (intmax_t)adp->ad_oldblkno); 11646 dp1->di_db[adp->ad_offset] = adp->ad_newblkno; 11647 } else { 11648 if (dp1->di_ib[adp->ad_offset - UFS_NDADDR] != 11649 0) 11650 panic("%s: %s #%jd allocated as %d", 11651 "handle_written_inodeblock", 11652 "indirect pointer", 11653 (intmax_t)adp->ad_offset - 11654 UFS_NDADDR, 11655 dp1->di_ib[adp->ad_offset - 11656 UFS_NDADDR]); 11657 dp1->di_ib[adp->ad_offset - UFS_NDADDR] = 11658 adp->ad_newblkno; 11659 } 11660 } else { 11661 if (adp->ad_offset < UFS_NDADDR) { 11662 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11663 panic("%s: %s #%jd %s %jd != %jd", 11664 "handle_written_inodeblock", 11665 "direct pointer", 11666 (intmax_t)adp->ad_offset, "mismatch", 11667 (intmax_t)dp2->di_db[adp->ad_offset], 11668 (intmax_t)adp->ad_oldblkno); 11669 dp2->di_db[adp->ad_offset] = adp->ad_newblkno; 11670 } else { 11671 if (dp2->di_ib[adp->ad_offset - UFS_NDADDR] != 11672 0) 11673 panic("%s: %s #%jd allocated as %jd", 11674 "handle_written_inodeblock", 11675 "indirect pointer", 11676 (intmax_t)adp->ad_offset - 11677 UFS_NDADDR, 11678 (intmax_t) 11679 dp2->di_ib[adp->ad_offset - 11680 UFS_NDADDR]); 11681 dp2->di_ib[adp->ad_offset - UFS_NDADDR] = 11682 adp->ad_newblkno; 11683 } 11684 } 11685 adp->ad_state &= ~UNDONE; 11686 adp->ad_state |= ATTACHED; 11687 hadchanges = 1; 11688 } 11689 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 11690 nextadp = TAILQ_NEXT(adp, ad_next); 11691 if (adp->ad_state & ATTACHED) 11692 panic("handle_written_inodeblock: new entry"); 11693 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) 11694 panic("%s: direct pointers #%jd %s %jd != %jd", 11695 "handle_written_inodeblock", 11696 (intmax_t)adp->ad_offset, "mismatch", 11697 (intmax_t)dp2->di_extb[adp->ad_offset], 11698 (intmax_t)adp->ad_oldblkno); 11699 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; 11700 adp->ad_state &= ~UNDONE; 11701 adp->ad_state |= ATTACHED; 11702 hadchanges = 1; 11703 } 11704 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 11705 stat_direct_blk_ptrs++; 11706 /* 11707 * Reset the file size to its most up-to-date value. 
11708 */ 11709 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) 11710 panic("handle_written_inodeblock: bad size"); 11711 if (inodedep->id_savednlink > UFS_LINK_MAX) 11712 panic("handle_written_inodeblock: Invalid link count " 11713 "%ju for inodedep %p", (uintmax_t)inodedep->id_savednlink, 11714 inodedep); 11715 if (fstype == UFS1) { 11716 if (dp1->di_nlink != inodedep->id_savednlink) { 11717 dp1->di_nlink = inodedep->id_savednlink; 11718 hadchanges = 1; 11719 } 11720 if (dp1->di_size != inodedep->id_savedsize) { 11721 dp1->di_size = inodedep->id_savedsize; 11722 hadchanges = 1; 11723 } 11724 } else { 11725 if (dp2->di_nlink != inodedep->id_savednlink) { 11726 dp2->di_nlink = inodedep->id_savednlink; 11727 hadchanges = 1; 11728 } 11729 if (dp2->di_size != inodedep->id_savedsize) { 11730 dp2->di_size = inodedep->id_savedsize; 11731 hadchanges = 1; 11732 } 11733 if (dp2->di_extsize != inodedep->id_savedextsize) { 11734 dp2->di_extsize = inodedep->id_savedextsize; 11735 hadchanges = 1; 11736 } 11737 } 11738 inodedep->id_savedsize = -1; 11739 inodedep->id_savedextsize = -1; 11740 inodedep->id_savednlink = -1; 11741 /* 11742 * If there were any rollbacks in the inode block, then it must be 11743 * marked dirty so that it will eventually get written back in 11744 * its correct form. 11745 */ 11746 if (hadchanges) { 11747 if (fstype == UFS2) 11748 ffs_update_dinode_ckhash(inodedep->id_fs, dp2); 11749 bdirty(bp); 11750 } 11751 bufwait: 11752 /* 11753 * If the write did not succeed, we have done all the roll-forward 11754 * operations, but we cannot take the actions that will allow its 11755 * dependencies to be processed. 11756 */ 11757 if ((flags & WRITESUCCEEDED) == 0) 11758 return (hadchanges); 11759 /* 11760 * Process any allocdirects that completed during the update. 11761 */ 11762 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 11763 handle_allocdirect_partdone(adp, &wkhd); 11764 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 11765 handle_allocdirect_partdone(adp, &wkhd); 11766 /* 11767 * Process deallocations that were held pending until the 11768 * inode had been written to disk. Freeing of the inode 11769 * is delayed until after all blocks have been freed to 11770 * avoid creation of new <vfsid, inum, lbn> triples 11771 * before the old ones have been deleted. Completely 11772 * unlinked inodes are not processed until the unlinked 11773 * inode list is written or the last reference is removed. 11774 */ 11775 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) { 11776 freefile = handle_bufwait(inodedep, NULL); 11777 if (freefile && !LIST_EMPTY(&wkhd)) { 11778 WORKLIST_INSERT(&wkhd, &freefile->fx_list); 11779 freefile = NULL; 11780 } 11781 } 11782 /* 11783 * Move rolled forward dependency completions to the bufwait list 11784 * now that those that were already written have been processed. 11785 */ 11786 if (!LIST_EMPTY(&wkhd) && hadchanges == 0) 11787 panic("handle_written_inodeblock: bufwait but no changes"); 11788 jwork_move(&inodedep->id_bufwait, &wkhd); 11789 11790 if (freefile != NULL) { 11791 /* 11792 * If the inode is goingaway it was never written. Fake up 11793 * the state here so free_inodedep() can succeed.
11794 */ 11795 if (inodedep->id_state & GOINGAWAY) 11796 inodedep->id_state |= COMPLETE | DEPCOMPLETE; 11797 if (free_inodedep(inodedep) == 0) 11798 panic("handle_written_inodeblock: live inodedep %p", 11799 inodedep); 11800 add_to_worklist(&freefile->fx_list, 0); 11801 return (0); 11802 } 11803 11804 /* 11805 * If no outstanding dependencies, free it. 11806 */ 11807 if (free_inodedep(inodedep) || 11808 (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 && 11809 TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 11810 TAILQ_FIRST(&inodedep->id_extupdt) == 0 && 11811 LIST_FIRST(&inodedep->id_bufwait) == 0)) 11812 return (0); 11813 return (hadchanges); 11814 } 11815 11816 /* 11817 * Perform needed roll-forwards and kick off any dependencies that 11818 * can now be processed. 11819 * 11820 * If the write did not succeed, we will do all the roll-forward 11821 * operations, but we will not take the actions that will allow its 11822 * dependencies to be processed. 11823 */ 11824 static int 11825 handle_written_indirdep(indirdep, bp, bpp, flags) 11826 struct indirdep *indirdep; 11827 struct buf *bp; 11828 struct buf **bpp; 11829 int flags; 11830 { 11831 struct allocindir *aip; 11832 struct buf *sbp; 11833 int chgs; 11834 11835 if (indirdep->ir_state & GOINGAWAY) 11836 panic("handle_written_indirdep: indirdep gone"); 11837 if ((indirdep->ir_state & IOSTARTED) == 0) 11838 panic("handle_written_indirdep: IO not started"); 11839 chgs = 0; 11840 /* 11841 * If there were rollbacks revert them here. 11842 */ 11843 if (indirdep->ir_saveddata) { 11844 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 11845 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11846 free(indirdep->ir_saveddata, M_INDIRDEP); 11847 indirdep->ir_saveddata = NULL; 11848 } 11849 chgs = 1; 11850 } 11851 indirdep->ir_state &= ~(UNDONE | IOSTARTED); 11852 indirdep->ir_state |= ATTACHED; 11853 /* 11854 * If the write did not succeed, we have done all the roll-forward 11855 * operations, but we cannot take the actions that will allow its 11856 * dependencies to be processed. 11857 */ 11858 if ((flags & WRITESUCCEEDED) == 0) { 11859 stat_indir_blk_ptrs++; 11860 bdirty(bp); 11861 return (1); 11862 } 11863 /* 11864 * Move allocindirs with written pointers to the completehd if 11865 * the indirdep's pointer is not yet written. Otherwise 11866 * free them here. 11867 */ 11868 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) { 11869 LIST_REMOVE(aip, ai_next); 11870 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 11871 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, 11872 ai_next); 11873 newblk_freefrag(&aip->ai_block); 11874 continue; 11875 } 11876 free_newblk(&aip->ai_block); 11877 } 11878 /* 11879 * Move allocindirs that have finished dependency processing from 11880 * the done list to the write list after updating the pointers. 11881 */ 11882 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11883 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) { 11884 handle_allocindir_partdone(aip); 11885 if (aip == LIST_FIRST(&indirdep->ir_donehd)) 11886 panic("disk_write_complete: not gone"); 11887 chgs = 1; 11888 } 11889 } 11890 /* 11891 * Preserve the indirdep if there were any changes or if it is not 11892 * yet valid on disk. 11893 */ 11894 if (chgs) { 11895 stat_indir_blk_ptrs++; 11896 bdirty(bp); 11897 return (1); 11898 } 11899 /* 11900 * If there were no changes we can discard the savedbp and detach 11901 * ourselves from the buf. We are only carrying completed pointers 11902 * in this case. 
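 *
 * The save buffer is invalidated (B_INVAL | B_NOCACHE) and handed
 * back through *bpp so the caller can brelse() it after dropping
 * the softdep lock.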
11903 */ 11904 sbp = indirdep->ir_savebp; 11905 sbp->b_flags |= B_INVAL | B_NOCACHE; 11906 indirdep->ir_savebp = NULL; 11907 indirdep->ir_bp = NULL; 11908 if (*bpp != NULL) 11909 panic("handle_written_indirdep: bp already exists."); 11910 *bpp = sbp; 11911 /* 11912 * The indirdep may not be freed until its parent points at it. 11913 */ 11914 if (indirdep->ir_state & DEPCOMPLETE) 11915 free_indirdep(indirdep); 11916 11917 return (0); 11918 } 11919 11920 /* 11921 * Process a diradd entry after its dependent inode has been written. 11922 */ 11923 static void 11924 diradd_inode_written(dap, inodedep) 11925 struct diradd *dap; 11926 struct inodedep *inodedep; 11927 { 11928 11929 LOCK_OWNED(VFSTOUFS(dap->da_list.wk_mp)); 11930 dap->da_state |= COMPLETE; 11931 complete_diradd(dap); 11932 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 11933 } 11934 11935 /* 11936 * Returns true if the bmsafemap will have rollbacks when written. Must only 11937 * be called with the per-filesystem lock and the buf lock on the cg held. 11938 */ 11939 static int 11940 bmsafemap_backgroundwrite(bmsafemap, bp) 11941 struct bmsafemap *bmsafemap; 11942 struct buf *bp; 11943 { 11944 int dirty; 11945 11946 LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp)); 11947 dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) | 11948 !LIST_EMPTY(&bmsafemap->sm_jnewblkhd); 11949 /* 11950 * If we're initiating a background write we need to process the 11951 * rollbacks as they exist now, not as they exist when IO starts. 11952 * No other consumers will look at the contents of the shadowed 11953 * buf so this is safe to do here. 11954 */ 11955 if (bp->b_xflags & BX_BKGRDMARKER) 11956 initiate_write_bmsafemap(bmsafemap, bp); 11957 11958 return (dirty); 11959 } 11960 11961 /* 11962 * Re-apply an allocation when a cg write is complete. 11963 */ 11964 static int 11965 jnewblk_rollforward(jnewblk, fs, cgp, blksfree) 11966 struct jnewblk *jnewblk; 11967 struct fs *fs; 11968 struct cg *cgp; 11969 uint8_t *blksfree; 11970 { 11971 ufs1_daddr_t fragno; 11972 ufs2_daddr_t blkno; 11973 long cgbno, bbase; 11974 int frags, blk; 11975 int i; 11976 11977 frags = 0; 11978 cgbno = dtogd(fs, jnewblk->jn_blkno); 11979 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) { 11980 if (isclr(blksfree, cgbno + i)) 11981 panic("jnewblk_rollforward: re-allocated fragment"); 11982 frags++; 11983 } 11984 if (frags == fs->fs_frag) { 11985 blkno = fragstoblks(fs, cgbno); 11986 ffs_clrblock(fs, blksfree, (long)blkno); 11987 ffs_clusteracct(fs, cgp, blkno, -1); 11988 cgp->cg_cs.cs_nbfree--; 11989 } else { 11990 bbase = cgbno - fragnum(fs, cgbno); 11991 cgbno += jnewblk->jn_oldfrags; 11992 /* If a complete block had been reassembled, account for it. */ 11993 fragno = fragstoblks(fs, bbase); 11994 if (ffs_isblock(fs, blksfree, fragno)) { 11995 cgp->cg_cs.cs_nffree += fs->fs_frag; 11996 ffs_clusteracct(fs, cgp, fragno, -1); 11997 cgp->cg_cs.cs_nbfree--; 11998 } 11999 /* Decrement the old frags. */ 12000 blk = blkmap(fs, blksfree, bbase); 12001 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 12002 /* Allocate the fragment */ 12003 for (i = 0; i < frags; i++) 12004 clrbit(blksfree, cgbno + i); 12005 cgp->cg_cs.cs_nffree -= frags; 12006 /* Add back in counts associated with the new frags */ 12007 blk = blkmap(fs, blksfree, bbase); 12008 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 12009 } 12010 return (frags); 12011 } 12012 12013 /* 12014 * Complete a write to a bmsafemap structure. Roll forward any bitmap 12015 * changes if it's not a background write. 
Set all written dependencies 12016 * to DEPCOMPLETE and free the structure if possible. 12017 * 12018 * If the write did not succeed, we will do all the roll-forward 12019 * operations, but we will not take the actions that will allow its 12020 * dependencies to be processed. 12021 */ 12022 static int 12023 handle_written_bmsafemap(bmsafemap, bp, flags) 12024 struct bmsafemap *bmsafemap; 12025 struct buf *bp; 12026 int flags; 12027 { 12028 struct newblk *newblk; 12029 struct inodedep *inodedep; 12030 struct jaddref *jaddref, *jatmp; 12031 struct jnewblk *jnewblk, *jntmp; 12032 struct ufsmount *ump; 12033 uint8_t *inosused; 12034 uint8_t *blksfree; 12035 struct cg *cgp; 12036 struct fs *fs; 12037 ino_t ino; 12038 int foreground; 12039 int chgs; 12040 12041 if ((bmsafemap->sm_state & IOSTARTED) == 0) 12042 panic("handle_written_bmsafemap: Not started\n"); 12043 ump = VFSTOUFS(bmsafemap->sm_list.wk_mp); 12044 chgs = 0; 12045 bmsafemap->sm_state &= ~IOSTARTED; 12046 foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0; 12047 /* 12048 * If write was successful, release journal work that was waiting 12049 * on the write. Otherwise move the work back. 12050 */ 12051 if (flags & WRITESUCCEEDED) 12052 handle_jwork(&bmsafemap->sm_freewr); 12053 else 12054 LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, 12055 worklist, wk_list); 12056 12057 /* 12058 * Restore unwritten inode allocation pending jaddref writes. 12059 */ 12060 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { 12061 cgp = (struct cg *)bp->b_data; 12062 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 12063 inosused = cg_inosused(cgp); 12064 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, 12065 ja_bmdeps, jatmp) { 12066 if ((jaddref->ja_state & UNDONE) == 0) 12067 continue; 12068 ino = jaddref->ja_ino % fs->fs_ipg; 12069 if (isset(inosused, ino)) 12070 panic("handle_written_bmsafemap: " 12071 "re-allocated inode"); 12072 /* Do the roll-forward only if it's a real copy. */ 12073 if (foreground) { 12074 if ((jaddref->ja_mode & IFMT) == IFDIR) 12075 cgp->cg_cs.cs_ndir++; 12076 cgp->cg_cs.cs_nifree--; 12077 setbit(inosused, ino); 12078 chgs = 1; 12079 } 12080 jaddref->ja_state &= ~UNDONE; 12081 jaddref->ja_state |= ATTACHED; 12082 free_jaddref(jaddref); 12083 } 12084 } 12085 /* 12086 * Restore any block allocations which are pending journal writes. 12087 */ 12088 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 12089 cgp = (struct cg *)bp->b_data; 12090 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 12091 blksfree = cg_blksfree(cgp); 12092 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, 12093 jntmp) { 12094 if ((jnewblk->jn_state & UNDONE) == 0) 12095 continue; 12096 /* Do the roll-forward only if it's a real copy. */ 12097 if (foreground && 12098 jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) 12099 chgs = 1; 12100 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); 12101 jnewblk->jn_state |= ATTACHED; 12102 free_jnewblk(jnewblk); 12103 } 12104 } 12105 /* 12106 * If the write did not succeed, we have done all the roll-forward 12107 * operations, but we cannot take the actions that will allow its 12108 * dependencies to be processed. 
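 * Instead, the written lists are concatenated back onto their
 * pending counterparts below and the buffer is redirtied, so the
 * rollbacks will be redone when the buffer is rewritten.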
12109 */ 12110 if ((flags & WRITESUCCEEDED) == 0) { 12111 LIST_CONCAT(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 12112 newblk, nb_deps); 12113 LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, 12114 worklist, wk_list); 12115 if (foreground) 12116 bdirty(bp); 12117 return (1); 12118 } 12119 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { 12120 newblk->nb_state |= DEPCOMPLETE; 12121 newblk->nb_state &= ~ONDEPLIST; 12122 newblk->nb_bmsafemap = NULL; 12123 LIST_REMOVE(newblk, nb_deps); 12124 if (newblk->nb_list.wk_type == D_ALLOCDIRECT) 12125 handle_allocdirect_partdone( 12126 WK_ALLOCDIRECT(&newblk->nb_list), NULL); 12127 else if (newblk->nb_list.wk_type == D_ALLOCINDIR) 12128 handle_allocindir_partdone( 12129 WK_ALLOCINDIR(&newblk->nb_list)); 12130 else if (newblk->nb_list.wk_type != D_NEWBLK) 12131 panic("handle_written_bmsafemap: Unexpected type: %s", 12132 TYPENAME(newblk->nb_list.wk_type)); 12133 } 12134 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { 12135 inodedep->id_state |= DEPCOMPLETE; 12136 inodedep->id_state &= ~ONDEPLIST; 12137 LIST_REMOVE(inodedep, id_deps); 12138 inodedep->id_bmsafemap = NULL; 12139 } 12140 LIST_REMOVE(bmsafemap, sm_next); 12141 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && 12142 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && 12143 LIST_EMPTY(&bmsafemap->sm_newblkhd) && 12144 LIST_EMPTY(&bmsafemap->sm_inodedephd) && 12145 LIST_EMPTY(&bmsafemap->sm_freehd)) { 12146 LIST_REMOVE(bmsafemap, sm_hash); 12147 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 12148 return (0); 12149 } 12150 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 12151 if (foreground) 12152 bdirty(bp); 12153 return (1); 12154 } 12155 12156 /* 12157 * Try to free a mkdir dependency. 12158 */ 12159 static void 12160 complete_mkdir(mkdir) 12161 struct mkdir *mkdir; 12162 { 12163 struct diradd *dap; 12164 12165 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) 12166 return; 12167 LIST_REMOVE(mkdir, md_mkdirs); 12168 dap = mkdir->md_diradd; 12169 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 12170 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { 12171 dap->da_state |= DEPCOMPLETE; 12172 complete_diradd(dap); 12173 } 12174 WORKITEM_FREE(mkdir, D_MKDIR); 12175 } 12176 12177 /* 12178 * Handle the completion of a mkdir dependency. 12179 */ 12180 static void 12181 handle_written_mkdir(mkdir, type) 12182 struct mkdir *mkdir; 12183 int type; 12184 { 12185 12186 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) 12187 panic("handle_written_mkdir: bad type"); 12188 mkdir->md_state |= COMPLETE; 12189 complete_mkdir(mkdir); 12190 } 12191 12192 static int 12193 free_pagedep(pagedep) 12194 struct pagedep *pagedep; 12195 { 12196 int i; 12197 12198 if (pagedep->pd_state & NEWBLOCK) 12199 return (0); 12200 if (!LIST_EMPTY(&pagedep->pd_dirremhd)) 12201 return (0); 12202 for (i = 0; i < DAHASHSZ; i++) 12203 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) 12204 return (0); 12205 if (!LIST_EMPTY(&pagedep->pd_pendinghd)) 12206 return (0); 12207 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) 12208 return (0); 12209 if (pagedep->pd_state & ONWORKLIST) 12210 WORKLIST_REMOVE(&pagedep->pd_list); 12211 LIST_REMOVE(pagedep, pd_hash); 12212 WORKITEM_FREE(pagedep, D_PAGEDEP); 12213 12214 return (1); 12215 } 12216 12217 /* 12218 * Called from within softdep_disk_write_complete above. 12219 * A write operation was just completed. Removed inodes can 12220 * now be freed and associated block pointers may be committed. 
12221 * Note that this routine is always called from interrupt level 12222 * with further interrupts from this device blocked. 12223 * 12224 * If the write did not succeed, we will do all the roll-forward 12225 * operations, but we will not take the actions that will allow its 12226 * dependencies to be processed. 12227 */ 12228 static int 12229 handle_written_filepage(pagedep, bp, flags) 12230 struct pagedep *pagedep; 12231 struct buf *bp; /* buffer containing the written page */ 12232 int flags; 12233 { 12234 struct dirrem *dirrem; 12235 struct diradd *dap, *nextdap; 12236 struct direct *ep; 12237 int i, chgs; 12238 12239 if ((pagedep->pd_state & IOSTARTED) == 0) 12240 panic("handle_written_filepage: not started"); 12241 pagedep->pd_state &= ~IOSTARTED; 12242 if ((flags & WRITESUCCEEDED) == 0) 12243 goto rollforward; 12244 /* 12245 * Process any directory removals that have been committed. 12246 */ 12247 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 12248 LIST_REMOVE(dirrem, dm_next); 12249 dirrem->dm_state |= COMPLETE; 12250 dirrem->dm_dirinum = pagedep->pd_ino; 12251 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 12252 ("handle_written_filepage: Journal entries not written.")); 12253 add_to_worklist(&dirrem->dm_list, 0); 12254 } 12255 /* 12256 * Free any directory additions that have been committed. 12257 * If it is a newly allocated block, we have to wait until 12258 * the on-disk directory inode claims the new block. 12259 */ 12260 if ((pagedep->pd_state & NEWBLOCK) == 0) 12261 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 12262 free_diradd(dap, NULL); 12263 rollforward: 12264 /* 12265 * Uncommitted directory entries must be restored. 12266 */ 12267 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 12268 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 12269 dap = nextdap) { 12270 nextdap = LIST_NEXT(dap, da_pdlist); 12271 if (dap->da_state & ATTACHED) 12272 panic("handle_written_filepage: attached"); 12273 ep = (struct direct *) 12274 ((char *)bp->b_data + dap->da_offset); 12275 ep->d_ino = dap->da_newinum; 12276 dap->da_state &= ~UNDONE; 12277 dap->da_state |= ATTACHED; 12278 chgs = 1; 12279 /* 12280 * If the inode referenced by the directory has 12281 * been written out, then the dependency can be 12282 * moved to the pending list. 12283 */ 12284 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 12285 LIST_REMOVE(dap, da_pdlist); 12286 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 12287 da_pdlist); 12288 } 12289 } 12290 } 12291 /* 12292 * If there were any rollbacks in the directory, then it must be 12293 * marked dirty so that it will eventually get written back in 12294 * its correct form. 12295 */ 12296 if (chgs || (flags & WRITESUCCEEDED) == 0) { 12297 if ((bp->b_flags & B_DELWRI) == 0) 12298 stat_dir_entry++; 12299 bdirty(bp); 12300 return (1); 12301 } 12302 /* 12303 * If we are not waiting for a new directory block to be 12304 * claimed by its inode, then the pagedep will be freed. 12305 * Otherwise it will remain to track any new entries on 12306 * the page in case they are fsync'ed. 12307 */ 12308 free_pagedep(pagedep); 12309 return (0); 12310 } 12311 12312 /* 12313 * Writing back in-core inode structures. 12314 * 12315 * The filesystem only accesses an inode's contents when it occupies an 12316 * "in-core" inode structure. These "in-core" structures are separate from 12317 * the page frames used to cache inode blocks. Only the latter are 12318 * transferred to/from the disk.
So, when the updated contents of the 12319 * "in-core" inode structure are copied to the corresponding in-memory inode 12320 * block, the dependencies are also transferred. The following procedure is 12321 * called when copying a dirty "in-core" inode to a cached inode block. 12322 */ 12323 12324 /* 12325 * Called when an inode is loaded from disk. If the effective link count 12326 * differed from the actual link count when it was last flushed, then we 12327 * need to ensure that the correct effective link count is put back. 12328 */ 12329 void 12330 softdep_load_inodeblock(ip) 12331 struct inode *ip; /* the "in_core" copy of the inode */ 12332 { 12333 struct inodedep *inodedep; 12334 struct ufsmount *ump; 12335 12336 ump = ITOUMP(ip); 12337 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 12338 ("softdep_load_inodeblock called on non-softdep filesystem")); 12339 /* 12340 * Check for alternate nlink count. 12341 */ 12342 ip->i_effnlink = ip->i_nlink; 12343 ACQUIRE_LOCK(ump); 12344 if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) { 12345 FREE_LOCK(ump); 12346 return; 12347 } 12348 if (ip->i_nlink != inodedep->id_nlinkwrote && 12349 inodedep->id_nlinkwrote != -1) { 12350 KASSERT(ip->i_nlink == 0 && 12351 (ump->um_flags & UM_FSFAIL_CLEANUP) != 0, 12352 ("read bad i_nlink value")); 12353 ip->i_effnlink = ip->i_nlink = inodedep->id_nlinkwrote; 12354 } 12355 ip->i_effnlink -= inodedep->id_nlinkdelta; 12356 KASSERT(ip->i_effnlink >= 0, 12357 ("softdep_load_inodeblock: negative i_effnlink")); 12358 FREE_LOCK(ump); 12359 } 12360 12361 /* 12362 * This routine is called just before the "in-core" inode 12363 * information is to be copied to the in-memory inode block. 12364 * Recall that an inode block contains several inodes. If 12365 * the force flag is set, then the dependencies will be 12366 * cleared so that the update can always be made. Note that 12367 * the buffer is locked when this routine is called, so we 12368 * will never be in the middle of writing the inode block 12369 * to disk. 12370 */ 12371 void 12372 softdep_update_inodeblock(ip, bp, waitfor) 12373 struct inode *ip; /* the "in_core" copy of the inode */ 12374 struct buf *bp; /* the buffer containing the inode block */ 12375 int waitfor; /* nonzero => update must be allowed */ 12376 { 12377 struct inodedep *inodedep; 12378 struct inoref *inoref; 12379 struct ufsmount *ump; 12380 struct worklist *wk; 12381 struct mount *mp; 12382 struct buf *ibp; 12383 struct fs *fs; 12384 int error; 12385 12386 ump = ITOUMP(ip); 12387 mp = UFSTOVFS(ump); 12388 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 12389 ("softdep_update_inodeblock called on non-softdep filesystem")); 12390 fs = ump->um_fs; 12391 /* 12392 * Preserve the freelink that is on disk. clear_unlinked_inodedep() 12393 * does not have access to the in-core ip so must write directly into 12394 * the inode block buffer when setting freelink. 12395 */ 12396 if (fs->fs_magic == FS_UFS1_MAGIC) 12397 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + 12398 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12399 else 12400 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + 12401 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12402 /* 12403 * If the effective link count is not equal to the actual link 12404 * count, then we must track the difference in an inodedep while 12405 * the inode is (potentially) tossed out of the cache. Otherwise, 12406 * if there is no existing inodedep, then there are no dependencies 12407 * to track. 
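 *
 * An illustrative example with assumed numbers: if a file with two
 * names has one unlink whose directory write has not yet committed,
 * then i_effnlink is 1 while i_nlink is still 2, and the inodedep
 * records id_nlinkdelta = i_nlink - i_effnlink = 1 so that the
 * difference survives even if the inode is evicted from the cache.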
12408 */
12409 ACQUIRE_LOCK(ump);
12410 again:
12411 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12412 FREE_LOCK(ump);
12413 if (ip->i_effnlink != ip->i_nlink)
12414 panic("softdep_update_inodeblock: bad link count");
12415 return;
12416 }
12417 KASSERT(ip->i_nlink >= inodedep->id_nlinkdelta,
12418 ("softdep_update_inodeblock inconsistent ip %p i_nlink %d "
12419 "inodedep %p id_nlinkdelta %jd",
12420 ip, ip->i_nlink, inodedep, (intmax_t)inodedep->id_nlinkdelta));
12421 inodedep->id_nlinkwrote = ip->i_nlink;
12422 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12423 panic("softdep_update_inodeblock: bad delta");
12424 /*
12425 * If we're flushing all dependencies we must also move any waiting
12426 * for journal writes onto the bufwait list prior to I/O.
12427 */
12428 if (waitfor) {
12429 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12430 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12431 == DEPCOMPLETE) {
12432 jwait(&inoref->if_list, MNT_WAIT);
12433 goto again;
12434 }
12435 }
12436 }
12437 /*
12438 * Changes have been initiated. Anything depending on these
12439 * changes cannot occur until this inode has been written.
12440 */
12441 inodedep->id_state &= ~COMPLETE;
12442 if ((inodedep->id_state & ONWORKLIST) == 0)
12443 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12444 /*
12445 * Any new dependencies associated with the incore inode must
12446 * now be moved to the list associated with the buffer holding
12447 * the in-memory copy of the inode. Once merged process any
12448 * allocdirects that are completed by the merger.
12449 */
12450 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12451 if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12452 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12453 NULL);
12454 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12455 if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12456 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12457 NULL);
12458 /*
12459 * Now that the inode has been pushed into the buffer, the
12460 * operations dependent on the inode being written to disk
12461 * can be moved to the id_bufwait so that they will be
12462 * processed when the buffer I/O completes.
12463 */
12464 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12465 WORKLIST_REMOVE(wk);
12466 WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12467 }
12468 /*
12469 * Newly allocated inodes cannot be written until the bitmap
12470 * that allocates them has been written (indicated by
12471 * DEPCOMPLETE being set in id_state). If we are doing a
12472 * forced sync (e.g., an fsync on a file), we force the bitmap
12473 * to be written so that the update can be done.
12474 */
12475 if (waitfor == 0) {
12476 FREE_LOCK(ump);
12477 return;
12478 }
12479 retry:
12480 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12481 FREE_LOCK(ump);
12482 return;
12483 }
12484 ibp = inodedep->id_bmsafemap->sm_buf;
12485 ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12486 if (ibp == NULL) {
12487 /*
12488 * If ibp came back as NULL, the dependency could have been
12489 * freed while we slept. Look it up again, and check to see
12490 * that it has completed.
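 *
 * (Clarifying note: if inodedep_lookup() below returns 0, the
 * inodedep itself was freed while we slept, which means the bitmap
 * write completed and there is nothing left for us to flush.)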
12491 */
12492 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12493 goto retry;
12494 FREE_LOCK(ump);
12495 return;
12496 }
12497 FREE_LOCK(ump);
12498 if ((error = bwrite(ibp)) != 0)
12499 softdep_error("softdep_update_inodeblock: bwrite", error);
12500 }
12501
12502 /*
12503 * Merge a new inode dependency list (such as id_newinoupdt) into an
12504 * old inode dependency list (such as id_inoupdt).
12505 */
12506 static void
12507 merge_inode_lists(newlisthead, oldlisthead)
12508 struct allocdirectlst *newlisthead;
12509 struct allocdirectlst *oldlisthead;
12510 {
12511 struct allocdirect *listadp, *newadp;
12512
12513 newadp = TAILQ_FIRST(newlisthead);
12514 if (newadp != NULL)
12515 LOCK_OWNED(VFSTOUFS(newadp->ad_block.nb_list.wk_mp));
12516 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12517 if (listadp->ad_offset < newadp->ad_offset) {
12518 listadp = TAILQ_NEXT(listadp, ad_next);
12519 continue;
12520 }
12521 TAILQ_REMOVE(newlisthead, newadp, ad_next);
12522 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12523 if (listadp->ad_offset == newadp->ad_offset) {
12524 allocdirect_merge(oldlisthead, newadp,
12525 listadp);
12526 listadp = newadp;
12527 }
12528 newadp = TAILQ_FIRST(newlisthead);
12529 }
12530 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12531 TAILQ_REMOVE(newlisthead, newadp, ad_next);
12532 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12533 }
12534 }
12535
12536 /*
12537 * If we are doing an fsync, then we must ensure that any directory
12538 * entries for the inode have been written after the inode gets to disk.
12539 */
12540 int
12541 softdep_fsync(vp)
12542 struct vnode *vp; /* the "in_core" copy of the inode */
12543 {
12544 struct inodedep *inodedep;
12545 struct pagedep *pagedep;
12546 struct inoref *inoref;
12547 struct ufsmount *ump;
12548 struct worklist *wk;
12549 struct diradd *dap;
12550 struct mount *mp;
12551 struct vnode *pvp;
12552 struct inode *ip;
12553 struct buf *bp;
12554 struct fs *fs;
12555 struct thread *td = curthread;
12556 int error, flushparent, pagedep_new_block;
12557 ino_t parentino;
12558 ufs_lbn_t lbn;
12559
12560 ip = VTOI(vp);
12561 mp = vp->v_mount;
12562 ump = VFSTOUFS(mp);
12563 fs = ump->um_fs;
12564 if (MOUNTEDSOFTDEP(mp) == 0)
12565 return (0);
12566 ACQUIRE_LOCK(ump);
12567 restart:
12568 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12569 FREE_LOCK(ump);
12570 return (0);
12571 }
12572 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12573 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12574 == DEPCOMPLETE) {
12575 jwait(&inoref->if_list, MNT_WAIT);
12576 goto restart;
12577 }
12578 }
12579 if (!LIST_EMPTY(&inodedep->id_inowait) ||
12580 !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12581 !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12582 !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12583 !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12584 panic("softdep_fsync: pending ops %p", inodedep);
12585 for (error = 0, flushparent = 0; ; ) {
12586 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12587 break;
12588 if (wk->wk_type != D_DIRADD)
12589 panic("softdep_fsync: Unexpected type %s",
12590 TYPENAME(wk->wk_type));
12591 dap = WK_DIRADD(wk);
12592 /*
12593 * Flush our parent if this directory entry has a MKDIR_PARENT
12594 * dependency or is contained in a newly allocated block.
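 *
 * (For example, after mkdir("A/B") an fsync of B must first push A's
 * updated inode, and possibly the new directory block in A naming B,
 * before B's entry can be considered stable; an illustrative
 * restatement of the soft updates ordering rules, not new policy.)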
12595 */ 12596 if (dap->da_state & DIRCHG) 12597 pagedep = dap->da_previous->dm_pagedep; 12598 else 12599 pagedep = dap->da_pagedep; 12600 parentino = pagedep->pd_ino; 12601 lbn = pagedep->pd_lbn; 12602 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) 12603 panic("softdep_fsync: dirty"); 12604 if ((dap->da_state & MKDIR_PARENT) || 12605 (pagedep->pd_state & NEWBLOCK)) 12606 flushparent = 1; 12607 else 12608 flushparent = 0; 12609 /* 12610 * If we are being fsync'ed as part of vgone'ing this vnode, 12611 * then we will not be able to release and recover the 12612 * vnode below, so we just have to give up on writing its 12613 * directory entry out. It will eventually be written, just 12614 * not now, but then the user was not asking to have it 12615 * written, so we are not breaking any promises. 12616 */ 12617 if (VN_IS_DOOMED(vp)) 12618 break; 12619 /* 12620 * We prevent deadlock by always fetching inodes from the 12621 * root, moving down the directory tree. Thus, when fetching 12622 * our parent directory, we first try to get the lock. If 12623 * that fails, we must unlock ourselves before requesting 12624 * the lock on our parent. See the comment in ufs_lookup 12625 * for details on possible races. 12626 */ 12627 FREE_LOCK(ump); 12628 if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, 12629 FFSV_FORCEINSMQ)) { 12630 /* 12631 * Unmount cannot proceed after unlock because 12632 * caller must have called vn_start_write(). 12633 */ 12634 VOP_UNLOCK(vp); 12635 error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, 12636 &pvp, FFSV_FORCEINSMQ); 12637 MPASS(VTOI(pvp)->i_mode != 0); 12638 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12639 if (VN_IS_DOOMED(vp)) { 12640 if (error == 0) 12641 vput(pvp); 12642 error = ENOENT; 12643 } 12644 if (error != 0) 12645 return (error); 12646 } 12647 /* 12648 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 12649 * that are contained in direct blocks will be resolved by 12650 * doing a ffs_update. Pagedeps contained in indirect blocks 12651 * may require a complete sync'ing of the directory. So, we 12652 * try the cheap and fast ffs_update first, and if that fails, 12653 * then we do the slower ffs_syncvnode of the directory. 12654 */ 12655 if (flushparent) { 12656 int locked; 12657 12658 if ((error = ffs_update(pvp, 1)) != 0) { 12659 vput(pvp); 12660 return (error); 12661 } 12662 ACQUIRE_LOCK(ump); 12663 locked = 1; 12664 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { 12665 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { 12666 if (wk->wk_type != D_DIRADD) 12667 panic("softdep_fsync: Unexpected type %s", 12668 TYPENAME(wk->wk_type)); 12669 dap = WK_DIRADD(wk); 12670 if (dap->da_state & DIRCHG) 12671 pagedep = dap->da_previous->dm_pagedep; 12672 else 12673 pagedep = dap->da_pagedep; 12674 pagedep_new_block = pagedep->pd_state & NEWBLOCK; 12675 FREE_LOCK(ump); 12676 locked = 0; 12677 if (pagedep_new_block && (error = 12678 ffs_syncvnode(pvp, MNT_WAIT, 0))) { 12679 vput(pvp); 12680 return (error); 12681 } 12682 } 12683 } 12684 if (locked) 12685 FREE_LOCK(ump); 12686 } 12687 /* 12688 * Flush directory page containing the inode's name. 
12689 */
12690 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12691 &bp);
12692 if (error == 0)
12693 error = bwrite(bp);
12694 else
12695 brelse(bp);
12696 vput(pvp);
12697 if (!ffs_fsfail_cleanup(ump, error))
12698 return (error);
12699 ACQUIRE_LOCK(ump);
12700 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12701 break;
12702 }
12703 FREE_LOCK(ump);
12704 return (0);
12705 }
12706
12707 /*
12708 * Flush all the dirty bitmaps associated with the block device
12709 * before flushing the rest of the dirty blocks so as to reduce
12710 * the number of dependencies that will have to be rolled back.
12711 *
12712 * XXX Unused?
12713 */
12714 void
12715 softdep_fsync_mountdev(vp)
12716 struct vnode *vp;
12717 {
12718 struct buf *bp, *nbp;
12719 struct worklist *wk;
12720 struct bufobj *bo;
12721
12722 if (!vn_isdisk(vp, NULL))
12723 panic("softdep_fsync_mountdev: vnode not a disk");
12724 bo = &vp->v_bufobj;
12725 restart:
12726 BO_LOCK(bo);
12727 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12728 /*
12729 * If it is already scheduled, skip to the next buffer.
12730 */
12731 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12732 continue;
12733
12734 if ((bp->b_flags & B_DELWRI) == 0)
12735 panic("softdep_fsync_mountdev: not dirty");
12736 /*
12737 * We are only interested in bitmaps with outstanding
12738 * dependencies.
12739 */
12740 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12741 wk->wk_type != D_BMSAFEMAP ||
12742 (bp->b_vflags & BV_BKGRDINPROG)) {
12743 BUF_UNLOCK(bp);
12744 continue;
12745 }
12746 BO_UNLOCK(bo);
12747 bremfree(bp);
12748 (void) bawrite(bp);
12749 goto restart;
12750 }
12751 drain_output(vp);
12752 BO_UNLOCK(bo);
12753 }
12754
12755 /*
12756 * Sync all cylinder groups that were dirty at the time this function is
12757 * called. Newly dirtied cgs will be inserted before the sentinel. This
12758 * is used to flush freedep activity that may be holding up writes to an
12759 * indirect block.
12760 */
12761 static int
12762 sync_cgs(mp, waitfor)
12763 struct mount *mp;
12764 int waitfor;
12765 {
12766 struct bmsafemap *bmsafemap;
12767 struct bmsafemap *sentinel;
12768 struct ufsmount *ump;
12769 struct buf *bp;
12770 int error;
12771
12772 sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12773 sentinel->sm_cg = -1;
12774 ump = VFSTOUFS(mp);
12775 error = 0;
12776 ACQUIRE_LOCK(ump);
12777 LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12778 for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12779 bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12780 /* Skip sentinels and cgs with no work to release. */
12781 if (bmsafemap->sm_cg == -1 ||
12782 (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12783 LIST_EMPTY(&bmsafemap->sm_freewr))) {
12784 LIST_REMOVE(sentinel, sm_next);
12785 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12786 continue;
12787 }
12788 /*
12789 * If we don't get the lock and we're waiting, try again; if
12790 * not, move on to the next buf and try to sync it.
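 *
 * The sentinel is what makes it safe to drop the softdep lock while
 * writing a cg buffer: the list may change while we sleep, but the
 * sentinel keeps our place. A minimal sketch of the pattern used by
 * this loop (illustrative only, with generic names):
 *
 *	LIST_INSERT_HEAD(head, sentinel, link);
 *	while ((elm = LIST_NEXT(sentinel, link)) != NULL) {
 *		LIST_REMOVE(sentinel, link);
 *		LIST_INSERT_AFTER(elm, sentinel, link);
 *		... drop the lock, write elm, retake the lock ...
 *	}
 *	LIST_REMOVE(sentinel, link);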
12791 */ 12792 bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor); 12793 if (bp == NULL && waitfor == MNT_WAIT) 12794 continue; 12795 LIST_REMOVE(sentinel, sm_next); 12796 LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next); 12797 if (bp == NULL) 12798 continue; 12799 FREE_LOCK(ump); 12800 if (waitfor == MNT_NOWAIT) 12801 bawrite(bp); 12802 else 12803 error = bwrite(bp); 12804 ACQUIRE_LOCK(ump); 12805 if (error) 12806 break; 12807 } 12808 LIST_REMOVE(sentinel, sm_next); 12809 FREE_LOCK(ump); 12810 free(sentinel, M_BMSAFEMAP); 12811 return (error); 12812 } 12813 12814 /* 12815 * This routine is called when we are trying to synchronously flush a 12816 * file. This routine must eliminate any filesystem metadata dependencies 12817 * so that the syncing routine can succeed. 12818 */ 12819 int 12820 softdep_sync_metadata(struct vnode *vp) 12821 { 12822 struct inode *ip; 12823 int error; 12824 12825 ip = VTOI(vp); 12826 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12827 ("softdep_sync_metadata called on non-softdep filesystem")); 12828 /* 12829 * Ensure that any direct block dependencies have been cleared, 12830 * truncations are started, and inode references are journaled. 12831 */ 12832 ACQUIRE_LOCK(VFSTOUFS(vp->v_mount)); 12833 /* 12834 * Write all journal records to prevent rollbacks on devvp. 12835 */ 12836 if (vp->v_type == VCHR) 12837 softdep_flushjournal(vp->v_mount); 12838 error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number); 12839 /* 12840 * Ensure that all truncates are written so we won't find deps on 12841 * indirect blocks. 12842 */ 12843 process_truncates(vp); 12844 FREE_LOCK(VFSTOUFS(vp->v_mount)); 12845 12846 return (error); 12847 } 12848 12849 /* 12850 * This routine is called when we are attempting to sync a buf with 12851 * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any 12852 * other IO it can but returns EBUSY if the buffer is not yet able to 12853 * be written. Dependencies which will not cause rollbacks will always 12854 * return 0. 12855 */ 12856 int 12857 softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) 12858 { 12859 struct indirdep *indirdep; 12860 struct pagedep *pagedep; 12861 struct allocindir *aip; 12862 struct newblk *newblk; 12863 struct ufsmount *ump; 12864 struct buf *nbp; 12865 struct worklist *wk; 12866 int i, error; 12867 12868 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12869 ("softdep_sync_buf called on non-softdep filesystem")); 12870 /* 12871 * For VCHR we just don't want to force flush any dependencies that 12872 * will cause rollbacks. 12873 */ 12874 if (vp->v_type == VCHR) { 12875 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) 12876 return (EBUSY); 12877 return (0); 12878 } 12879 ump = VFSTOUFS(vp->v_mount); 12880 ACQUIRE_LOCK(ump); 12881 /* 12882 * As we hold the buffer locked, none of its dependencies 12883 * will disappear. 
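 *
 * (That is, dependencies may complete and move between lists while
 * we drop and retake the softdep lock, but nothing on bp->b_dep can
 * be freed out from under us, which is what makes the restart labels
 * below safe.)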
12884 */ 12885 error = 0; 12886 top: 12887 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 12888 switch (wk->wk_type) { 12889 12890 case D_ALLOCDIRECT: 12891 case D_ALLOCINDIR: 12892 newblk = WK_NEWBLK(wk); 12893 if (newblk->nb_jnewblk != NULL) { 12894 if (waitfor == MNT_NOWAIT) { 12895 error = EBUSY; 12896 goto out_unlock; 12897 } 12898 jwait(&newblk->nb_jnewblk->jn_list, waitfor); 12899 goto top; 12900 } 12901 if (newblk->nb_state & DEPCOMPLETE || 12902 waitfor == MNT_NOWAIT) 12903 continue; 12904 nbp = newblk->nb_bmsafemap->sm_buf; 12905 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12906 if (nbp == NULL) 12907 goto top; 12908 FREE_LOCK(ump); 12909 if ((error = bwrite(nbp)) != 0) 12910 goto out; 12911 ACQUIRE_LOCK(ump); 12912 continue; 12913 12914 case D_INDIRDEP: 12915 indirdep = WK_INDIRDEP(wk); 12916 if (waitfor == MNT_NOWAIT) { 12917 if (!TAILQ_EMPTY(&indirdep->ir_trunc) || 12918 !LIST_EMPTY(&indirdep->ir_deplisthd)) { 12919 error = EBUSY; 12920 goto out_unlock; 12921 } 12922 } 12923 if (!TAILQ_EMPTY(&indirdep->ir_trunc)) 12924 panic("softdep_sync_buf: truncation pending."); 12925 restart: 12926 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 12927 newblk = (struct newblk *)aip; 12928 if (newblk->nb_jnewblk != NULL) { 12929 jwait(&newblk->nb_jnewblk->jn_list, 12930 waitfor); 12931 goto restart; 12932 } 12933 if (newblk->nb_state & DEPCOMPLETE) 12934 continue; 12935 nbp = newblk->nb_bmsafemap->sm_buf; 12936 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12937 if (nbp == NULL) 12938 goto restart; 12939 FREE_LOCK(ump); 12940 if ((error = bwrite(nbp)) != 0) 12941 goto out; 12942 ACQUIRE_LOCK(ump); 12943 goto restart; 12944 } 12945 continue; 12946 12947 case D_PAGEDEP: 12948 /* 12949 * Only flush directory entries in synchronous passes. 12950 */ 12951 if (waitfor != MNT_WAIT) { 12952 error = EBUSY; 12953 goto out_unlock; 12954 } 12955 /* 12956 * While syncing snapshots, we must allow recursive 12957 * lookups. 12958 */ 12959 BUF_AREC(bp); 12960 /* 12961 * We are trying to sync a directory that may 12962 * have dependencies on both its own metadata 12963 * and/or dependencies on the inodes of any 12964 * recently allocated files. We walk its diradd 12965 * lists pushing out the associated inode. 12966 */ 12967 pagedep = WK_PAGEDEP(wk); 12968 for (i = 0; i < DAHASHSZ; i++) { 12969 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 12970 continue; 12971 if ((error = flush_pagedep_deps(vp, wk->wk_mp, 12972 &pagedep->pd_diraddhd[i]))) { 12973 BUF_NOREC(bp); 12974 goto out_unlock; 12975 } 12976 } 12977 BUF_NOREC(bp); 12978 continue; 12979 12980 case D_FREEWORK: 12981 case D_FREEDEP: 12982 case D_JSEGDEP: 12983 case D_JNEWBLK: 12984 continue; 12985 12986 default: 12987 panic("softdep_sync_buf: Unknown type %s", 12988 TYPENAME(wk->wk_type)); 12989 /* NOTREACHED */ 12990 } 12991 } 12992 out_unlock: 12993 FREE_LOCK(ump); 12994 out: 12995 return (error); 12996 } 12997 12998 /* 12999 * Flush the dependencies associated with an inodedep. 13000 */ 13001 static int 13002 flush_inodedep_deps(vp, mp, ino) 13003 struct vnode *vp; 13004 struct mount *mp; 13005 ino_t ino; 13006 { 13007 struct inodedep *inodedep; 13008 struct inoref *inoref; 13009 struct ufsmount *ump; 13010 int error, waitfor; 13011 13012 /* 13013 * This work is done in two passes. The first pass grabs most 13014 * of the buffers and begins asynchronously writing them. The 13015 * only way to wait for these asynchronous writes is to sleep 13016 * on the filesystem vnode which may stay busy for a long time 13017 * if the filesystem is active. 
So, instead, we make a second
13018 * pass over the dependencies blocking on each write. In the
13019 * usual case we will be blocking against a write that we
13020 * initiated, so when it is done the dependency will have been
13021 * resolved. Thus the second pass is expected to end quickly.
13022 * We give a brief window at the top of the loop to allow
13023 * any pending I/O to complete.
13024 */
13025 ump = VFSTOUFS(mp);
13026 LOCK_OWNED(ump);
13027 for (error = 0, waitfor = MNT_NOWAIT; ; ) {
13028 if (error)
13029 return (error);
13030 FREE_LOCK(ump);
13031 ACQUIRE_LOCK(ump);
13032 restart:
13033 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13034 return (0);
13035 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13036 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13037 == DEPCOMPLETE) {
13038 jwait(&inoref->if_list, MNT_WAIT);
13039 goto restart;
13040 }
13041 }
13042 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
13043 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
13044 flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
13045 flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
13046 continue;
13047 /*
13048 * If this was pass 2, we are done; otherwise do pass 2.
13049 */
13050 if (waitfor == MNT_WAIT)
13051 break;
13052 waitfor = MNT_WAIT;
13053 }
13054 /*
13055 * Try freeing inodedep in case all dependencies have been removed.
13056 */
13057 if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
13058 (void) free_inodedep(inodedep);
13059 return (0);
13060 }
13061
13062 /*
13063 * Flush an inode dependency list.
13064 */
13065 static int
13066 flush_deplist(listhead, waitfor, errorp)
13067 struct allocdirectlst *listhead;
13068 int waitfor;
13069 int *errorp;
13070 {
13071 struct allocdirect *adp;
13072 struct newblk *newblk;
13073 struct ufsmount *ump;
13074 struct buf *bp;
13075
13076 if ((adp = TAILQ_FIRST(listhead)) == NULL)
13077 return (0);
13078 ump = VFSTOUFS(adp->ad_list.wk_mp);
13079 LOCK_OWNED(ump);
13080 TAILQ_FOREACH(adp, listhead, ad_next) {
13081 newblk = (struct newblk *)adp;
13082 if (newblk->nb_jnewblk != NULL) {
13083 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
13084 return (1);
13085 }
13086 if (newblk->nb_state & DEPCOMPLETE)
13087 continue;
13088 bp = newblk->nb_bmsafemap->sm_buf;
13089 bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
13090 if (bp == NULL) {
13091 if (waitfor == MNT_NOWAIT)
13092 continue;
13093 return (1);
13094 }
13095 FREE_LOCK(ump);
13096 if (waitfor == MNT_NOWAIT)
13097 bawrite(bp);
13098 else
13099 *errorp = bwrite(bp);
13100 ACQUIRE_LOCK(ump);
13101 return (1);
13102 }
13103 return (0);
13104 }
13105
13106 /*
13107 * Flush dependencies associated with an allocdirect block.
13108 */
13109 static int
13110 flush_newblk_dep(vp, mp, lbn)
13111 struct vnode *vp;
13112 struct mount *mp;
13113 ufs_lbn_t lbn;
13114 {
13115 struct newblk *newblk;
13116 struct ufsmount *ump;
13117 struct bufobj *bo;
13118 struct inode *ip;
13119 struct buf *bp;
13120 ufs2_daddr_t blkno;
13121 int error;
13122
13123 error = 0;
13124 bo = &vp->v_bufobj;
13125 ip = VTOI(vp);
13126 blkno = DIP(ip, i_db[lbn]);
13127 if (blkno == 0)
13128 panic("flush_newblk_dep: Missing block");
13129 ump = VFSTOUFS(mp);
13130 ACQUIRE_LOCK(ump);
13131 /*
13132 * Loop until all dependencies related to this block are satisfied.
13133 * We must be careful to restart after each sleep in case a write
13134 * completes some part of this process for us.
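 *
 * (Clarifying note: each iteration re-runs newblk_lookup(), so if the
 * dependency was cleared while we slept in jwait() or bwrite(), the
 * lookup fails and the loop exits cleanly.)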
13135 */ 13136 for (;;) { 13137 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { 13138 FREE_LOCK(ump); 13139 break; 13140 } 13141 if (newblk->nb_list.wk_type != D_ALLOCDIRECT) 13142 panic("flush_newblk_dep: Bad newblk %p", newblk); 13143 /* 13144 * Flush the journal. 13145 */ 13146 if (newblk->nb_jnewblk != NULL) { 13147 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 13148 continue; 13149 } 13150 /* 13151 * Write the bitmap dependency. 13152 */ 13153 if ((newblk->nb_state & DEPCOMPLETE) == 0) { 13154 bp = newblk->nb_bmsafemap->sm_buf; 13155 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 13156 if (bp == NULL) 13157 continue; 13158 FREE_LOCK(ump); 13159 error = bwrite(bp); 13160 if (error) 13161 break; 13162 ACQUIRE_LOCK(ump); 13163 continue; 13164 } 13165 /* 13166 * Write the buffer. 13167 */ 13168 FREE_LOCK(ump); 13169 BO_LOCK(bo); 13170 bp = gbincore(bo, lbn); 13171 if (bp != NULL) { 13172 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 13173 LK_INTERLOCK, BO_LOCKPTR(bo)); 13174 if (error == ENOLCK) { 13175 ACQUIRE_LOCK(ump); 13176 error = 0; 13177 continue; /* Slept, retry */ 13178 } 13179 if (error != 0) 13180 break; /* Failed */ 13181 if (bp->b_flags & B_DELWRI) { 13182 bremfree(bp); 13183 error = bwrite(bp); 13184 if (error) 13185 break; 13186 } else 13187 BUF_UNLOCK(bp); 13188 } else 13189 BO_UNLOCK(bo); 13190 /* 13191 * We have to wait for the direct pointers to 13192 * point at the newdirblk before the dependency 13193 * will go away. 13194 */ 13195 error = ffs_update(vp, 1); 13196 if (error) 13197 break; 13198 ACQUIRE_LOCK(ump); 13199 } 13200 return (error); 13201 } 13202 13203 /* 13204 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 13205 */ 13206 static int 13207 flush_pagedep_deps(pvp, mp, diraddhdp) 13208 struct vnode *pvp; 13209 struct mount *mp; 13210 struct diraddhd *diraddhdp; 13211 { 13212 struct inodedep *inodedep; 13213 struct inoref *inoref; 13214 struct ufsmount *ump; 13215 struct diradd *dap; 13216 struct vnode *vp; 13217 int error = 0; 13218 struct buf *bp; 13219 ino_t inum; 13220 struct diraddhd unfinished; 13221 13222 LIST_INIT(&unfinished); 13223 ump = VFSTOUFS(mp); 13224 LOCK_OWNED(ump); 13225 restart: 13226 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 13227 /* 13228 * Flush ourselves if this directory entry 13229 * has a MKDIR_PARENT dependency. 13230 */ 13231 if (dap->da_state & MKDIR_PARENT) { 13232 FREE_LOCK(ump); 13233 if ((error = ffs_update(pvp, 1)) != 0) 13234 break; 13235 ACQUIRE_LOCK(ump); 13236 /* 13237 * If that cleared dependencies, go on to next. 13238 */ 13239 if (dap != LIST_FIRST(diraddhdp)) 13240 continue; 13241 /* 13242 * All MKDIR_PARENT dependencies and all the 13243 * NEWBLOCK pagedeps that are contained in direct 13244 * blocks were resolved by doing above ffs_update. 13245 * Pagedeps contained in indirect blocks may 13246 * require a complete sync'ing of the directory. 13247 * We are in the midst of doing a complete sync, 13248 * so if they are not resolved in this pass we 13249 * defer them for now as they will be sync'ed by 13250 * our caller shortly. 13251 */ 13252 LIST_REMOVE(dap, da_pdlist); 13253 LIST_INSERT_HEAD(&unfinished, dap, da_pdlist); 13254 continue; 13255 } 13256 /* 13257 * A newly allocated directory must have its "." and 13258 * ".." entries written out before its name can be 13259 * committed in its parent. 
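 *
 * (Concretely, for mkdir("A/B") the first block of B, holding the "."
 * and ".." entries, must reach the disk before the entry for B in A
 * may be written; the MKDIR_BODY handling below flushes exactly that
 * block, logical block 0 of the new directory.)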
13260 */
13261 inum = dap->da_newinum;
13262 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13263 panic("flush_pagedep_deps: lost inode1");
13264 /*
13265 * Wait for any pending journal adds to complete so we don't
13266 * cause rollbacks while syncing.
13267 */
13268 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13269 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13270 == DEPCOMPLETE) {
13271 jwait(&inoref->if_list, MNT_WAIT);
13272 goto restart;
13273 }
13274 }
13275 if (dap->da_state & MKDIR_BODY) {
13276 FREE_LOCK(ump);
13277 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
13278 FFSV_FORCEINSMQ)))
13279 break;
13280 MPASS(VTOI(vp)->i_mode != 0);
13281 error = flush_newblk_dep(vp, mp, 0);
13282 /*
13283 * If we still have the dependency we might need to
13284 * update the vnode to sync the new link count to
13285 * disk.
13286 */
13287 if (error == 0 && dap == LIST_FIRST(diraddhdp))
13288 error = ffs_update(vp, 1);
13289 vput(vp);
13290 if (error != 0)
13291 break;
13292 ACQUIRE_LOCK(ump);
13293 /*
13294 * If that cleared dependencies, go on to next.
13295 */
13296 if (dap != LIST_FIRST(diraddhdp))
13297 continue;
13298 if (dap->da_state & MKDIR_BODY) {
13299 inodedep_lookup(UFSTOVFS(ump), inum, 0,
13300 &inodedep);
13301 panic("flush_pagedep_deps: MKDIR_BODY "
13302 "inodedep %p dap %p vp %p",
13303 inodedep, dap, vp);
13304 }
13305 }
13306 /*
13307 * Flush the inode on which the directory entry depends.
13308 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
13309 * the only remaining dependency is that the updated inode
13310 * count must get pushed to disk. The inode has already
13311 * been pushed into its inode buffer (via VOP_UPDATE) at
13312 * the time of the reference count change. So we need only
13313 * locate that buffer, ensure that there will be no rollback
13314 * caused by a bitmap dependency, then write the inode buffer.
13315 */
13316 retry:
13317 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13318 panic("flush_pagedep_deps: lost inode");
13319 /*
13320 * If the inode still has bitmap dependencies,
13321 * push them to disk.
13322 */
13323 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
13324 bp = inodedep->id_bmsafemap->sm_buf;
13325 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
13326 if (bp == NULL)
13327 goto retry;
13328 FREE_LOCK(ump);
13329 if ((error = bwrite(bp)) != 0)
13330 break;
13331 ACQUIRE_LOCK(ump);
13332 if (dap != LIST_FIRST(diraddhdp))
13333 continue;
13334 }
13335 /*
13336 * If the inode is still sitting in a buffer waiting
13337 * to be written or waiting for the link count to be
13338 * adjusted, update it here to flush it to disk.
13339 */
13340 if (dap == LIST_FIRST(diraddhdp)) {
13341 FREE_LOCK(ump);
13342 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
13343 FFSV_FORCEINSMQ)))
13344 break;
13345 MPASS(VTOI(vp)->i_mode != 0);
13346 error = ffs_update(vp, 1);
13347 vput(vp);
13348 if (error)
13349 break;
13350 ACQUIRE_LOCK(ump);
13351 }
13352 /*
13353 * If we have failed to get rid of all the dependencies
13354 * then something is seriously wrong.
13355 */
13356 if (dap == LIST_FIRST(diraddhdp)) {
13357 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13358 panic("flush_pagedep_deps: failed to flush "
13359 "inodedep %p ino %ju dap %p",
13360 inodedep, (uintmax_t)inum, dap);
13361 }
13362 }
13363 if (error)
13364 ACQUIRE_LOCK(ump);
13365 while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13366 LIST_REMOVE(dap, da_pdlist);
13367 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13368 }
13369 return (error);
13370 }
13371
13372 /*
13373 * A large burst of file addition or deletion activity can drive the
13374 * memory load excessively high. First attempt to slow things down
13375 * using the techniques below. If that fails, this routine requests
13376 * the offending operations to fall back to running synchronously
13377 * until the memory load returns to a reasonable level.
13378 */
13379 int
13380 softdep_slowdown(vp)
13381 struct vnode *vp;
13382 {
13383 struct ufsmount *ump;
13384 int jlow;
13385 int max_softdeps_hard;
13386
13387 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13388 ("softdep_slowdown called on non-softdep filesystem"));
13389 ump = VFSTOUFS(vp->v_mount);
13390 ACQUIRE_LOCK(ump);
13391 jlow = 0;
13392 /*
13393 * Check for journal space if needed.
13394 */
13395 if (DOINGSUJ(vp)) {
13396 if (journal_space(ump, 0) == 0)
13397 jlow = 1;
13398 }
13399 /*
13400 * If the system is under its limits and our filesystem is
13401 * not responsible for more than our share of the usage and
13402 * we are not low on journal space, then no need to slow down.
13403 */
13404 max_softdeps_hard = max_softdeps * 11 / 10;
13405 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13406 dep_current[D_INODEDEP] < max_softdeps_hard &&
13407 dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13408 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13409 ump->softdep_curdeps[D_DIRREM] <
13410 (max_softdeps_hard / 2) / stat_flush_threads &&
13411 ump->softdep_curdeps[D_INODEDEP] <
13412 max_softdeps_hard / stat_flush_threads &&
13413 ump->softdep_curdeps[D_INDIRDEP] <
13414 (max_softdeps_hard / 1000) / stat_flush_threads &&
13415 ump->softdep_curdeps[D_FREEBLKS] <
13416 max_softdeps_hard / stat_flush_threads) {
13417 FREE_LOCK(ump);
13418 return (0);
13419 }
13420 /*
13421 * If the journal is low or our filesystem is over its limit
13422 * then speed up the cleanup.
13423 */
13424 if (ump->softdep_curdeps[D_INDIRDEP] <
13425 (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13426 softdep_speedup(ump);
13427 stat_sync_limit_hit += 1;
13428 FREE_LOCK(ump);
13429 /*
13430 * We only slow down the rate at which new dependencies are
13431 * generated if we are not using journaling. With journaling,
13432 * the cleanup should always be sufficient to keep things
13433 * under control.
13434 */
13435 if (DOINGSUJ(vp))
13436 return (0);
13437 return (1);
13438 }
13439
13440 /*
13441 * Called by the allocation routines when they are about to fail
13442 * in the hope that we can free up the requested resource (inodes
13443 * or disk space).
13444 *
13445 * First check to see if the work list has anything on it. If it has,
13446 * clean up entries until we successfully free the requested resource.
13447 * Because this process holds inodes locked, we cannot handle any remove
13448 * requests that might block on a locked inode as that could lead to
13449 * deadlock. If the worklist yields none of the requested resource,
13450 * start syncing out vnodes to free up the needed space.
13451 */
13452 int
13453 softdep_request_cleanup(fs, vp, cred, resource)
13454 struct fs *fs;
13455 struct vnode *vp;
13456 struct ucred *cred;
13457 int resource;
13458 {
13459 struct ufsmount *ump;
13460 struct mount *mp;
13461 long starttime;
13462 ufs2_daddr_t needed;
13463 int error, failed_vnode;
13464
13465 /*
13466 * If we are being called because of a process doing a
13467 * copy-on-write, then it is not safe to process any
13468 * worklist items as we will recurse into the copyonwrite
13469 * routine. This will result in an incoherent snapshot.
13470 * If the vnode that we hold is a snapshot, we must avoid
13471 * handling other resources that could cause deadlock.
13472 */
13473 if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13474 return (0);
13475
13476 if (resource == FLUSH_BLOCKS_WAIT)
13477 stat_cleanup_blkrequests += 1;
13478 else
13479 stat_cleanup_inorequests += 1;
13480
13481 mp = vp->v_mount;
13482 ump = VFSTOUFS(mp);
13483 mtx_assert(UFS_MTX(ump), MA_OWNED);
13484 UFS_UNLOCK(ump);
13485 error = ffs_update(vp, 1);
13486 if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13487 UFS_LOCK(ump);
13488 return (0);
13489 }
13490 /*
13491 * If we are in need of resources, start by cleaning up
13492 * any block removals associated with our inode.
13493 */
13494 ACQUIRE_LOCK(ump);
13495 process_removes(vp);
13496 process_truncates(vp);
13497 FREE_LOCK(ump);
13498 /*
13499 * Now clean up at least as many resources as we will need.
13500 *
13501 * When requested to clean up inodes, the number that are needed
13502 * is set by the number of simultaneous writers (mnt_writeopcount)
13503 * plus a bit of slop (2) in case some more writers show up while
13504 * we are cleaning.
13505 *
13506 * When requested to free up space, the amount of space that
13507 * we need is enough blocks to allocate a full-sized segment
13508 * (fs_contigsumsize). The number of such segments that will
13509 * be needed is set by the number of simultaneous writers
13510 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13511 * writers show up while we are cleaning.
13512 *
13513 * Additionally, if we are unprivileged and allocating space,
13514 * we need to ensure that we clean up enough blocks to get the
13515 * needed number of blocks over the threshold of the minimum
13516 * number of blocks required to be kept free by the filesystem
13517 * (fs_minfree).
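 *
 * A worked example with assumed numbers: with 4 concurrent writers
 * and an fs_contigsumsize of 16, FLUSH_BLOCKS_WAIT computes needed =
 * (4 + 2) * 16 = 96 blocks; an unprivileged caller additionally adds
 * however many blocks are required to climb back over the fs_minfree
 * reserve.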
13518 */
13519 if (resource == FLUSH_INODES_WAIT) {
13520 needed = vfs_mount_fetch_counter(vp->v_mount,
13521 MNT_COUNT_WRITEOPCOUNT) + 2;
13522 } else if (resource == FLUSH_BLOCKS_WAIT) {
13523 needed = (vfs_mount_fetch_counter(vp->v_mount,
13524 MNT_COUNT_WRITEOPCOUNT) + 2) * fs->fs_contigsumsize;
13525 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE))
13526 needed += fragstoblks(fs,
13527 roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13528 fs->fs_cstotal.cs_nffree, fs->fs_frag));
13529 } else {
13530 printf("softdep_request_cleanup: Unknown resource type %d\n",
13531 resource);
13532 UFS_LOCK(ump);
13533 return (0);
13534 }
13535 starttime = time_second;
13536 retry:
13537 if (resource == FLUSH_BLOCKS_WAIT &&
13538 fs->fs_cstotal.cs_nbfree <= needed)
13539 softdep_send_speedup(ump, needed * fs->fs_bsize,
13540 BIO_SPEEDUP_TRIM);
13541 if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13542 fs->fs_cstotal.cs_nbfree <= needed) ||
13543 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13544 fs->fs_cstotal.cs_nifree <= needed)) {
13545 ACQUIRE_LOCK(ump);
13546 if (ump->softdep_on_worklist > 0 &&
13547 process_worklist_item(UFSTOVFS(ump),
13548 ump->softdep_on_worklist, LK_NOWAIT) != 0)
13549 stat_worklist_push += 1;
13550 FREE_LOCK(ump);
13551 }
13552 /*
13553 * If we still need resources and there are no more worklist
13554 * entries to process to obtain them, we have to start flushing
13555 * the dirty vnodes to force the release of additional requests
13556 * to the worklist that we can then process to reap additional
13557 * resources. We walk the vnodes associated with the mount point
13558 * until we get the needed worklist requests that we can reap.
13559 *
13560 * If there are several threads all needing to clean the same
13561 * mount point, only one is allowed to walk the mount list.
13562 * When several threads all try to walk the same mount list,
13563 * they end up competing with each other and often end up in
13564 * livelock. This approach ensures that forward progress is
13565 * made at the cost of occasional ENOSPC errors being returned
13566 * that might otherwise have been avoided.
13567 */
13568 error = 1;
13569 if ((resource == FLUSH_BLOCKS_WAIT &&
13570 fs->fs_cstotal.cs_nbfree <= needed) ||
13571 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13572 fs->fs_cstotal.cs_nifree <= needed)) {
13573 ACQUIRE_LOCK(ump);
13574 if ((ump->um_softdep->sd_flags & FLUSH_RC_ACTIVE) == 0) {
13575 ump->um_softdep->sd_flags |= FLUSH_RC_ACTIVE;
13576 FREE_LOCK(ump);
13577 failed_vnode = softdep_request_cleanup_flush(mp, ump);
13578 ACQUIRE_LOCK(ump);
13579 ump->um_softdep->sd_flags &= ~FLUSH_RC_ACTIVE;
13580 FREE_LOCK(ump);
13581 if (ump->softdep_on_worklist > 0) {
13582 stat_cleanup_retries += 1;
13583 if (!failed_vnode)
13584 goto retry;
13585 }
13586 } else {
13587 FREE_LOCK(ump);
13588 error = 0;
13589 }
13590 stat_cleanup_failures += 1;
13591 }
13592 if (time_second - starttime > stat_cleanup_high_delay)
13593 stat_cleanup_high_delay = time_second - starttime;
13594 UFS_LOCK(ump);
13595 return (error);
13596 }
13597
13598 /*
13599 * Scan the vnodes for the specified mount point flushing out any
13600 * vnodes that can be locked without waiting. Finally, try to flush
13601 * the device associated with the mount point if it can be locked
13602 * without waiting.
13603 *
13604 * We return 0 if we were able to lock every vnode in our scan.
13605 * If we had to skip one or more vnodes, we return 1.
13606 */
13607 static int
13608 softdep_request_cleanup_flush(mp, ump)
13609 struct mount *mp;
13610 struct ufsmount *ump;
13611 {
13612 struct thread *td;
13613 struct vnode *lvp, *mvp;
13614 int failed_vnode;
13615
13616 failed_vnode = 0;
13617 td = curthread;
13618 MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13619 if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
13620 VI_UNLOCK(lvp);
13621 continue;
13622 }
13623 if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
13624 td) != 0) {
13625 failed_vnode = 1;
13626 continue;
13627 }
13628 if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */
13629 vput(lvp);
13630 continue;
13631 }
13632 (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13633 vput(lvp);
13634 }
13635 lvp = ump->um_devvp;
13636 if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13637 VOP_FSYNC(lvp, MNT_NOWAIT, td);
13638 VOP_UNLOCK(lvp);
13639 }
13640 return (failed_vnode);
13641 }
13642
13643 static bool
13644 softdep_excess_items(struct ufsmount *ump, int item)
13645 {
13646
13647 KASSERT(item >= 0 && item < D_LAST, ("item %d", item));
13648 return (dep_current[item] > max_softdeps &&
13649 ump->softdep_curdeps[item] > max_softdeps /
13650 stat_flush_threads);
13651 }
13652
13653 static void
13654 schedule_cleanup(struct mount *mp)
13655 {
13656 struct ufsmount *ump;
13657 struct thread *td;
13658
13659 ump = VFSTOUFS(mp);
13660 LOCK_OWNED(ump);
13661 FREE_LOCK(ump);
13662 td = curthread;
13663 if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13664 (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13665 /*
13666 * No ast is delivered to kernel threads, so nobody
13667 * would deref the mp. Some kernel threads
13668 * explicitly check for AST, e.g. NFS daemon does
13669 * this in the serving loop.
13670 */
13671 return;
13672 }
13673 if (td->td_su != NULL)
13674 vfs_rel(td->td_su);
13675 vfs_ref(mp);
13676 td->td_su = mp;
13677 thread_lock(td);
13678 td->td_flags |= TDF_ASTPENDING;
13679 thread_unlock(td);
13680 }
13681
13682 static void
13683 softdep_ast_cleanup_proc(struct thread *td)
13684 {
13685 struct mount *mp;
13686 struct ufsmount *ump;
13687 int error;
13688 bool req;
13689
13690 while ((mp = td->td_su) != NULL) {
13691 td->td_su = NULL;
13692 error = vfs_busy(mp, MBF_NOWAIT);
13693 vfs_rel(mp);
13694 if (error != 0)
13695 return;
13696 if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13697 ump = VFSTOUFS(mp);
13698 for (;;) {
13699 req = false;
13700 ACQUIRE_LOCK(ump);
13701 if (softdep_excess_items(ump, D_INODEDEP)) {
13702 req = true;
13703 request_cleanup(mp, FLUSH_INODES);
13704 }
13705 if (softdep_excess_items(ump, D_DIRREM)) {
13706 req = true;
13707 request_cleanup(mp, FLUSH_BLOCKS);
13708 }
13709 FREE_LOCK(ump);
13710 if (softdep_excess_items(ump, D_NEWBLK) ||
13711 softdep_excess_items(ump, D_ALLOCDIRECT) ||
13712 softdep_excess_items(ump, D_ALLOCINDIR)) {
13713 error = vn_start_write(NULL, &mp,
13714 V_WAIT);
13715 if (error == 0) {
13716 req = true;
13717 VFS_SYNC(mp, MNT_WAIT);
13718 vn_finished_write(mp);
13719 }
13720 }
13721 if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13722 break;
13723 }
13724 }
13725 vfs_unbusy(mp);
13726 }
13727 if ((mp = td->td_su) != NULL) {
13728 td->td_su = NULL;
13729 vfs_rel(mp);
13730 }
13731 }
13732
13733 /*
13734 * If memory utilization has gotten too high, deliberately slow things
13735 * down and speed up the I/O processing.
13736 */ 13737 static int 13738 request_cleanup(mp, resource) 13739 struct mount *mp; 13740 int resource; 13741 { 13742 struct thread *td = curthread; 13743 struct ufsmount *ump; 13744 13745 ump = VFSTOUFS(mp); 13746 LOCK_OWNED(ump); 13747 /* 13748 * We never hold up the filesystem syncer or buf daemon. 13749 */ 13750 if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF)) 13751 return (0); 13752 /* 13753 * First check to see if the work list has gotten backlogged. 13754 * If it has, co-opt this process to help clean up two entries. 13755 * Because this process may hold inodes locked, we cannot 13756 * handle any remove requests that might block on a locked 13757 * inode as that could lead to deadlock. We set TDP_SOFTDEP 13758 * to avoid recursively processing the worklist. 13759 */ 13760 if (ump->softdep_on_worklist > max_softdeps / 10) { 13761 td->td_pflags |= TDP_SOFTDEP; 13762 process_worklist_item(mp, 2, LK_NOWAIT); 13763 td->td_pflags &= ~TDP_SOFTDEP; 13764 stat_worklist_push += 2; 13765 return(1); 13766 } 13767 /* 13768 * Next, we attempt to speed up the syncer process. If that 13769 * is successful, then we allow the process to continue. 13770 */ 13771 if (softdep_speedup(ump) && 13772 resource != FLUSH_BLOCKS_WAIT && 13773 resource != FLUSH_INODES_WAIT) 13774 return(0); 13775 /* 13776 * If we are resource constrained on inode dependencies, try 13777 * flushing some dirty inodes. Otherwise, we are constrained 13778 * by file deletions, so try accelerating flushes of directories 13779 * with removal dependencies. We would like to do the cleanup 13780 * here, but we probably hold an inode locked at this point and 13781 * that might deadlock against one that we try to clean. So, 13782 * the best that we can do is request the syncer daemon to do 13783 * the cleanup for us. 13784 */ 13785 switch (resource) { 13786 13787 case FLUSH_INODES: 13788 case FLUSH_INODES_WAIT: 13789 ACQUIRE_GBLLOCK(&lk); 13790 stat_ino_limit_push += 1; 13791 req_clear_inodedeps += 1; 13792 FREE_GBLLOCK(&lk); 13793 stat_countp = &stat_ino_limit_hit; 13794 break; 13795 13796 case FLUSH_BLOCKS: 13797 case FLUSH_BLOCKS_WAIT: 13798 ACQUIRE_GBLLOCK(&lk); 13799 stat_blk_limit_push += 1; 13800 req_clear_remove += 1; 13801 FREE_GBLLOCK(&lk); 13802 stat_countp = &stat_blk_limit_hit; 13803 break; 13804 13805 default: 13806 panic("request_cleanup: unknown type"); 13807 } 13808 /* 13809 * Hopefully the syncer daemon will catch up and awaken us. 13810 * We wait at most tickdelay before proceeding in any case. 13811 */ 13812 ACQUIRE_GBLLOCK(&lk); 13813 FREE_LOCK(ump); 13814 proc_waiting += 1; 13815 if (callout_pending(&softdep_callout) == FALSE) 13816 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2, 13817 pause_timer, 0); 13818 13819 if ((td->td_pflags & TDP_KTHREAD) == 0) 13820 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); 13821 proc_waiting -= 1; 13822 FREE_GBLLOCK(&lk); 13823 ACQUIRE_LOCK(ump); 13824 return (1); 13825 } 13826 13827 /* 13828 * Awaken processes pausing in request_cleanup and clear proc_waiting 13829 * to indicate that there is no longer a timer running. Pause_timer 13830 * will be called with the global softdep mutex (&lk) locked. 13831 */ 13832 static void 13833 pause_timer(arg) 13834 void *arg; 13835 { 13836 13837 GBLLOCK_OWNED(&lk); 13838 /* 13839 * The callout_ API has acquired mtx and will hold it around this 13840 * function call. 
13841 */ 13842 *stat_countp += proc_waiting; 13843 wakeup(&proc_waiting); 13844 } 13845 13846 /* 13847 * If requested, try removing inode or removal dependencies. 13848 */ 13849 static void 13850 check_clear_deps(mp) 13851 struct mount *mp; 13852 { 13853 struct ufsmount *ump; 13854 bool suj_susp; 13855 13856 /* 13857 * Tell the lower layers that any TRIM or WRITE transactions that have 13858 * been delayed for performance reasons should proceed to help alleviate 13859 * the shortage faster. The race between checking req_* and the softdep 13860 * mutex (lk) is fine since this is an advisory operation that at most 13861 * causes deferred work to be done sooner. 13862 */ 13863 ump = VFSTOUFS(mp); 13864 suj_susp = MOUNTEDSUJ(mp) && ump->softdep_jblocks->jb_suspended; 13865 if (req_clear_remove || req_clear_inodedeps || suj_susp) { 13866 FREE_LOCK(ump); 13867 softdep_send_speedup(ump, 0, BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE); 13868 ACQUIRE_LOCK(ump); 13869 } 13870 13871 /* 13872 * If we are suspended, it may be because of our using 13873 * too many inodedeps, so help clear them out. 13874 */ 13875 if (suj_susp) 13876 clear_inodedeps(mp); 13877 13878 /* 13879 * General requests for cleanup of backed up dependencies 13880 */ 13881 ACQUIRE_GBLLOCK(&lk); 13882 if (req_clear_inodedeps) { 13883 req_clear_inodedeps -= 1; 13884 FREE_GBLLOCK(&lk); 13885 clear_inodedeps(mp); 13886 ACQUIRE_GBLLOCK(&lk); 13887 wakeup(&proc_waiting); 13888 } 13889 if (req_clear_remove) { 13890 req_clear_remove -= 1; 13891 FREE_GBLLOCK(&lk); 13892 clear_remove(mp); 13893 ACQUIRE_GBLLOCK(&lk); 13894 wakeup(&proc_waiting); 13895 } 13896 FREE_GBLLOCK(&lk); 13897 } 13898 13899 /* 13900 * Flush out a directory with at least one removal dependency in an effort to 13901 * reduce the number of dirrem, freefile, and freeblks dependency structures. 13902 */ 13903 static void 13904 clear_remove(mp) 13905 struct mount *mp; 13906 { 13907 struct pagedep_hashhead *pagedephd; 13908 struct pagedep *pagedep; 13909 struct ufsmount *ump; 13910 struct vnode *vp; 13911 struct bufobj *bo; 13912 int error, cnt; 13913 ino_t ino; 13914 13915 ump = VFSTOUFS(mp); 13916 LOCK_OWNED(ump); 13917 13918 for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { 13919 pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; 13920 if (ump->pagedep_nextclean > ump->pagedep_hash_size) 13921 ump->pagedep_nextclean = 0; 13922 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 13923 if (LIST_EMPTY(&pagedep->pd_dirremhd)) 13924 continue; 13925 ino = pagedep->pd_ino; 13926 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13927 continue; 13928 FREE_LOCK(ump); 13929 13930 /* 13931 * Let unmount clear deps 13932 */ 13933 error = vfs_busy(mp, MBF_NOWAIT); 13934 if (error != 0) 13935 goto finish_write; 13936 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13937 FFSV_FORCEINSMQ); 13938 vfs_unbusy(mp); 13939 if (error != 0) { 13940 softdep_error("clear_remove: vget", error); 13941 goto finish_write; 13942 } 13943 MPASS(VTOI(vp)->i_mode != 0); 13944 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13945 softdep_error("clear_remove: fsync", error); 13946 bo = &vp->v_bufobj; 13947 BO_LOCK(bo); 13948 drain_output(vp); 13949 BO_UNLOCK(bo); 13950 vput(vp); 13951 finish_write: 13952 vn_finished_write(mp); 13953 ACQUIRE_LOCK(ump); 13954 return; 13955 } 13956 } 13957 } 13958 13959 /* 13960 * Clear out a block of dirty inodes in an effort to reduce 13961 * the number of inodedep dependency structures. 
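 *
 * (Illustrative arithmetic: with INOPB(fs) == 64 and a victim
 * inodedep for inode 1000, firstino = rounddown2(1000, 64) = 960, so
 * the scan below visits inodes 960 through at most 1023 and flushes
 * each one that still has an inodedep.)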
13962 */ 13963 static void 13964 clear_inodedeps(mp) 13965 struct mount *mp; 13966 { 13967 struct inodedep_hashhead *inodedephd; 13968 struct inodedep *inodedep; 13969 struct ufsmount *ump; 13970 struct vnode *vp; 13971 struct fs *fs; 13972 int error, cnt; 13973 ino_t firstino, lastino, ino; 13974 13975 ump = VFSTOUFS(mp); 13976 fs = ump->um_fs; 13977 LOCK_OWNED(ump); 13978 /* 13979 * Pick a random inode dependency to be cleared. 13980 * We will then gather up all the inodes in its block 13981 * that have dependencies and flush them out. 13982 */ 13983 for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { 13984 inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; 13985 if (ump->inodedep_nextclean > ump->inodedep_hash_size) 13986 ump->inodedep_nextclean = 0; 13987 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 13988 break; 13989 } 13990 if (inodedep == NULL) 13991 return; 13992 /* 13993 * Find the last inode in the block with dependencies. 13994 */ 13995 firstino = rounddown2(inodedep->id_ino, INOPB(fs)); 13996 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 13997 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) 13998 break; 13999 /* 14000 * Asynchronously push all but the last inode with dependencies. 14001 * Synchronously push the last inode with dependencies to ensure 14002 * that the inode block gets written to free up the inodedeps. 14003 */ 14004 for (ino = firstino; ino <= lastino; ino++) { 14005 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) 14006 continue; 14007 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 14008 continue; 14009 FREE_LOCK(ump); 14010 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ 14011 if (error != 0) { 14012 vn_finished_write(mp); 14013 ACQUIRE_LOCK(ump); 14014 return; 14015 } 14016 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 14017 FFSV_FORCEINSMQ)) != 0) { 14018 softdep_error("clear_inodedeps: vget", error); 14019 vfs_unbusy(mp); 14020 vn_finished_write(mp); 14021 ACQUIRE_LOCK(ump); 14022 return; 14023 } 14024 vfs_unbusy(mp); 14025 if (VTOI(vp)->i_mode == 0) { 14026 vgone(vp); 14027 } else if (ino == lastino) { 14028 if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) 14029 softdep_error("clear_inodedeps: fsync1", error); 14030 } else { 14031 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 14032 softdep_error("clear_inodedeps: fsync2", error); 14033 BO_LOCK(&vp->v_bufobj); 14034 drain_output(vp); 14035 BO_UNLOCK(&vp->v_bufobj); 14036 } 14037 vput(vp); 14038 vn_finished_write(mp); 14039 ACQUIRE_LOCK(ump); 14040 } 14041 } 14042 14043 void 14044 softdep_buf_append(bp, wkhd) 14045 struct buf *bp; 14046 struct workhead *wkhd; 14047 { 14048 struct worklist *wk; 14049 struct ufsmount *ump; 14050 14051 if ((wk = LIST_FIRST(wkhd)) == NULL) 14052 return; 14053 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 14054 ("softdep_buf_append called on non-softdep filesystem")); 14055 ump = VFSTOUFS(wk->wk_mp); 14056 ACQUIRE_LOCK(ump); 14057 while ((wk = LIST_FIRST(wkhd)) != NULL) { 14058 WORKLIST_REMOVE(wk); 14059 WORKLIST_INSERT(&bp->b_dep, wk); 14060 } 14061 FREE_LOCK(ump); 14062 14063 } 14064 14065 void 14066 softdep_inode_append(ip, cred, wkhd) 14067 struct inode *ip; 14068 struct ucred *cred; 14069 struct workhead *wkhd; 14070 { 14071 struct buf *bp; 14072 struct fs *fs; 14073 struct ufsmount *ump; 14074 int error; 14075 14076 ump = ITOUMP(ip); 14077 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 14078 ("softdep_inode_append called on non-softdep filesystem")); 14079 fs = ump->um_fs; 14080 error = bread(ump->um_devvp, 
fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 14081 (int)fs->fs_bsize, cred, &bp); 14082 if (error) { 14083 bqrelse(bp); 14084 softdep_freework(wkhd); 14085 return; 14086 } 14087 softdep_buf_append(bp, wkhd); 14088 bqrelse(bp); 14089 } 14090 14091 void 14092 softdep_freework(wkhd) 14093 struct workhead *wkhd; 14094 { 14095 struct worklist *wk; 14096 struct ufsmount *ump; 14097 14098 if ((wk = LIST_FIRST(wkhd)) == NULL) 14099 return; 14100 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 14101 ("softdep_freework called on non-softdep filesystem")); 14102 ump = VFSTOUFS(wk->wk_mp); 14103 ACQUIRE_LOCK(ump); 14104 handle_jwork(wkhd); 14105 FREE_LOCK(ump); 14106 } 14107 14108 static struct ufsmount * 14109 softdep_bp_to_mp(bp) 14110 struct buf *bp; 14111 { 14112 struct mount *mp; 14113 struct vnode *vp; 14114 14115 if (LIST_EMPTY(&bp->b_dep)) 14116 return (NULL); 14117 vp = bp->b_vp; 14118 KASSERT(vp != NULL, 14119 ("%s, buffer with dependencies lacks vnode", __func__)); 14120 14121 /* 14122 * The ump mount point is stable after we get a correct 14123 * pointer, since bp is locked and this prevents unmount from 14124 * proceeding. But to get to it, we cannot dereference bp->b_dep 14125 * head wk_mp, because we do not yet own SU ump lock and 14126 * workitem might be freed while dereferenced. 14127 */ 14128 retry: 14129 switch (vp->v_type) { 14130 case VCHR: 14131 VI_LOCK(vp); 14132 mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL; 14133 VI_UNLOCK(vp); 14134 if (mp == NULL) 14135 goto retry; 14136 break; 14137 case VREG: 14138 case VDIR: 14139 case VLNK: 14140 case VFIFO: 14141 case VSOCK: 14142 mp = vp->v_mount; 14143 break; 14144 case VBLK: 14145 vn_printf(vp, "softdep_bp_to_mp: unexpected block device\n"); 14146 /* FALLTHROUGH */ 14147 case VNON: 14148 case VBAD: 14149 case VMARKER: 14150 mp = NULL; 14151 break; 14152 default: 14153 vn_printf(vp, "unknown vnode type"); 14154 mp = NULL; 14155 break; 14156 } 14157 return (VFSTOUFS(mp)); 14158 } 14159 14160 /* 14161 * Function to determine if the buffer has outstanding dependencies 14162 * that will cause a roll-back if the buffer is written. If wantcount 14163 * is set, return number of dependencies, otherwise just yes or no. 
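 *
 * (Usage note: softdep_sync_buf() above, for example, calls
 * softdep_count_dependencies(bp, 0) on a device buffer merely to
 * decide whether an MNT_NOWAIT sync must return EBUSY.)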

static struct ufsmount *
softdep_bp_to_mp(bp)
	struct buf *bp;
{
	struct mount *mp;
	struct vnode *vp;

	if (LIST_EMPTY(&bp->b_dep))
		return (NULL);
	vp = bp->b_vp;
	KASSERT(vp != NULL,
	    ("%s, buffer with dependencies lacks vnode", __func__));

	/*
	 * The ump mount point is stable after we get a correct
	 * pointer, since bp is locked and this prevents unmount from
	 * proceeding.  But to get to it, we cannot dereference bp->b_dep
	 * head wk_mp, because we do not yet own the SU ump lock and
	 * the workitem might be freed while dereferenced.
	 */
retry:
	switch (vp->v_type) {
	case VCHR:
		VI_LOCK(vp);
		mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL;
		VI_UNLOCK(vp);
		if (mp == NULL)
			goto retry;
		break;
	case VREG:
	case VDIR:
	case VLNK:
	case VFIFO:
	case VSOCK:
		mp = vp->v_mount;
		break;
	case VBLK:
		vn_printf(vp, "softdep_bp_to_mp: unexpected block device\n");
		/* FALLTHROUGH */
	case VNON:
	case VBAD:
	case VMARKER:
		mp = NULL;
		break;
	default:
		vn_printf(vp, "unknown vnode type");
		mp = NULL;
		break;
	}
	return (mp != NULL ? VFSTOUFS(mp) : NULL);
}
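
/*
 * Added note (not from the original source): the VCHR arm re-reads
 * v_type under the vnode interlock because a device vnode can be
 * reclaimed concurrently; a buffer carrying dependencies on a device
 * vnode implies a mounted file system, so si_mountpt is expected to
 * become visible and the retry loop to terminate.  A NULL return
 * tells callers such as softdep_count_dependencies() to treat the
 * buffer as having no soft updates work at all.
 */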

/*
 * Function to determine if the buffer has outstanding dependencies
 * that will cause a roll-back if the buffer is written.  If wantcount
 * is set, return number of dependencies, otherwise just yes or no.
 */
static int
softdep_count_dependencies(bp, wantcount)
	struct buf *bp;
	int wantcount;
{
	struct worklist *wk;
	struct ufsmount *ump;
	struct bmsafemap *bmsafemap;
	struct freework *freework;
	struct inodedep *inodedep;
	struct indirdep *indirdep;
	struct freeblks *freeblks;
	struct allocindir *aip;
	struct pagedep *pagedep;
	struct dirrem *dirrem;
	struct newblk *newblk;
	struct mkdir *mkdir;
	struct diradd *dap;
	int i, retval;

	ump = softdep_bp_to_mp(bp);
	if (ump == NULL)
		return (0);
	retval = 0;
	ACQUIRE_LOCK(ump);
	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
		switch (wk->wk_type) {
		case D_INODEDEP:
			inodedep = WK_INODEDEP(wk);
			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
				/* bitmap allocation dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
				/* direct block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
				/* direct block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
				/* Add reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_INDIRDEP:
			indirdep = WK_INDIRDEP(wk);

			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
				/* indirect truncation dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}

			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
				/* indirect block pointer dependency */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_PAGEDEP:
			pagedep = WK_PAGEDEP(wk);
			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
					/* Journal remove ref dependency. */
					retval += 1;
					if (!wantcount)
						goto out;
				}
			}
			for (i = 0; i < DAHASHSZ; i++) {
				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i],
				    da_pdlist) {
					/* directory entry dependency */
					retval += 1;
					if (!wantcount)
						goto out;
				}
			}
			continue;

		case D_BMSAFEMAP:
			bmsafemap = WK_BMSAFEMAP(wk);
			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
				/* Add reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
				/* Allocate block dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_FREEBLKS:
			freeblks = WK_FREEBLKS(wk);
			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
				/* Freeblk journal dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk) {
				/* Journal allocate dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_MKDIR:
			mkdir = WK_MKDIR(wk);
			if (mkdir->md_jaddref) {
				/* Journal reference dependency. */
				retval += 1;
				if (!wantcount)
					goto out;
			}
			continue;

		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
		case D_JSEG:
		case D_SBDEP:
			/* never a dependency on these blocks */
			continue;

		default:
			panic("softdep_count_dependencies: Unexpected type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
out:
	FREE_LOCK(ump);
	return (retval);
}
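
/*
 * Illustrative use (a sketch, not from the original source): this
 * routine is wired into bioops as the io_countdeps method, so most
 * callers reach it through buf_countdeps().  A caller that only
 * needs a go/no-go answer passes wantcount == 0, which stops the
 * scan at the first dependency found:
 *
 *	if (softdep_count_dependencies(bp, 0) != 0)
 *		... writing bp now would trigger a roll-back ...
 *
 * A non-zero wantcount walks every list and returns the full count.
 */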

/*
 * Acquire exclusive access to a buffer.
 * Must be called with the rwlock passed as "lock" write-locked; the
 * lock may be dropped and re-acquired before a NULL return.
 * Return the acquired buffer or NULL on failure.
 */
static struct buf *
getdirtybuf(bp, lock, waitfor)
	struct buf *bp;
	struct rwlock *lock;
	int waitfor;
{
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
		if (waitfor != MNT_WAIT)
			return (NULL);
		error = BUF_LOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
		/*
		 * Even if we successfully acquire bp here, we have dropped
		 * lock, which may violate our guarantee.
		 */
		if (error == 0)
			BUF_UNLOCK(bp);
		else if (error != ENOLCK)
			panic("getdirtybuf: inconsistent lock: %d", error);
		rw_wlock(lock);
		return (NULL);
	}
	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
		if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
			rw_wunlock(lock);
			BO_LOCK(bp->b_bufobj);
			BUF_UNLOCK(bp);
			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
				bp->b_vflags |= BV_BKGRDWAIT;
				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
				    PRIBIO | PDROP, "getbuf", 0);
			} else
				BO_UNLOCK(bp->b_bufobj);
			rw_wlock(lock);
			return (NULL);
		}
		BUF_UNLOCK(bp);
		if (waitfor != MNT_WAIT)
			return (NULL);
#ifdef DEBUG_VFS_LOCKS
		if (bp->b_vp->v_type != VCHR)
			ASSERT_BO_WLOCKED(bp->b_bufobj);
#endif
		bp->b_vflags |= BV_BKGRDWAIT;
		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
		return (NULL);
	}
	if ((bp->b_flags & B_DELWRI) == 0) {
		BUF_UNLOCK(bp);
		return (NULL);
	}
	bremfree(bp);
	return (bp);
}
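
/*
 * Illustrative caller loop (a sketch, not from the original source):
 * because getdirtybuf() may drop and re-acquire "lock" while
 * sleeping, a NULL return means any list position derived under that
 * lock is stale and the scan must restart:
 *
 *	restart:
 *	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
 *		if (getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT) == NULL)
 *			goto restart;	(lock was dropped; rescan)
 *		... write or requeue bp, then continue ...
 *	}
 */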

/*
 * Check if it is safe to suspend the file system now.  On entry,
 * the bufobj lock for devvp should be held.  Return 0 with
 * the mount interlock held if the file system can be suspended now,
 * otherwise return EAGAIN with the mount interlock held.
 */
int
softdep_check_suspend(struct mount *mp,
    struct vnode *devvp,
    int softdep_depcnt,
    int softdep_accdepcnt,
    int secondary_writes,
    int secondary_accwrites)
{
	struct bufobj *bo;
	struct ufsmount *ump;
	struct inodedep *inodedep;
	int error, unlinked;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	/*
	 * If we are not running with soft updates, then we need only
	 * deal with secondary writes as we try to suspend.
	 */
	if (MOUNTEDSOFTDEP(mp) == 0) {
		MNT_ILOCK(mp);
		while (mp->mnt_secondary_writes != 0) {
			BO_UNLOCK(bo);
			msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
			    (PUSER - 1) | PDROP, "secwr", 0);
			BO_LOCK(bo);
			MNT_ILOCK(mp);
		}

		/*
		 * Reasons for needing more work before suspend:
		 * - Dirty buffers on devvp.
		 * - Secondary writes occurred after start of vnode sync loop
		 */
		error = 0;
		if (bo->bo_numoutput > 0 ||
		    bo->bo_dirty.bv_cnt > 0 ||
		    secondary_writes != 0 ||
		    mp->mnt_secondary_writes != 0 ||
		    secondary_accwrites != mp->mnt_secondary_accwrites)
			error = EAGAIN;
		BO_UNLOCK(bo);
		return (error);
	}

	/*
	 * If we are running with soft updates, then we need to coordinate
	 * with them as we try to suspend.
	 */
	ump = VFSTOUFS(mp);
	for (;;) {
		if (!TRY_ACQUIRE_LOCK(ump)) {
			BO_UNLOCK(bo);
			ACQUIRE_LOCK(ump);
			FREE_LOCK(ump);
			BO_LOCK(bo);
			continue;
		}
		MNT_ILOCK(mp);
		if (mp->mnt_secondary_writes != 0) {
			FREE_LOCK(ump);
			BO_UNLOCK(bo);
			msleep(&mp->mnt_secondary_writes,
			    MNT_MTX(mp),
			    (PUSER - 1) | PDROP, "secwr", 0);
			BO_LOCK(bo);
			continue;
		}
		break;
	}

	unlinked = 0;
	if (MOUNTEDSUJ(mp)) {
		for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked);
		    inodedep != NULL;
		    inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
			if ((inodedep->id_state & (UNLINKED | UNLINKLINKS |
			    UNLINKONLIST)) != (UNLINKED | UNLINKLINKS |
			    UNLINKONLIST) ||
			    !check_inodedep_free(inodedep))
				continue;
			unlinked++;
		}
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Softdep activity occurred after start of vnode sync loop
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    softdep_depcnt != unlinked ||
	    ump->softdep_deps != unlinked ||
	    softdep_accdepcnt != ump->softdep_accdeps ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	FREE_LOCK(ump);
	BO_UNLOCK(bo);
	return (error);
}
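
/*
 * Illustrative handshake (a sketch, not from the original source):
 * the suspension code snapshots the dependency counts, syncs the
 * file system, and then asks whether anything moved in the meantime:
 *
 *	softdep_get_depcounts(mp, &depcnt, &accdepcnt);
 *	... flush vnodes, record secondary write counts ...
 *	BO_LOCK(&devvp->v_bufobj);
 *	error = softdep_check_suspend(mp, devvp, depcnt, accdepcnt,
 *	    secondary_writes, secondary_accwrites);
 *	(returns with the mount interlock held either way)
 *	if (error == EAGAIN)
 *		... drop the interlock and repeat the sync pass ...
 *
 * A zero return means the counts were stable across the sync pass
 * and suspension can proceed.  softdep_get_depcounts() is defined
 * below.
 */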

/*
 * Get the number of dependency structures for the file system, both
 * the current number and the total number allocated.  These will
 * later be used to detect that softdep processing has occurred.
 */
void
softdep_get_depcounts(struct mount *mp,
    int *softdep_depsp,
    int *softdep_accdepsp)
{
	struct ufsmount *ump;

	if (MOUNTEDSOFTDEP(mp) == 0) {
		*softdep_depsp = 0;
		*softdep_accdepsp = 0;
		return;
	}
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	*softdep_depsp = ump->softdep_deps;
	*softdep_accdepsp = ump->softdep_accdeps;
	FREE_LOCK(ump);
}
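
/*
 * Added note (not from the original source): softdep_accdeps is a
 * running total that only grows, so comparing two snapshots of it
 * detects soft updates activity even when the current dependency
 * count happens to return to its earlier value between the two
 * readings.
 */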

/*
 * Wait for pending output on a vnode to complete.
 */
static void
drain_output(vp)
	struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "drain_output");
	(void)bufobj_wwait(&vp->v_bufobj, 0, 0);
}

/*
 * Called whenever a buffer that is being invalidated or reallocated
 * contains dependencies.  This should only happen if an I/O error has
 * occurred.  The routine is called with the buffer locked.
 */
static void
softdep_deallocate_dependencies(bp)
	struct buf *bp;
{

	if ((bp->b_ioflags & BIO_ERROR) == 0)
		panic("softdep_deallocate_dependencies: dangling deps");
	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname,
		    bp->b_error);
	else
		printf("softdep_deallocate_dependencies: "
		    "got error %d while accessing filesystem\n", bp->b_error);
	if (bp->b_error != ENXIO)
		panic("softdep_deallocate_dependencies: unrecovered I/O error");
}

/*
 * Function to handle asynchronous write errors in the filesystem.
 */
static void
softdep_error(func, error)
	char *func;
	int error;
{

	/* XXX should do something better! */
	printf("%s: got error %d while accessing filesystem\n", func, error);
}

#ifdef DDB

/* exported to ffs_vfsops.c */
extern void db_print_ffs(struct ufsmount *ump);
void
db_print_ffs(struct ufsmount *ump)
{
	db_printf("mp %p (%s) devvp %p\n", ump->um_mountp,
	    ump->um_mountp->mnt_stat.f_mntonname, ump->um_devvp);
	db_printf("    fs %p su_wl %d su_deps %d su_req %d\n",
	    ump->um_fs, ump->softdep_on_worklist,
	    ump->softdep_deps, ump->softdep_req);
}

static void
worklist_print(struct worklist *wk, int verbose)
{

	if (!verbose) {
		db_printf("%s: %p state 0x%b\n", TYPENAME(wk->wk_type), wk,
		    (u_int)wk->wk_state, PRINT_SOFTDEP_FLAGS);
		return;
	}
	db_printf("worklist: %p type %s state 0x%b next %p\n    ", wk,
	    TYPENAME(wk->wk_type), (u_int)wk->wk_state, PRINT_SOFTDEP_FLAGS,
	    LIST_NEXT(wk, wk_list));
	db_print_ffs(VFSTOUFS(wk->wk_mp));
}

static void
inodedep_print(struct inodedep *inodedep, int verbose)
{

	worklist_print(&inodedep->id_list, 0);
	db_printf("    fs %p ino %jd inoblk %jd delta %jd nlink %jd\n",
	    inodedep->id_fs,
	    (intmax_t)inodedep->id_ino,
	    (intmax_t)fsbtodb(inodedep->id_fs,
	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
	    (intmax_t)inodedep->id_nlinkdelta,
	    (intmax_t)inodedep->id_savednlink);

	if (verbose == 0)
		return;

	db_printf("    bmsafemap %p, mkdiradd %p, inoreflst %p\n",
	    inodedep->id_bmsafemap,
	    inodedep->id_mkdiradd,
	    TAILQ_FIRST(&inodedep->id_inoreflst));
	db_printf("    dirremhd %p, pendinghd %p, bufwait %p\n",
	    LIST_FIRST(&inodedep->id_dirremhd),
	    LIST_FIRST(&inodedep->id_pendinghd),
	    LIST_FIRST(&inodedep->id_bufwait));
	db_printf("    inowait %p, inoupdt %p, newinoupdt %p\n",
	    LIST_FIRST(&inodedep->id_inowait),
	    TAILQ_FIRST(&inodedep->id_inoupdt),
	    TAILQ_FIRST(&inodedep->id_newinoupdt));
	db_printf("    extupdt %p, newextupdt %p, freeblklst %p\n",
	    TAILQ_FIRST(&inodedep->id_extupdt),
	    TAILQ_FIRST(&inodedep->id_newextupdt),
	    TAILQ_FIRST(&inodedep->id_freeblklst));
	db_printf("    saveino %p, savedsize %jd, savedextsize %jd\n",
	    inodedep->id_savedino1,
	    (intmax_t)inodedep->id_savedsize,
	    (intmax_t)inodedep->id_savedextsize);
}

static void
newblk_print(struct newblk *nbp)
{

	worklist_print(&nbp->nb_list, 0);
	db_printf("    newblkno %jd\n", (intmax_t)nbp->nb_newblkno);
	db_printf("    jnewblk %p, bmsafemap %p, freefrag %p\n",
	    nbp->nb_jnewblk,
	    nbp->nb_bmsafemap,
	    nbp->nb_freefrag);
	db_printf("    indirdeps %p, newdirblk %p, jwork %p\n",
	    LIST_FIRST(&nbp->nb_indirdeps),
	    LIST_FIRST(&nbp->nb_newdirblk),
	    LIST_FIRST(&nbp->nb_jwork));
}

static void
allocdirect_print(struct allocdirect *adp)
{

	newblk_print(&adp->ad_block);
	db_printf("    oldblkno %jd, oldsize %ld, newsize %ld\n",
	    (intmax_t)adp->ad_oldblkno, adp->ad_oldsize, adp->ad_newsize);
	db_printf("    offset %d, inodedep %p\n",
	    adp->ad_offset, adp->ad_inodedep);
}

static void
allocindir_print(struct allocindir *aip)
{

	newblk_print(&aip->ai_block);
	db_printf("    oldblkno %jd, lbn %jd\n",
	    (intmax_t)aip->ai_oldblkno, (intmax_t)aip->ai_lbn);
	db_printf("    offset %d, indirdep %p\n",
	    aip->ai_offset, aip->ai_indirdep);
}

static void
mkdir_print(struct mkdir *mkdir)
{

	worklist_print(&mkdir->md_list, 0);
	db_printf("    diradd %p, jaddref %p, buf %p\n",
	    mkdir->md_diradd, mkdir->md_jaddref, mkdir->md_buf);
}

DB_SHOW_COMMAND(sd_inodedep, db_show_sd_inodedep)
{

	if (have_addr == 0) {
		db_printf("inodedep address required\n");
		return;
	}
	inodedep_print((struct inodedep *)addr, 1);
}

DB_SHOW_COMMAND(sd_allinodedeps, db_show_sd_allinodedeps)
{
	struct inodedep_hashhead *inodedephd;
	struct inodedep *inodedep;
	struct ufsmount *ump;
	int cnt;

	if (have_addr == 0) {
		db_printf("ufsmount address required\n");
		return;
	}
	ump = (struct ufsmount *)addr;
	for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
		inodedephd = &ump->inodedep_hashtbl[cnt];
		LIST_FOREACH(inodedep, inodedephd, id_hash) {
			inodedep_print(inodedep, 0);
		}
	}
}

DB_SHOW_COMMAND(sd_worklist, db_show_sd_worklist)
{

	if (have_addr == 0) {
		db_printf("worklist address required\n");
		return;
	}
	worklist_print((struct worklist *)addr, 1);
}

DB_SHOW_COMMAND(sd_workhead, db_show_sd_workhead)
{
	struct worklist *wk;
	struct workhead *wkhd;

	if (have_addr == 0) {
		db_printf("worklist address required "
		    "(for example value in bp->b_dep)\n");
		return;
	}
	/*
	 * We often do not have the address of the worklist head but
	 * instead a pointer to its first entry (e.g., we have the
	 * contents of bp->b_dep rather than &bp->b_dep).  But the back
	 * pointer of bp->b_dep will point at the head of the list, so
	 * we cheat and use that instead.  If we are in the middle of
	 * a list we will still get the same result, so nothing
	 * unexpected will result.
	 */
	wk = (struct worklist *)addr;
	if (wk == NULL)
		return;
	wkhd = (struct workhead *)wk->wk_list.le_prev;
	LIST_FOREACH(wk, wkhd, wk_list) {
		switch (wk->wk_type) {
		case D_INODEDEP:
			inodedep_print(WK_INODEDEP(wk), 0);
			continue;
		case D_ALLOCDIRECT:
			allocdirect_print(WK_ALLOCDIRECT(wk));
			continue;
		case D_ALLOCINDIR:
			allocindir_print(WK_ALLOCINDIR(wk));
			continue;
		case D_MKDIR:
			mkdir_print(WK_MKDIR(wk));
			continue;
		default:
			worklist_print(wk, 0);
			continue;
		}
	}
}

DB_SHOW_COMMAND(sd_mkdir, db_show_sd_mkdir)
{

	if (have_addr == 0) {
		db_printf("mkdir address required\n");
		return;
	}
	mkdir_print((struct mkdir *)addr);
}

DB_SHOW_COMMAND(sd_mkdir_list, db_show_sd_mkdir_list)
{
	struct mkdirlist *mkdirlisthd;
	struct mkdir *mkdir;

	if (have_addr == 0) {
		db_printf("mkdir listhead address required\n");
		return;
	}
	mkdirlisthd = (struct mkdirlist *)addr;
	LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
		mkdir_print(mkdir);
		if (mkdir->md_diradd != NULL) {
			db_printf("    ");
			worklist_print(&mkdir->md_diradd->da_list, 0);
		}
		if (mkdir->md_jaddref != NULL) {
			db_printf("    ");
			worklist_print(&mkdir->md_jaddref->ja_list, 0);
		}
	}
}

DB_SHOW_COMMAND(sd_allocdirect, db_show_sd_allocdirect)
{

	if (have_addr == 0) {
		db_printf("allocdirect address required\n");
		return;
	}
	allocdirect_print((struct allocdirect *)addr);
}

DB_SHOW_COMMAND(sd_allocindir, db_show_sd_allocindir)
{

	if (have_addr == 0) {
		db_printf("allocindir address required\n");
		return;
	}
	allocindir_print((struct allocindir *)addr);
}

#endif /* DDB */

#endif /* SOFTUPDATES */
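
/*
 * Illustrative DDB usage of the helpers above (added commentary, not
 * from the original source; the addresses are placeholders):
 *
 *	db> show sd_allinodedeps 0xfffff80012345678
 *	db> show sd_workhead 0xfffff800deadbee0
 *
 * The first walks every inodedep hash chain of the given ufsmount;
 * the second prints each item reachable from a workhead such as
 * bp->b_dep, using the type-specific printers where available.
 */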