1 /* 2 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved. 3 * 4 * The soft updates code is derived from the appendix of a University 5 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, 6 * "Soft Updates: A Solution to the Metadata Update Problem in File 7 * Systems", CSE-TR-254-95, August 1995). 8 * 9 * Further information about soft updates can be obtained from: 10 * 11 * Marshall Kirk McKusick http://www.mckusick.com/softdep/ 12 * 1614 Oxford Street mckusick@mckusick.com 13 * Berkeley, CA 94709-1608 +1-510-843-9542 14 * USA 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 26 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY 27 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 29 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR 30 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 /* 45 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide. 46 */ 47 #ifndef DIAGNOSTIC 48 #define DIAGNOSTIC 49 #endif 50 #ifndef DEBUG 51 #define DEBUG 52 #endif 53 54 #include <sys/param.h> 55 #include <sys/kernel.h> 56 #include <sys/systm.h> 57 #include <sys/stdint.h> 58 #include <sys/bio.h> 59 #include <sys/buf.h> 60 #include <sys/malloc.h> 61 #include <sys/mount.h> 62 #include <sys/proc.h> 63 #include <sys/stat.h> 64 #include <sys/syslog.h> 65 #include <sys/vnode.h> 66 #include <sys/conf.h> 67 #include <ufs/ufs/dir.h> 68 #include <ufs/ufs/extattr.h> 69 #include <ufs/ufs/quota.h> 70 #include <ufs/ufs/inode.h> 71 #include <ufs/ufs/ufsmount.h> 72 #include <ufs/ffs/fs.h> 73 #include <ufs/ffs/softdep.h> 74 #include <ufs/ffs/ffs_extern.h> 75 #include <ufs/ufs/ufs_extern.h> 76 77 /* 78 * These definitions need to be adapted to the system to which 79 * this file is being ported. 80 */ 81 /* 82 * malloc types defined for the softdep system. 
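 * Each workitem type defined below has a matching malloc type (the
 * memtype[] array maps D_XXX to M_XXX).  All of these structures are
 * allocated with M_SOFTDEP_FLAGS (M_WAITOK | M_USE_RESERVE), so the
 * allocations may sleep and may draw on the memory reserve while
 * dependency tracking is in progress.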
83 */ 84 static MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies"); 85 static MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies"); 86 static MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation"); 87 static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map"); 88 static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode"); 89 static MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies"); 90 static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block"); 91 static MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode"); 92 static MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode"); 93 static MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated"); 94 static MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry"); 95 static MALLOC_DEFINE(M_MKDIR, "mkdir","New directory"); 96 static MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted"); 97 static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk","Unclaimed new directory block"); 98 99 #define M_SOFTDEP_FLAGS (M_WAITOK | M_USE_RESERVE) 100 101 #define D_PAGEDEP 0 102 #define D_INODEDEP 1 103 #define D_NEWBLK 2 104 #define D_BMSAFEMAP 3 105 #define D_ALLOCDIRECT 4 106 #define D_INDIRDEP 5 107 #define D_ALLOCINDIR 6 108 #define D_FREEFRAG 7 109 #define D_FREEBLKS 8 110 #define D_FREEFILE 9 111 #define D_DIRADD 10 112 #define D_MKDIR 11 113 #define D_DIRREM 12 114 #define D_NEWDIRBLK 13 115 #define D_LAST D_NEWDIRBLK 116 117 /* 118 * translate from workitem type to memory type 119 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 120 */ 121 static struct malloc_type *memtype[] = { 122 M_PAGEDEP, 123 M_INODEDEP, 124 M_NEWBLK, 125 M_BMSAFEMAP, 126 M_ALLOCDIRECT, 127 M_INDIRDEP, 128 M_ALLOCINDIR, 129 M_FREEFRAG, 130 M_FREEBLKS, 131 M_FREEFILE, 132 M_DIRADD, 133 M_MKDIR, 134 M_DIRREM, 135 M_NEWDIRBLK 136 }; 137 138 #define DtoM(type) (memtype[type]) 139 140 /* 141 * Names of malloc types. 142 */ 143 #define TYPENAME(type) \ 144 ((unsigned)(type) < D_LAST ? memtype[type]->ks_shortdesc : "???") 145 /* 146 * End system adaptation definitions. 147 */ 148 149 /* 150 * Internal function prototypes.
151 */ 152 static void softdep_error(char *, int); 153 static void drain_output(struct vnode *, int); 154 static int getdirtybuf(struct buf **, int); 155 static void clear_remove(struct thread *); 156 static void clear_inodedeps(struct thread *); 157 static int flush_pagedep_deps(struct vnode *, struct mount *, 158 struct diraddhd *); 159 static int flush_inodedep_deps(struct fs *, ino_t); 160 static int flush_deplist(struct allocdirectlst *, int, int *); 161 static int handle_written_filepage(struct pagedep *, struct buf *); 162 static void diradd_inode_written(struct diradd *, struct inodedep *); 163 static int handle_written_inodeblock(struct inodedep *, struct buf *); 164 static void handle_allocdirect_partdone(struct allocdirect *); 165 static void handle_allocindir_partdone(struct allocindir *); 166 static void initiate_write_filepage(struct pagedep *, struct buf *); 167 static void handle_written_mkdir(struct mkdir *, int); 168 static void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *); 169 static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *); 170 static void handle_workitem_freefile(struct freefile *); 171 static void handle_workitem_remove(struct dirrem *, struct vnode *); 172 static struct dirrem *newdirrem(struct buf *, struct inode *, 173 struct inode *, int, struct dirrem **); 174 static void free_diradd(struct diradd *); 175 static void free_allocindir(struct allocindir *, struct inodedep *); 176 static void free_newdirblk(struct newdirblk *); 177 static int indir_trunc(struct freeblks *, ufs2_daddr_t, int, ufs_lbn_t, 178 ufs2_daddr_t *); 179 static void deallocate_dependencies(struct buf *, struct inodedep *); 180 static void free_allocdirect(struct allocdirectlst *, 181 struct allocdirect *, int); 182 static int check_inode_unwritten(struct inodedep *); 183 static int free_inodedep(struct inodedep *); 184 static void handle_workitem_freeblocks(struct freeblks *, int); 185 static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *); 186 static void setup_allocindir_phase2(struct buf *, struct inode *, 187 struct allocindir *); 188 static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t, 189 ufs2_daddr_t); 190 static void handle_workitem_freefrag(struct freefrag *); 191 static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long); 192 static void allocdirect_merge(struct allocdirectlst *, 193 struct allocdirect *, struct allocdirect *); 194 static struct bmsafemap *bmsafemap_lookup(struct buf *); 195 static int newblk_lookup(struct fs *, ufs2_daddr_t, int, struct newblk **); 196 static int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **); 197 static int pagedep_lookup(struct inode *, ufs_lbn_t, int, struct pagedep **); 198 static void pause_timer(void *); 199 static int request_cleanup(int, int); 200 static int process_worklist_item(struct mount *, int); 201 static void add_to_worklist(struct worklist *); 202 203 /* 204 * Exported softdep operations. 205 */ 206 static void softdep_disk_io_initiation(struct buf *); 207 static void softdep_disk_write_complete(struct buf *); 208 static void softdep_deallocate_dependencies(struct buf *); 209 static void softdep_move_dependencies(struct buf *, struct buf *); 210 static int softdep_count_dependencies(struct buf *bp, int); 211 212 /* 213 * Locking primitives. 214 * 215 * For a uniprocessor, all we need to do is protect against disk 216 * interrupts. For a multiprocessor, this lock would have to be 217 * a mutex. 
A single mutex is used throughout this file, though 218 * finer grain locking could be used if contention warranted it. 219 * 220 * For a multiprocessor, the sleep call would accept a lock and 221 * release it after the sleep processing was complete. In a uniprocessor 222 * implementation there is no such interlock, so we simply mark 223 * the places where it needs to be done with the `interlocked' form 224 * of the lock calls. Since the uniprocessor sleep already interlocks 225 * the spl, there is nothing that really needs to be done. 226 */ 227 #ifndef /* NOT */ DEBUG 228 static struct lockit { 229 int lkt_spl; 230 } lk = { 0 }; 231 #define ACQUIRE_LOCK(lk) (lk)->lkt_spl = splbio() 232 #define FREE_LOCK(lk) splx((lk)->lkt_spl) 233 234 #else /* DEBUG */ 235 #define NOHOLDER ((struct thread *)-1) 236 #define SPECIAL_FLAG ((struct thread *)-2) 237 static struct lockit { 238 int lkt_spl; 239 struct thread *lkt_held; 240 } lk = { 0, NOHOLDER }; 241 static int lockcnt; 242 243 static void acquire_lock(struct lockit *); 244 static void free_lock(struct lockit *); 245 void softdep_panic(char *); 246 247 #define ACQUIRE_LOCK(lk) acquire_lock(lk) 248 #define FREE_LOCK(lk) free_lock(lk) 249 250 static void 251 acquire_lock(lk) 252 struct lockit *lk; 253 { 254 struct thread *holder; 255 256 if (lk->lkt_held != NOHOLDER) { 257 holder = lk->lkt_held; 258 FREE_LOCK(lk); 259 if (holder == curthread) 260 panic("softdep_lock: locking against myself"); 261 else 262 panic("softdep_lock: lock held by %p", holder); 263 } 264 lk->lkt_spl = splbio(); 265 lk->lkt_held = curthread; 266 lockcnt++; 267 } 268 269 static void 270 free_lock(lk) 271 struct lockit *lk; 272 { 273 274 if (lk->lkt_held == NOHOLDER) 275 panic("softdep_unlock: lock not held"); 276 lk->lkt_held = NOHOLDER; 277 splx(lk->lkt_spl); 278 } 279 280 /* 281 * Function to release soft updates lock and panic. 282 */ 283 void 284 softdep_panic(msg) 285 char *msg; 286 { 287 288 if (lk.lkt_held != NOHOLDER) 289 FREE_LOCK(&lk); 290 panic(msg); 291 } 292 #endif /* DEBUG */ 293 294 static int interlocked_sleep(struct lockit *, int, void *, struct mtx *, int, 295 const char *, int); 296 297 /* 298 * When going to sleep, we must save our SPL so that it does 299 * not get lost if some other process uses the lock while we 300 * are sleeping. We restore it after we have slept. This routine 301 * wraps the interlocking with functions that sleep. The list 302 * below enumerates the available set of operations.
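 * As an illustrative excerpt (the call appears in sema_get() below),
 * sleeping on a semaphore address while holding the softdep lock looks
 * like:
 *
 *	interlocked_sleep(&lk, SLEEP, (caddr_t)semap, NULL,
 *	    semap->prio, semap->name, semap->timo);
 *
 * The spl saved from lk->lkt_spl is restored after the msleep() or
 * BUF_LOCK() completes.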
303 */ 304 #define UNKNOWN 0 305 #define SLEEP 1 306 #define LOCKBUF 2 307 308 static int 309 interlocked_sleep(lk, op, ident, mtx, flags, wmesg, timo) 310 struct lockit *lk; 311 int op; 312 void *ident; 313 struct mtx *mtx; 314 int flags; 315 const char *wmesg; 316 int timo; 317 { 318 struct thread *holder; 319 int s, retval; 320 321 s = lk->lkt_spl; 322 # ifdef DEBUG 323 if (lk->lkt_held == NOHOLDER) 324 panic("interlocked_sleep: lock not held"); 325 lk->lkt_held = NOHOLDER; 326 # endif /* DEBUG */ 327 switch (op) { 328 case SLEEP: 329 retval = msleep(ident, mtx, flags, wmesg, timo); 330 break; 331 case LOCKBUF: 332 retval = BUF_LOCK((struct buf *)ident, flags); 333 break; 334 default: 335 panic("interlocked_sleep: unknown operation"); 336 } 337 # ifdef DEBUG 338 if (lk->lkt_held != NOHOLDER) { 339 holder = lk->lkt_held; 340 FREE_LOCK(lk); 341 if (holder == curthread) 342 panic("interlocked_sleep: locking against self"); 343 else 344 panic("interlocked_sleep: lock held by %p", holder); 345 } 346 lk->lkt_held = curthread; 347 lockcnt++; 348 # endif /* DEBUG */ 349 lk->lkt_spl = s; 350 return (retval); 351 } 352 353 /* 354 * Place holder for real semaphores. 355 */ 356 struct sema { 357 int value; 358 struct thread *holder; 359 char *name; 360 int prio; 361 int timo; 362 }; 363 static void sema_init(struct sema *, char *, int, int); 364 static int sema_get(struct sema *, struct lockit *); 365 static void sema_release(struct sema *); 366 367 static void 368 sema_init(semap, name, prio, timo) 369 struct sema *semap; 370 char *name; 371 int prio, timo; 372 { 373 374 semap->holder = NOHOLDER; 375 semap->value = 0; 376 semap->name = name; 377 semap->prio = prio; 378 semap->timo = timo; 379 } 380 381 static int 382 sema_get(semap, interlock) 383 struct sema *semap; 384 struct lockit *interlock; 385 { 386 387 if (semap->value++ > 0) { 388 if (interlock != NULL) { 389 interlocked_sleep(interlock, SLEEP, (caddr_t)semap, 390 NULL, semap->prio, semap->name, 391 semap->timo); 392 FREE_LOCK(interlock); 393 } else { 394 tsleep((caddr_t)semap, semap->prio, semap->name, 395 semap->timo); 396 } 397 return (0); 398 } 399 semap->holder = curthread; 400 if (interlock != NULL) 401 FREE_LOCK(interlock); 402 return (1); 403 } 404 405 static void 406 sema_release(semap) 407 struct sema *semap; 408 { 409 410 if (semap->value <= 0 || semap->holder != curthread) { 411 if (lk.lkt_held != NOHOLDER) 412 FREE_LOCK(&lk); 413 panic("sema_release: not held"); 414 } 415 if (--semap->value > 0) { 416 semap->value = 0; 417 wakeup(semap); 418 } 419 semap->holder = NOHOLDER; 420 } 421 422 /* 423 * Worklist queue management. 424 * These routines require that the lock be held. 
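 * A typical caller brackets these macros with the softdep lock, for
 * example (sketch of the pattern used throughout this file):
 *
 *	ACQUIRE_LOCK(&lk);
 *	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
 *	FREE_LOCK(&lk);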
425 */ 426 #ifndef /* NOT */ DEBUG 427 #define WORKLIST_INSERT(head, item) do { \ 428 (item)->wk_state |= ONWORKLIST; \ 429 LIST_INSERT_HEAD(head, item, wk_list); \ 430 } while (0) 431 #define WORKLIST_REMOVE(item) do { \ 432 (item)->wk_state &= ~ONWORKLIST; \ 433 LIST_REMOVE(item, wk_list); \ 434 } while (0) 435 #define WORKITEM_FREE(item, type) FREE(item, DtoM(type)) 436 437 #else /* DEBUG */ 438 static void worklist_insert(struct workhead *, struct worklist *); 439 static void worklist_remove(struct worklist *); 440 static void workitem_free(struct worklist *, int); 441 442 #define WORKLIST_INSERT(head, item) worklist_insert(head, item) 443 #define WORKLIST_REMOVE(item) worklist_remove(item) 444 #define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type) 445 446 static void 447 worklist_insert(head, item) 448 struct workhead *head; 449 struct worklist *item; 450 { 451 452 if (lk.lkt_held == NOHOLDER) 453 panic("worklist_insert: lock not held"); 454 if (item->wk_state & ONWORKLIST) { 455 FREE_LOCK(&lk); 456 panic("worklist_insert: already on list"); 457 } 458 item->wk_state |= ONWORKLIST; 459 LIST_INSERT_HEAD(head, item, wk_list); 460 } 461 462 static void 463 worklist_remove(item) 464 struct worklist *item; 465 { 466 467 if (lk.lkt_held == NOHOLDER) 468 panic("worklist_remove: lock not held"); 469 if ((item->wk_state & ONWORKLIST) == 0) { 470 FREE_LOCK(&lk); 471 panic("worklist_remove: not on list"); 472 } 473 item->wk_state &= ~ONWORKLIST; 474 LIST_REMOVE(item, wk_list); 475 } 476 477 static void 478 workitem_free(item, type) 479 struct worklist *item; 480 int type; 481 { 482 483 if (item->wk_state & ONWORKLIST) { 484 if (lk.lkt_held != NOHOLDER) 485 FREE_LOCK(&lk); 486 panic("workitem_free: still on list"); 487 } 488 if (item->wk_type != type) { 489 if (lk.lkt_held != NOHOLDER) 490 FREE_LOCK(&lk); 491 panic("workitem_free: type mismatch"); 492 } 493 FREE(item, DtoM(type)); 494 } 495 #endif /* DEBUG */ 496 497 /* 498 * Workitem queue management 499 */ 500 static struct workhead softdep_workitem_pending; 501 static int num_on_worklist; /* number of worklist items to be processed */ 502 static int softdep_worklist_busy; /* 1 => trying to do unmount */ 503 static int softdep_worklist_req; /* serialized waiters */ 504 static int max_softdeps; /* maximum number of structs before slowdown */ 505 static int tickdelay = 2; /* number of ticks to pause during slowdown */ 506 static int proc_waiting; /* tracks whether we have a timeout posted */ 507 static int *stat_countp; /* statistic to count in proc_waiting timeout */ 508 static struct callout_handle handle; /* handle on posted proc_waiting timeout */ 509 static struct thread *filesys_syncer; /* proc of filesystem syncer process */ 510 static int req_clear_inodedeps; /* syncer process flush some inodedeps */ 511 #define FLUSH_INODES 1 512 static int req_clear_remove; /* syncer process flush some freeblks */ 513 #define FLUSH_REMOVE 2 514 #define FLUSH_REMOVE_WAIT 3 515 /* 516 * runtime statistics 517 */ 518 static int stat_worklist_push; /* number of worklist cleanups */ 519 static int stat_blk_limit_push; /* number of times block limit neared */ 520 static int stat_ino_limit_push; /* number of times inode limit neared */ 521 static int stat_blk_limit_hit; /* number of times block slowdown imposed */ 522 static int stat_ino_limit_hit; /* number of times inode slowdown imposed */ 523 static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */ 524 static int stat_indir_blk_ptrs; /* bufs redirtied as indir 
ptrs not written */ 525 static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */ 526 static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */ 527 static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */ 528 #ifdef DEBUG 529 #include <vm/vm.h> 530 #include <sys/sysctl.h> 531 SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, ""); 532 SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, ""); 533 SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,""); 534 SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,""); 535 SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,""); 536 SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, ""); 537 SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, ""); 538 SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, ""); 539 SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, ""); 540 SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, ""); 541 SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, ""); 542 SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, ""); 543 #endif /* DEBUG */ 544 545 /* 546 * Add an item to the end of the work queue. 547 * This routine requires that the lock be held. 548 * This is the only routine that adds items to the list. 549 * The following routine is the only one that removes items 550 * and does so in order from first to last. 551 */ 552 static void 553 add_to_worklist(wk) 554 struct worklist *wk; 555 { 556 static struct worklist *worklist_tail; 557 558 if (wk->wk_state & ONWORKLIST) { 559 if (lk.lkt_held != NOHOLDER) 560 FREE_LOCK(&lk); 561 panic("add_to_worklist: already on list"); 562 } 563 wk->wk_state |= ONWORKLIST; 564 if (LIST_FIRST(&softdep_workitem_pending) == NULL) 565 LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list); 566 else 567 LIST_INSERT_AFTER(worklist_tail, wk, wk_list); 568 worklist_tail = wk; 569 num_on_worklist += 1; 570 } 571 572 /* 573 * Process that runs once per second to handle items in the background queue. 574 * 575 * Note that we ensure that everything is done in the order in which they 576 * appear in the queue. The code below depends on this property to ensure 577 * that blocks of a file are freed before the inode itself is freed. This 578 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated 579 * until all the old ones have been purged from the dependency lists. 580 */ 581 int 582 softdep_process_worklist(matchmnt) 583 struct mount *matchmnt; 584 { 585 struct thread *td = curthread; 586 int cnt, matchcnt, loopcount; 587 long starttime; 588 589 /* 590 * Record the process identifier of our caller so that we can give 591 * this process preferential treatment in request_cleanup below. 592 */ 593 filesys_syncer = td; 594 matchcnt = 0; 595 596 /* 597 * There is no danger of having multiple processes run this 598 * code, but we have to single-thread it when softdep_flushfiles() 599 * is in operation to get an accurate count of the number of items 600 * related to its mount point that are in the list. 601 */ 602 if (matchmnt == NULL) { 603 if (softdep_worklist_busy < 0) 604 return(-1); 605 softdep_worklist_busy += 1; 606 } 607 608 /* 609 * If requested, try removing inode or removal dependencies. 
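 * These flags are raised elsewhere (see request_cleanup()) when too
 * many dependencies have accumulated; the wakeup_one(&proc_waiting)
 * below unblocks the process that asked for the cleanup.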
610 */ 611 if (req_clear_inodedeps) { 612 clear_inodedeps(td); 613 req_clear_inodedeps -= 1; 614 wakeup_one(&proc_waiting); 615 } 616 if (req_clear_remove) { 617 clear_remove(td); 618 req_clear_remove -= 1; 619 wakeup_one(&proc_waiting); 620 } 621 loopcount = 1; 622 starttime = time_second; 623 while (num_on_worklist > 0) { 624 if ((cnt = process_worklist_item(matchmnt, 0)) == -1) 625 break; 626 else 627 matchcnt += cnt; 628 629 /* 630 * If a umount operation wants to run the worklist 631 * accurately, abort. 632 */ 633 if (softdep_worklist_req && matchmnt == NULL) { 634 matchcnt = -1; 635 break; 636 } 637 638 /* 639 * If requested, try removing inode or removal dependencies. 640 */ 641 if (req_clear_inodedeps) { 642 clear_inodedeps(td); 643 req_clear_inodedeps -= 1; 644 wakeup_one(&proc_waiting); 645 } 646 if (req_clear_remove) { 647 clear_remove(td); 648 req_clear_remove -= 1; 649 wakeup_one(&proc_waiting); 650 } 651 /* 652 * We do not generally want to stop for buffer space, but if 653 * we are really being a buffer hog, we will stop and wait. 654 */ 655 if (loopcount++ % 128 == 0) 656 bwillwrite(); 657 /* 658 * Never allow processing to run for more than one 659 * second. Otherwise the other syncer tasks may get 660 * excessively backlogged. 661 */ 662 if (starttime != time_second && matchmnt == NULL) { 663 matchcnt = -1; 664 break; 665 } 666 } 667 if (matchmnt == NULL) { 668 softdep_worklist_busy -= 1; 669 if (softdep_worklist_req && softdep_worklist_busy == 0) 670 wakeup(&softdep_worklist_req); 671 } 672 return (matchcnt); 673 } 674 675 /* 676 * Process one item on the worklist. 677 */ 678 static int 679 process_worklist_item(matchmnt, flags) 680 struct mount *matchmnt; 681 int flags; 682 { 683 struct worklist *wk; 684 struct mount *mp; 685 struct vnode *vp; 686 int matchcnt = 0; 687 688 ACQUIRE_LOCK(&lk); 689 /* 690 * Normally we just process each item on the worklist in order. 691 * However, if we are in a situation where we cannot lock any 692 * inodes, we have to skip over any dirrem requests whose 693 * vnodes are resident and locked. 
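 * The skip is implemented by probing the vnode with a non-blocking
 * VFS_VGET() (LK_NOWAIT | LK_EXCLUSIVE); a dirrem is selected only if
 * its vnode could be locked without sleeping.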
694 */ 695 vp = NULL; 696 LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) { 697 if (wk->wk_state & INPROGRESS) 698 continue; 699 if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM) 700 break; 701 wk->wk_state |= INPROGRESS; 702 FREE_LOCK(&lk); 703 VFS_VGET(WK_DIRREM(wk)->dm_mnt, WK_DIRREM(wk)->dm_oldinum, 704 LK_NOWAIT | LK_EXCLUSIVE, &vp); 705 ACQUIRE_LOCK(&lk); 706 wk->wk_state &= ~INPROGRESS; 707 if (vp != NULL) 708 break; 709 } 710 if (wk == 0) { 711 FREE_LOCK(&lk); 712 return (-1); 713 } 714 WORKLIST_REMOVE(wk); 715 num_on_worklist -= 1; 716 FREE_LOCK(&lk); 717 switch (wk->wk_type) { 718 719 case D_DIRREM: 720 /* removal of a directory entry */ 721 mp = WK_DIRREM(wk)->dm_mnt; 722 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 723 panic("%s: dirrem on suspended filesystem", 724 "process_worklist_item"); 725 if (mp == matchmnt) 726 matchcnt += 1; 727 handle_workitem_remove(WK_DIRREM(wk), vp); 728 break; 729 730 case D_FREEBLKS: 731 /* releasing blocks and/or fragments from a file */ 732 mp = WK_FREEBLKS(wk)->fb_mnt; 733 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 734 panic("%s: freeblks on suspended filesystem", 735 "process_worklist_item"); 736 if (mp == matchmnt) 737 matchcnt += 1; 738 handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT); 739 break; 740 741 case D_FREEFRAG: 742 /* releasing a fragment when replaced as a file grows */ 743 mp = WK_FREEFRAG(wk)->ff_mnt; 744 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 745 panic("%s: freefrag on suspended filesystem", 746 "process_worklist_item"); 747 if (mp == matchmnt) 748 matchcnt += 1; 749 handle_workitem_freefrag(WK_FREEFRAG(wk)); 750 break; 751 752 case D_FREEFILE: 753 /* releasing an inode when its link count drops to 0 */ 754 mp = WK_FREEFILE(wk)->fx_mnt; 755 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 756 panic("%s: freefile on suspended filesystem", 757 "process_worklist_item"); 758 if (mp == matchmnt) 759 matchcnt += 1; 760 handle_workitem_freefile(WK_FREEFILE(wk)); 761 break; 762 763 default: 764 panic("%s_process_worklist: Unknown type %s", 765 "softdep", TYPENAME(wk->wk_type)); 766 /* NOTREACHED */ 767 } 768 return (matchcnt); 769 } 770 771 /* 772 * Move dependencies from one buffer to another. 773 */ 774 static void 775 softdep_move_dependencies(oldbp, newbp) 776 struct buf *oldbp; 777 struct buf *newbp; 778 { 779 struct worklist *wk, *wktail; 780 781 if (LIST_FIRST(&newbp->b_dep) != NULL) 782 panic("softdep_move_dependencies: need merge code"); 783 wktail = 0; 784 ACQUIRE_LOCK(&lk); 785 while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) { 786 LIST_REMOVE(wk, wk_list); 787 if (wktail == 0) 788 LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list); 789 else 790 LIST_INSERT_AFTER(wktail, wk, wk_list); 791 wktail = wk; 792 } 793 FREE_LOCK(&lk); 794 } 795 796 /* 797 * Purge the work list of all items associated with a particular mount point. 798 */ 799 int 800 softdep_flushworklist(oldmnt, countp, td) 801 struct mount *oldmnt; 802 int *countp; 803 struct thread *td; 804 { 805 struct vnode *devvp; 806 int count, error = 0; 807 808 /* 809 * Await our turn to clear out the queue, then serialize access. 810 */ 811 while (softdep_worklist_busy) { 812 softdep_worklist_req += 1; 813 tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0); 814 softdep_worklist_req -= 1; 815 } 816 softdep_worklist_busy = -1; 817 /* 818 * Alternately flush the block device associated with the mount 819 * point and process any dependencies that the flushing 820 * creates. We continue until no more worklist dependencies 821 * are found. 
822 */ 823 *countp = 0; 824 devvp = VFSTOUFS(oldmnt)->um_devvp; 825 while ((count = softdep_process_worklist(oldmnt)) > 0) { 826 *countp += count; 827 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td); 828 error = VOP_FSYNC(devvp, td->td_ucred, MNT_WAIT, td); 829 VOP_UNLOCK(devvp, 0, td); 830 if (error) 831 break; 832 } 833 softdep_worklist_busy = 0; 834 if (softdep_worklist_req) 835 wakeup(&softdep_worklist_req); 836 return (error); 837 } 838 839 /* 840 * Flush all vnodes and worklist items associated with a specified mount point. 841 */ 842 int 843 softdep_flushfiles(oldmnt, flags, td) 844 struct mount *oldmnt; 845 int flags; 846 struct thread *td; 847 { 848 int error, count, loopcnt; 849 850 error = 0; 851 852 /* 853 * Alternately flush the vnodes associated with the mount 854 * point and process any dependencies that the flushing 855 * creates. In theory, this loop can happen at most twice, 856 * but we give it a few extra just to be sure. 857 */ 858 for (loopcnt = 10; loopcnt > 0; loopcnt--) { 859 /* 860 * Do another flush in case any vnodes were brought in 861 * as part of the cleanup operations. 862 */ 863 if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0) 864 break; 865 if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 || 866 count == 0) 867 break; 868 } 869 /* 870 * If we are unmounting then it is an error to fail. If we 871 * are simply trying to downgrade to read-only, then filesystem 872 * activity can keep us busy forever, so we just fail with EBUSY. 873 */ 874 if (loopcnt == 0) { 875 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 876 panic("softdep_flushfiles: looping"); 877 error = EBUSY; 878 } 879 return (error); 880 } 881 882 /* 883 * Structure hashing. 884 * 885 * There are three types of structures that can be looked up: 886 * 1) pagedep structures identified by mount point, inode number, 887 * and logical block. 888 * 2) inodedep structures identified by mount point and inode number. 889 * 3) newblk structures identified by mount point and 890 * physical block number. 891 * 892 * The "pagedep" and "inodedep" dependency structures are hashed 893 * separately from the file blocks and inodes to which they correspond. 894 * This separation helps when the in-memory copy of an inode or 895 * file block must be replaced. It also obviates the need to access 896 * an inode or file page when simply updating (or de-allocating) 897 * dependency structures. Lookup of newblk structures is needed to 898 * find newly allocated blocks when trying to associate them with 899 * their allocdirect or allocindir structure. 900 * 901 * The lookup routines optionally create and hash a new instance when 902 * an existing entry is not found. 903 */ 904 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 905 #define NODELAY 0x0002 /* cannot do background work */ 906 907 /* 908 * Structures and routines associated with pagedep caching. 909 */ 910 LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl; 911 u_long pagedep_hash; /* size of hash table - 1 */ 912 #define PAGEDEP_HASH(mp, inum, lbn) \ 913 (&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \ 914 pagedep_hash]) 915 static struct sema pagedep_in_progress; 916 917 /* 918 * Look up a pagedep. Return 1 if found, 0 if not found or found 919 * when asked to allocate but not associated with any buffer. 920 * If not found, allocate if DEPALLOC flag is passed. 921 * Found or allocated entry is returned in pagedeppp. 922 * This routine must be called with splbio interrupts blocked. 
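 * Callers hold the lock and typically use it as in this excerpt from
 * the allocdirect setup code later in this file:
 *
 *	if ((ip->i_mode & IFMT) == IFDIR &&
 *	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
 *		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);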
923 */ 924 static int 925 pagedep_lookup(ip, lbn, flags, pagedeppp) 926 struct inode *ip; 927 ufs_lbn_t lbn; 928 int flags; 929 struct pagedep **pagedeppp; 930 { 931 struct pagedep *pagedep; 932 struct pagedep_hashhead *pagedephd; 933 struct mount *mp; 934 int i; 935 936 #ifdef DEBUG 937 if (lk.lkt_held == NOHOLDER) 938 panic("pagedep_lookup: lock not held"); 939 #endif 940 mp = ITOV(ip)->v_mount; 941 pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn); 942 top: 943 LIST_FOREACH(pagedep, pagedephd, pd_hash) 944 if (ip->i_number == pagedep->pd_ino && 945 lbn == pagedep->pd_lbn && 946 mp == pagedep->pd_mnt) 947 break; 948 if (pagedep) { 949 *pagedeppp = pagedep; 950 if ((flags & DEPALLOC) != 0 && 951 (pagedep->pd_state & ONWORKLIST) == 0) 952 return (0); 953 return (1); 954 } 955 if ((flags & DEPALLOC) == 0) { 956 *pagedeppp = NULL; 957 return (0); 958 } 959 if (sema_get(&pagedep_in_progress, &lk) == 0) { 960 ACQUIRE_LOCK(&lk); 961 goto top; 962 } 963 MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP, 964 M_SOFTDEP_FLAGS|M_ZERO); 965 pagedep->pd_list.wk_type = D_PAGEDEP; 966 pagedep->pd_mnt = mp; 967 pagedep->pd_ino = ip->i_number; 968 pagedep->pd_lbn = lbn; 969 LIST_INIT(&pagedep->pd_dirremhd); 970 LIST_INIT(&pagedep->pd_pendinghd); 971 for (i = 0; i < DAHASHSZ; i++) 972 LIST_INIT(&pagedep->pd_diraddhd[i]); 973 ACQUIRE_LOCK(&lk); 974 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 975 sema_release(&pagedep_in_progress); 976 *pagedeppp = pagedep; 977 return (0); 978 } 979 980 /* 981 * Structures and routines associated with inodedep caching. 982 */ 983 LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl; 984 static u_long inodedep_hash; /* size of hash table - 1 */ 985 static long num_inodedep; /* number of inodedep allocated */ 986 #define INODEDEP_HASH(fs, inum) \ 987 (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash]) 988 static struct sema inodedep_in_progress; 989 990 /* 991 * Look up a inodedep. Return 1 if found, 0 if not found. 992 * If not found, allocate if DEPALLOC flag is passed. 993 * Found or allocated entry is returned in inodedeppp. 994 * This routine must be called with splbio interrupts blocked. 995 */ 996 static int 997 inodedep_lookup(fs, inum, flags, inodedeppp) 998 struct fs *fs; 999 ino_t inum; 1000 int flags; 1001 struct inodedep **inodedeppp; 1002 { 1003 struct inodedep *inodedep; 1004 struct inodedep_hashhead *inodedephd; 1005 int firsttry; 1006 1007 #ifdef DEBUG 1008 if (lk.lkt_held == NOHOLDER) 1009 panic("inodedep_lookup: lock not held"); 1010 #endif 1011 firsttry = 1; 1012 inodedephd = INODEDEP_HASH(fs, inum); 1013 top: 1014 LIST_FOREACH(inodedep, inodedephd, id_hash) 1015 if (inum == inodedep->id_ino && fs == inodedep->id_fs) 1016 break; 1017 if (inodedep) { 1018 *inodedeppp = inodedep; 1019 return (1); 1020 } 1021 if ((flags & DEPALLOC) == 0) { 1022 *inodedeppp = NULL; 1023 return (0); 1024 } 1025 /* 1026 * If we are over our limit, try to improve the situation. 
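 * "Over our limit" means num_inodedep exceeds max_softdeps; in that
 * case a single request_cleanup(FLUSH_INODES, 1) attempt, guarded by
 * firsttry, is made before the allocation below proceeds.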
1027 */ 1028 if (num_inodedep > max_softdeps && firsttry && (flags & NODELAY) == 0 && 1029 request_cleanup(FLUSH_INODES, 1)) { 1030 firsttry = 0; 1031 goto top; 1032 } 1033 if (sema_get(&inodedep_in_progress, &lk) == 0) { 1034 ACQUIRE_LOCK(&lk); 1035 goto top; 1036 } 1037 num_inodedep += 1; 1038 MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep), 1039 M_INODEDEP, M_SOFTDEP_FLAGS); 1040 inodedep->id_list.wk_type = D_INODEDEP; 1041 inodedep->id_fs = fs; 1042 inodedep->id_ino = inum; 1043 inodedep->id_state = ALLCOMPLETE; 1044 inodedep->id_nlinkdelta = 0; 1045 inodedep->id_savedino1 = NULL; 1046 inodedep->id_savedsize = -1; 1047 inodedep->id_savedextsize = -1; 1048 inodedep->id_buf = NULL; 1049 LIST_INIT(&inodedep->id_pendinghd); 1050 LIST_INIT(&inodedep->id_inowait); 1051 LIST_INIT(&inodedep->id_bufwait); 1052 TAILQ_INIT(&inodedep->id_inoupdt); 1053 TAILQ_INIT(&inodedep->id_newinoupdt); 1054 TAILQ_INIT(&inodedep->id_extupdt); 1055 TAILQ_INIT(&inodedep->id_newextupdt); 1056 ACQUIRE_LOCK(&lk); 1057 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 1058 sema_release(&inodedep_in_progress); 1059 *inodedeppp = inodedep; 1060 return (0); 1061 } 1062 1063 /* 1064 * Structures and routines associated with newblk caching. 1065 */ 1066 LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl; 1067 u_long newblk_hash; /* size of hash table - 1 */ 1068 #define NEWBLK_HASH(fs, inum) \ 1069 (&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash]) 1070 static struct sema newblk_in_progress; 1071 1072 /* 1073 * Look up a newblk. Return 1 if found, 0 if not found. 1074 * If not found, allocate if DEPALLOC flag is passed. 1075 * Found or allocated entry is returned in newblkpp. 1076 */ 1077 static int 1078 newblk_lookup(fs, newblkno, flags, newblkpp) 1079 struct fs *fs; 1080 ufs2_daddr_t newblkno; 1081 int flags; 1082 struct newblk **newblkpp; 1083 { 1084 struct newblk *newblk; 1085 struct newblk_hashhead *newblkhd; 1086 1087 newblkhd = NEWBLK_HASH(fs, newblkno); 1088 top: 1089 LIST_FOREACH(newblk, newblkhd, nb_hash) 1090 if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs) 1091 break; 1092 if (newblk) { 1093 *newblkpp = newblk; 1094 return (1); 1095 } 1096 if ((flags & DEPALLOC) == 0) { 1097 *newblkpp = NULL; 1098 return (0); 1099 } 1100 if (sema_get(&newblk_in_progress, 0) == 0) 1101 goto top; 1102 MALLOC(newblk, struct newblk *, sizeof(struct newblk), 1103 M_NEWBLK, M_SOFTDEP_FLAGS); 1104 newblk->nb_state = 0; 1105 newblk->nb_fs = fs; 1106 newblk->nb_newblkno = newblkno; 1107 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 1108 sema_release(&newblk_in_progress); 1109 *newblkpp = newblk; 1110 return (0); 1111 } 1112 1113 /* 1114 * Executed during filesystem system initialization before 1115 * mounting any filesystems. 
1116 */ 1117 void 1118 softdep_initialize() 1119 { 1120 1121 LIST_INIT(&mkdirlisthd); 1122 LIST_INIT(&softdep_workitem_pending); 1123 max_softdeps = desiredvnodes * 8; 1124 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 1125 &pagedep_hash); 1126 sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0); 1127 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash); 1128 sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0); 1129 newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash); 1130 sema_init(&newblk_in_progress, "newblk", PRIBIO, 0); 1131 1132 /* hooks through which the main kernel code calls us */ 1133 softdep_process_worklist_hook = softdep_process_worklist; 1134 softdep_fsync_hook = softdep_fsync; 1135 1136 /* initialise bioops hack */ 1137 bioops.io_start = softdep_disk_io_initiation; 1138 bioops.io_complete = softdep_disk_write_complete; 1139 bioops.io_deallocate = softdep_deallocate_dependencies; 1140 bioops.io_movedeps = softdep_move_dependencies; 1141 bioops.io_countdeps = softdep_count_dependencies; 1142 } 1143 1144 /* 1145 * Executed after all filesystems have been unmounted during 1146 * filesystem module unload. 1147 */ 1148 void 1149 softdep_uninitialize() 1150 { 1151 1152 softdep_process_worklist_hook = NULL; 1153 softdep_fsync_hook = NULL; 1154 hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash); 1155 hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash); 1156 hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash); 1157 } 1158 1159 /* 1160 * Called at mount time to notify the dependency code that a 1161 * filesystem wishes to use it. 1162 */ 1163 int 1164 softdep_mount(devvp, mp, fs, cred) 1165 struct vnode *devvp; 1166 struct mount *mp; 1167 struct fs *fs; 1168 struct ucred *cred; 1169 { 1170 struct csum_total cstotal; 1171 struct cg *cgp; 1172 struct buf *bp; 1173 int error, cyl; 1174 1175 mp->mnt_flag &= ~MNT_ASYNC; 1176 mp->mnt_flag |= MNT_SOFTDEP; 1177 /* 1178 * When doing soft updates, the counters in the 1179 * superblock may have gotten out of sync, so we have 1180 * to scan the cylinder groups and recalculate them. 1181 */ 1182 if (fs->fs_clean != 0) 1183 return (0); 1184 bzero(&cstotal, sizeof cstotal); 1185 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 1186 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 1187 fs->fs_cgsize, cred, &bp)) != 0) { 1188 brelse(bp); 1189 return (error); 1190 } 1191 cgp = (struct cg *)bp->b_data; 1192 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 1193 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 1194 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 1195 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 1196 fs->fs_cs(fs, cyl) = cgp->cg_cs; 1197 brelse(bp); 1198 } 1199 #ifdef DEBUG 1200 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 1201 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 1202 #endif 1203 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 1204 return (0); 1205 } 1206 1207 /* 1208 * Protecting the freemaps (or bitmaps). 1209 * 1210 * To eliminate the need to execute fsck before mounting a filesystem 1211 * after a power failure, one must (conservatively) guarantee that the 1212 * on-disk copy of the bitmaps never indicate that a live inode or block is 1213 * free. So, when a block or inode is allocated, the bitmap should be 1214 * updated (on disk) before any new pointers. When a block or inode is 1215 * freed, the bitmap should not be updated until all pointers have been 1216 * reset. 
The latter dependency is handled by the delayed de-allocation 1217 * approach described below for block and inode de-allocation. The former 1218 * dependency is handled by calling the following procedure when a block or 1219 * inode is allocated. When an inode is allocated an "inodedep" is created 1220 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 1221 * Each "inodedep" is also inserted into the hash indexing structure so 1222 * that any additional link additions can be made dependent on the inode 1223 * allocation. 1224 * 1225 * The ufs filesystem maintains a number of free block counts (e.g., per 1226 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 1227 * in addition to the bitmaps. These counts are used to improve efficiency 1228 * during allocation and therefore must be consistent with the bitmaps. 1229 * There is no convenient way to guarantee post-crash consistency of these 1230 * counts with simple update ordering, for two main reasons: (1) The counts 1231 * and bitmaps for a single cylinder group block are not in the same disk 1232 * sector. If a disk write is interrupted (e.g., by power failure), one may 1233 * be written and the other not. (2) Some of the counts are located in the 1234 * superblock rather than the cylinder group block. So, we focus our soft 1235 * updates implementation on protecting the bitmaps. When mounting a 1236 * filesystem, we recompute the auxiliary counts from the bitmaps. 1237 */ 1238 1239 /* 1240 * Called just after updating the cylinder group block to allocate an inode. 1241 */ 1242 void 1243 softdep_setup_inomapdep(bp, ip, newinum) 1244 struct buf *bp; /* buffer for cylgroup block with inode map */ 1245 struct inode *ip; /* inode related to allocation */ 1246 ino_t newinum; /* new inode number being allocated */ 1247 { 1248 struct inodedep *inodedep; 1249 struct bmsafemap *bmsafemap; 1250 1251 /* 1252 * Create a dependency for the newly allocated inode. 1253 * Panic if it already exists as something is seriously wrong. 1254 * Otherwise add it to the dependency list for the buffer holding 1255 * the cylinder group map from which it was allocated. 1256 */ 1257 ACQUIRE_LOCK(&lk); 1258 if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) { 1259 FREE_LOCK(&lk); 1260 panic("softdep_setup_inomapdep: found inode"); 1261 } 1262 inodedep->id_buf = bp; 1263 inodedep->id_state &= ~DEPCOMPLETE; 1264 bmsafemap = bmsafemap_lookup(bp); 1265 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 1266 FREE_LOCK(&lk); 1267 } 1268 1269 /* 1270 * Called just after updating the cylinder group block to 1271 * allocate block or fragment. 1272 */ 1273 void 1274 softdep_setup_blkmapdep(bp, fs, newblkno) 1275 struct buf *bp; /* buffer for cylgroup block with block map */ 1276 struct fs *fs; /* filesystem doing allocation */ 1277 ufs2_daddr_t newblkno; /* number of newly allocated block */ 1278 { 1279 struct newblk *newblk; 1280 struct bmsafemap *bmsafemap; 1281 1282 /* 1283 * Create a dependency for the newly allocated block. 1284 * Add it to the dependency list for the buffer holding 1285 * the cylinder group map from which it was allocated. 
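 * As with the inode case above, the new block is expected to start with
 * its DEPCOMPLETE state clear; linking it to the bmsafemap for this
 * cylinder group buffer is what allows the flag to be set once the
 * bitmap buffer has been written.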
1286 */ 1287 if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0) 1288 panic("softdep_setup_blkmapdep: found block"); 1289 ACQUIRE_LOCK(&lk); 1290 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp); 1291 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 1292 FREE_LOCK(&lk); 1293 } 1294 1295 /* 1296 * Find the bmsafemap associated with a cylinder group buffer. 1297 * If none exists, create one. The buffer must be locked when 1298 * this routine is called and this routine must be called with 1299 * splbio interrupts blocked. 1300 */ 1301 static struct bmsafemap * 1302 bmsafemap_lookup(bp) 1303 struct buf *bp; 1304 { 1305 struct bmsafemap *bmsafemap; 1306 struct worklist *wk; 1307 1308 #ifdef DEBUG 1309 if (lk.lkt_held == NOHOLDER) 1310 panic("bmsafemap_lookup: lock not held"); 1311 #endif 1312 LIST_FOREACH(wk, &bp->b_dep, wk_list) 1313 if (wk->wk_type == D_BMSAFEMAP) 1314 return (WK_BMSAFEMAP(wk)); 1315 FREE_LOCK(&lk); 1316 MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap), 1317 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 1318 bmsafemap->sm_list.wk_type = D_BMSAFEMAP; 1319 bmsafemap->sm_list.wk_state = 0; 1320 bmsafemap->sm_buf = bp; 1321 LIST_INIT(&bmsafemap->sm_allocdirecthd); 1322 LIST_INIT(&bmsafemap->sm_allocindirhd); 1323 LIST_INIT(&bmsafemap->sm_inodedephd); 1324 LIST_INIT(&bmsafemap->sm_newblkhd); 1325 ACQUIRE_LOCK(&lk); 1326 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 1327 return (bmsafemap); 1328 } 1329 1330 /* 1331 * Direct block allocation dependencies. 1332 * 1333 * When a new block is allocated, the corresponding disk locations must be 1334 * initialized (with zeros or new data) before the on-disk inode points to 1335 * them. Also, the freemap from which the block was allocated must be 1336 * updated (on disk) before the inode's pointer. These two dependencies are 1337 * independent of each other and are needed for all file blocks and indirect 1338 * blocks that are pointed to directly by the inode. Just before the 1339 * "in-core" version of the inode is updated with a newly allocated block 1340 * number, a procedure (below) is called to setup allocation dependency 1341 * structures. These structures are removed when the corresponding 1342 * dependencies are satisfied or when the block allocation becomes obsolete 1343 * (i.e., the file is deleted, the block is de-allocated, or the block is a 1344 * fragment that gets upgraded). All of these cases are handled in 1345 * procedures described later. 1346 * 1347 * When a file extension causes a fragment to be upgraded, either to a larger 1348 * fragment or to a full block, the on-disk location may change (if the 1349 * previous fragment could not simply be extended). In this case, the old 1350 * fragment must be de-allocated, but not until after the inode's pointer has 1351 * been updated. In most cases, this is handled by later procedures, which 1352 * will construct a "freefrag" structure to be added to the workitem queue 1353 * when the inode update is complete (or obsolete). The main exception to 1354 * this is when an allocation occurs while a pending allocation dependency 1355 * (for the same block pointer) remains. This case is handled in the main 1356 * allocation dependency setup procedure by immediately freeing the 1357 * unreferenced fragments. 
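 * In outline, the routine below records the old and new block numbers
 * and sizes in an allocdirect, attaches that allocdirect to the buffer
 * holding the newly allocated block, and, if the same logical block
 * already has a pending allocdirect, merges the two with
 * allocdirect_merge(), freeing any now-unreferenced fragment as
 * described above.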
1358 */ 1359 void 1360 softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) 1361 struct inode *ip; /* inode to which block is being added */ 1362 ufs_lbn_t lbn; /* block pointer within inode */ 1363 ufs2_daddr_t newblkno; /* disk block number being added */ 1364 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ 1365 long newsize; /* size of new block */ 1366 long oldsize; /* size of old block */ 1367 struct buf *bp; /* bp for allocated block */ 1368 { 1369 struct allocdirect *adp, *oldadp; 1370 struct allocdirectlst *adphead; 1371 struct bmsafemap *bmsafemap; 1372 struct inodedep *inodedep; 1373 struct pagedep *pagedep; 1374 struct newblk *newblk; 1375 1376 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect), 1377 M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO); 1378 adp->ad_list.wk_type = D_ALLOCDIRECT; 1379 adp->ad_lbn = lbn; 1380 adp->ad_newblkno = newblkno; 1381 adp->ad_oldblkno = oldblkno; 1382 adp->ad_newsize = newsize; 1383 adp->ad_oldsize = oldsize; 1384 adp->ad_state = ATTACHED; 1385 LIST_INIT(&adp->ad_newdirblk); 1386 if (newblkno == oldblkno) 1387 adp->ad_freefrag = NULL; 1388 else 1389 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize); 1390 1391 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0) 1392 panic("softdep_setup_allocdirect: lost block"); 1393 1394 ACQUIRE_LOCK(&lk); 1395 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep); 1396 adp->ad_inodedep = inodedep; 1397 1398 if (newblk->nb_state == DEPCOMPLETE) { 1399 adp->ad_state |= DEPCOMPLETE; 1400 adp->ad_buf = NULL; 1401 } else { 1402 bmsafemap = newblk->nb_bmsafemap; 1403 adp->ad_buf = bmsafemap->sm_buf; 1404 LIST_REMOVE(newblk, nb_deps); 1405 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps); 1406 } 1407 LIST_REMOVE(newblk, nb_hash); 1408 FREE(newblk, M_NEWBLK); 1409 1410 WORKLIST_INSERT(&bp->b_dep, &adp->ad_list); 1411 if (lbn >= NDADDR) { 1412 /* allocating an indirect block */ 1413 if (oldblkno != 0) { 1414 FREE_LOCK(&lk); 1415 panic("softdep_setup_allocdirect: non-zero indir"); 1416 } 1417 } else { 1418 /* 1419 * Allocating a direct block. 1420 * 1421 * If we are allocating a directory block, then we must 1422 * allocate an associated pagedep to track additions and 1423 * deletions. 1424 */ 1425 if ((ip->i_mode & IFMT) == IFDIR && 1426 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) 1427 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 1428 } 1429 /* 1430 * The list of allocdirects must be kept in sorted and ascending 1431 * order so that the rollback routines can quickly determine the 1432 * first uncommitted block (the size of the file stored on disk 1433 * ends at the end of the lowest committed fragment, or if there 1434 * are no fragments, at the end of the highest committed block). 1435 * Since files generally grow, the typical case is that the new 1436 * block is to be added at the end of the list. We speed this 1437 * special case by checking against the last allocdirect in the 1438 * list before laboriously traversing the list looking for the 1439 * insertion point.
1440 */ 1441 adphead = &inodedep->id_newinoupdt; 1442 oldadp = TAILQ_LAST(adphead, allocdirectlst); 1443 if (oldadp == NULL || oldadp->ad_lbn <= lbn) { 1444 /* insert at end of list */ 1445 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 1446 if (oldadp != NULL && oldadp->ad_lbn == lbn) 1447 allocdirect_merge(adphead, adp, oldadp); 1448 FREE_LOCK(&lk); 1449 return; 1450 } 1451 TAILQ_FOREACH(oldadp, adphead, ad_next) { 1452 if (oldadp->ad_lbn >= lbn) 1453 break; 1454 } 1455 if (oldadp == NULL) { 1456 FREE_LOCK(&lk); 1457 panic("softdep_setup_allocdirect: lost entry"); 1458 } 1459 /* insert in middle of list */ 1460 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 1461 if (oldadp->ad_lbn == lbn) 1462 allocdirect_merge(adphead, adp, oldadp); 1463 FREE_LOCK(&lk); 1464 } 1465 1466 /* 1467 * Replace an old allocdirect dependency with a newer one. 1468 * This routine must be called with splbio interrupts blocked. 1469 */ 1470 static void 1471 allocdirect_merge(adphead, newadp, oldadp) 1472 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 1473 struct allocdirect *newadp; /* allocdirect being added */ 1474 struct allocdirect *oldadp; /* existing allocdirect being checked */ 1475 { 1476 struct worklist *wk; 1477 struct freefrag *freefrag; 1478 struct newdirblk *newdirblk; 1479 1480 #ifdef DEBUG 1481 if (lk.lkt_held == NOHOLDER) 1482 panic("allocdirect_merge: lock not held"); 1483 #endif 1484 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 1485 newadp->ad_oldsize != oldadp->ad_newsize || 1486 newadp->ad_lbn >= NDADDR) { 1487 FREE_LOCK(&lk); 1488 panic("%s %jd != new %jd || old size %ld != new %ld", 1489 "allocdirect_merge: old blkno", 1490 (intmax_t)newadp->ad_oldblkno, 1491 (intmax_t)oldadp->ad_newblkno, 1492 newadp->ad_oldsize, oldadp->ad_newsize); 1493 } 1494 newadp->ad_oldblkno = oldadp->ad_oldblkno; 1495 newadp->ad_oldsize = oldadp->ad_oldsize; 1496 /* 1497 * If the old dependency had a fragment to free or had never 1498 * previously had a block allocated, then the new dependency 1499 * can immediately post its freefrag and adopt the old freefrag. 1500 * This action is done by swapping the freefrag dependencies. 1501 * The new dependency gains the old one's freefrag, and the 1502 * old one gets the new one and then immediately puts it on 1503 * the worklist when it is freed by free_allocdirect. It is 1504 * not possible to do this swap when the old dependency had a 1505 * non-zero size but no previous fragment to free. This condition 1506 * arises when the new block is an extension of the old block. 1507 * Here, the first part of the fragment allocated to the new 1508 * dependency is part of the block currently claimed on disk by 1509 * the old dependency, so cannot legitimately be freed until the 1510 * conditions for the new dependency are fulfilled. 1511 */ 1512 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 1513 freefrag = newadp->ad_freefrag; 1514 newadp->ad_freefrag = oldadp->ad_freefrag; 1515 oldadp->ad_freefrag = freefrag; 1516 } 1517 /* 1518 * If we are tracking a new directory-block allocation, 1519 * move it from the old allocdirect to the new allocdirect. 
1520 */ 1521 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 1522 newdirblk = WK_NEWDIRBLK(wk); 1523 WORKLIST_REMOVE(&newdirblk->db_list); 1524 if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL) 1525 panic("allocdirect_merge: extra newdirblk"); 1526 WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list); 1527 } 1528 free_allocdirect(adphead, oldadp, 0); 1529 } 1530 1531 /* 1532 * Allocate a new freefrag structure if needed. 1533 */ 1534 static struct freefrag * 1535 newfreefrag(ip, blkno, size) 1536 struct inode *ip; 1537 ufs2_daddr_t blkno; 1538 long size; 1539 { 1540 struct freefrag *freefrag; 1541 struct fs *fs; 1542 1543 if (blkno == 0) 1544 return (NULL); 1545 fs = ip->i_fs; 1546 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 1547 panic("newfreefrag: frag size"); 1548 MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag), 1549 M_FREEFRAG, M_SOFTDEP_FLAGS); 1550 freefrag->ff_list.wk_type = D_FREEFRAG; 1551 freefrag->ff_state = 0; 1552 freefrag->ff_inum = ip->i_number; 1553 freefrag->ff_mnt = ITOV(ip)->v_mount; 1554 freefrag->ff_blkno = blkno; 1555 freefrag->ff_fragsize = size; 1556 return (freefrag); 1557 } 1558 1559 /* 1560 * This workitem de-allocates fragments that were replaced during 1561 * file block allocation. 1562 */ 1563 static void 1564 handle_workitem_freefrag(freefrag) 1565 struct freefrag *freefrag; 1566 { 1567 struct ufsmount *ump = VFSTOUFS(freefrag->ff_mnt); 1568 1569 ffs_blkfree(ump->um_fs, ump->um_devvp, freefrag->ff_blkno, 1570 freefrag->ff_fragsize, freefrag->ff_inum); 1571 FREE(freefrag, M_FREEFRAG); 1572 } 1573 1574 /* 1575 * Set up a dependency structure for an external attributes data block. 1576 * This routine follows much of the structure of softdep_setup_allocdirect. 1577 * See the description of softdep_setup_allocdirect above for details. 
1578 */ 1579 void 1580 softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) 1581 struct inode *ip; 1582 ufs_lbn_t lbn; 1583 ufs2_daddr_t newblkno; 1584 ufs2_daddr_t oldblkno; 1585 long newsize; 1586 long oldsize; 1587 struct buf *bp; 1588 { 1589 struct allocdirect *adp, *oldadp; 1590 struct allocdirectlst *adphead; 1591 struct bmsafemap *bmsafemap; 1592 struct inodedep *inodedep; 1593 struct newblk *newblk; 1594 1595 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect), 1596 M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO); 1597 adp->ad_list.wk_type = D_ALLOCDIRECT; 1598 adp->ad_lbn = lbn; 1599 adp->ad_newblkno = newblkno; 1600 adp->ad_oldblkno = oldblkno; 1601 adp->ad_newsize = newsize; 1602 adp->ad_oldsize = oldsize; 1603 adp->ad_state = ATTACHED | EXTDATA; 1604 LIST_INIT(&adp->ad_newdirblk); 1605 if (newblkno == oldblkno) 1606 adp->ad_freefrag = NULL; 1607 else 1608 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize); 1609 1610 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0) 1611 panic("softdep_setup_allocext: lost block"); 1612 1613 ACQUIRE_LOCK(&lk); 1614 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep); 1615 adp->ad_inodedep = inodedep; 1616 1617 if (newblk->nb_state == DEPCOMPLETE) { 1618 adp->ad_state |= DEPCOMPLETE; 1619 adp->ad_buf = NULL; 1620 } else { 1621 bmsafemap = newblk->nb_bmsafemap; 1622 adp->ad_buf = bmsafemap->sm_buf; 1623 LIST_REMOVE(newblk, nb_deps); 1624 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps); 1625 } 1626 LIST_REMOVE(newblk, nb_hash); 1627 FREE(newblk, M_NEWBLK); 1628 1629 WORKLIST_INSERT(&bp->b_dep, &adp->ad_list); 1630 if (lbn >= NXADDR) { 1631 FREE_LOCK(&lk); 1632 panic("softdep_setup_allocext: lbn %lld > NXADDR", 1633 (long long)lbn); 1634 } 1635 /* 1636 * The list of allocdirects must be kept in sorted and ascending 1637 * order so that the rollback routines can quickly determine the 1638 * first uncommitted block (the size of the file stored on disk 1639 * ends at the end of the lowest committed fragment, or if there 1640 * are no fragments, at the end of the highest committed block). 1641 * Since files generally grow, the typical case is that the new 1642 * block is to be added at the end of the list. We speed this 1643 * special case by checking against the last allocdirect in the 1644 * list before laboriously traversing the list looking for the 1645 * insertion point. 1646 */ 1647 adphead = &inodedep->id_newextupdt; 1648 oldadp = TAILQ_LAST(adphead, allocdirectlst); 1649 if (oldadp == NULL || oldadp->ad_lbn <= lbn) { 1650 /* insert at end of list */ 1651 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 1652 if (oldadp != NULL && oldadp->ad_lbn == lbn) 1653 allocdirect_merge(adphead, adp, oldadp); 1654 FREE_LOCK(&lk); 1655 return; 1656 } 1657 TAILQ_FOREACH(oldadp, adphead, ad_next) { 1658 if (oldadp->ad_lbn >= lbn) 1659 break; 1660 } 1661 if (oldadp == NULL) { 1662 FREE_LOCK(&lk); 1663 panic("softdep_setup_allocext: lost entry"); 1664 } 1665 /* insert in middle of list */ 1666 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 1667 if (oldadp->ad_lbn == lbn) 1668 allocdirect_merge(adphead, adp, oldadp); 1669 FREE_LOCK(&lk); 1670 } 1671 1672 /* 1673 * Indirect block allocation dependencies. 1674 * 1675 * The same dependencies that exist for a direct block also exist when 1676 * a new block is allocated and pointed to by an entry in a block of 1677 * indirect pointers. The undo/redo states described above are also 1678 * used here. 
Because an indirect block contains many pointers that 1679 * may have dependencies, a second copy of the entire in-memory indirect 1680 * block is kept. The buffer cache copy is always completely up-to-date. 1681 * The second copy, which is used only as a source for disk writes, 1682 * contains only the safe pointers (i.e., those that have no remaining 1683 * update dependencies). The second copy is freed when all pointers 1684 * are safe. The cache is not allowed to replace indirect blocks with 1685 * pending update dependencies. If a buffer containing an indirect 1686 * block with dependencies is written, these routines will mark it 1687 * dirty again. It can only be successfully written once all the 1688 * dependencies are removed. The ffs_fsync routine in conjunction with 1689 * softdep_sync_metadata work together to get all the dependencies 1690 * removed so that a file can be successfully written to disk. Three 1691 * procedures are used when setting up indirect block pointer 1692 * dependencies. The division is necessary because of the organization 1693 * of the "balloc" routine and because of the distinction between file 1694 * pages and file metadata blocks. 1695 */ 1696 1697 /* 1698 * Allocate a new allocindir structure. 1699 */ 1700 static struct allocindir * 1701 newallocindir(ip, ptrno, newblkno, oldblkno) 1702 struct inode *ip; /* inode for file being extended */ 1703 int ptrno; /* offset of pointer in indirect block */ 1704 ufs2_daddr_t newblkno; /* disk block number being added */ 1705 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 1706 { 1707 struct allocindir *aip; 1708 1709 MALLOC(aip, struct allocindir *, sizeof(struct allocindir), 1710 M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO); 1711 aip->ai_list.wk_type = D_ALLOCINDIR; 1712 aip->ai_state = ATTACHED; 1713 aip->ai_offset = ptrno; 1714 aip->ai_newblkno = newblkno; 1715 aip->ai_oldblkno = oldblkno; 1716 aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize); 1717 return (aip); 1718 } 1719 1720 /* 1721 * Called just before setting an indirect block pointer 1722 * to a newly allocated file page. 1723 */ 1724 void 1725 softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 1726 struct inode *ip; /* inode for file being extended */ 1727 ufs_lbn_t lbn; /* allocated block number within file */ 1728 struct buf *bp; /* buffer with indirect blk referencing page */ 1729 int ptrno; /* offset of pointer in indirect block */ 1730 ufs2_daddr_t newblkno; /* disk block number being added */ 1731 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 1732 struct buf *nbp; /* buffer holding allocated page */ 1733 { 1734 struct allocindir *aip; 1735 struct pagedep *pagedep; 1736 1737 aip = newallocindir(ip, ptrno, newblkno, oldblkno); 1738 ACQUIRE_LOCK(&lk); 1739 /* 1740 * If we are allocating a directory page, then we must 1741 * allocate an associated pagedep to track additions and 1742 * deletions. 1743 */ 1744 if ((ip->i_mode & IFMT) == IFDIR && 1745 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) 1746 WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list); 1747 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list); 1748 FREE_LOCK(&lk); 1749 setup_allocindir_phase2(bp, ip, aip); 1750 } 1751 1752 /* 1753 * Called just before setting an indirect block pointer to a 1754 * newly allocated indirect block. 
1755 */ 1756 void 1757 softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 1758 struct buf *nbp; /* newly allocated indirect block */ 1759 struct inode *ip; /* inode for file being extended */ 1760 struct buf *bp; /* indirect block referencing allocated block */ 1761 int ptrno; /* offset of pointer in indirect block */ 1762 ufs2_daddr_t newblkno; /* disk block number being added */ 1763 { 1764 struct allocindir *aip; 1765 1766 aip = newallocindir(ip, ptrno, newblkno, 0); 1767 ACQUIRE_LOCK(&lk); 1768 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list); 1769 FREE_LOCK(&lk); 1770 setup_allocindir_phase2(bp, ip, aip); 1771 } 1772 1773 /* 1774 * Called to finish the allocation of the "aip" allocated 1775 * by one of the two routines above. 1776 */ 1777 static void 1778 setup_allocindir_phase2(bp, ip, aip) 1779 struct buf *bp; /* in-memory copy of the indirect block */ 1780 struct inode *ip; /* inode for file being extended */ 1781 struct allocindir *aip; /* allocindir allocated by the above routines */ 1782 { 1783 struct worklist *wk; 1784 struct indirdep *indirdep, *newindirdep; 1785 struct bmsafemap *bmsafemap; 1786 struct allocindir *oldaip; 1787 struct freefrag *freefrag; 1788 struct newblk *newblk; 1789 ufs2_daddr_t blkno; 1790 1791 if (bp->b_lblkno >= 0) 1792 panic("setup_allocindir_phase2: not indir blk"); 1793 for (indirdep = NULL, newindirdep = NULL; ; ) { 1794 ACQUIRE_LOCK(&lk); 1795 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1796 if (wk->wk_type != D_INDIRDEP) 1797 continue; 1798 indirdep = WK_INDIRDEP(wk); 1799 break; 1800 } 1801 if (indirdep == NULL && newindirdep) { 1802 indirdep = newindirdep; 1803 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1804 newindirdep = NULL; 1805 } 1806 FREE_LOCK(&lk); 1807 if (indirdep) { 1808 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1809 &newblk) == 0) 1810 panic("setup_allocindir: lost block"); 1811 ACQUIRE_LOCK(&lk); 1812 if (newblk->nb_state == DEPCOMPLETE) { 1813 aip->ai_state |= DEPCOMPLETE; 1814 aip->ai_buf = NULL; 1815 } else { 1816 bmsafemap = newblk->nb_bmsafemap; 1817 aip->ai_buf = bmsafemap->sm_buf; 1818 LIST_REMOVE(newblk, nb_deps); 1819 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1820 aip, ai_deps); 1821 } 1822 LIST_REMOVE(newblk, nb_hash); 1823 FREE(newblk, M_NEWBLK); 1824 aip->ai_indirdep = indirdep; 1825 /* 1826 * Check to see if there is an existing dependency 1827 * for this block. If there is, merge the old 1828 * dependency into the new one. 
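 *
 * For example (block numbers purely illustrative): if an earlier allocindir
 * at this offset recorded oldblkno 100 -> newblkno 200 and the pointer is
 * now being changed to 300, the incoming aip arrives describing 200 -> 300.
 * After the merge below, aip describes 100 -> 300, it inherits the old
 * entry's freefrag for block 100, and the freefrag for block 200 is handed
 * to handle_workitem_freefrag() right away since block 200 never reached
 * the disk.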
1829 */ 1830 if (aip->ai_oldblkno == 0) 1831 oldaip = NULL; 1832 else 1833 1834 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1835 if (oldaip->ai_offset == aip->ai_offset) 1836 break; 1837 freefrag = NULL; 1838 if (oldaip != NULL) { 1839 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1840 FREE_LOCK(&lk); 1841 panic("setup_allocindir_phase2: blkno"); 1842 } 1843 aip->ai_oldblkno = oldaip->ai_oldblkno; 1844 freefrag = aip->ai_freefrag; 1845 aip->ai_freefrag = oldaip->ai_freefrag; 1846 oldaip->ai_freefrag = NULL; 1847 free_allocindir(oldaip, NULL); 1848 } 1849 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1850 if (ip->i_ump->um_fstype == UFS1) 1851 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data) 1852 [aip->ai_offset] = aip->ai_oldblkno; 1853 else 1854 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data) 1855 [aip->ai_offset] = aip->ai_oldblkno; 1856 FREE_LOCK(&lk); 1857 if (freefrag != NULL) 1858 handle_workitem_freefrag(freefrag); 1859 } 1860 if (newindirdep) { 1861 if (indirdep->ir_savebp != NULL) 1862 brelse(newindirdep->ir_savebp); 1863 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1864 } 1865 if (indirdep) 1866 break; 1867 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1868 M_INDIRDEP, M_SOFTDEP_FLAGS); 1869 newindirdep->ir_list.wk_type = D_INDIRDEP; 1870 newindirdep->ir_state = ATTACHED; 1871 if (ip->i_ump->um_fstype == UFS1) 1872 newindirdep->ir_state |= UFS1FMT; 1873 LIST_INIT(&newindirdep->ir_deplisthd); 1874 LIST_INIT(&newindirdep->ir_donehd); 1875 if (bp->b_blkno == bp->b_lblkno) { 1876 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 1877 NULL, NULL); 1878 bp->b_blkno = blkno; 1879 } 1880 newindirdep->ir_savebp = 1881 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0); 1882 BUF_KERNPROC(newindirdep->ir_savebp); 1883 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1884 } 1885 } 1886 1887 /* 1888 * Block de-allocation dependencies. 1889 * 1890 * When blocks are de-allocated, the on-disk pointers must be nullified before 1891 * the blocks are made available for use by other files. (The true 1892 * requirement is that old pointers must be nullified before new on-disk 1893 * pointers are set. We chose this slightly more stringent requirement to 1894 * reduce complexity.) Our implementation handles this dependency by updating 1895 * the inode (or indirect block) appropriately but delaying the actual block 1896 * de-allocation (i.e., freemap and free space count manipulation) until 1897 * after the updated versions reach stable storage. After the disk is 1898 * updated, the blocks can be safely de-allocated whenever it is convenient. 1899 * This implementation handles only the common case of reducing a file's 1900 * length to zero. Other cases are handled by the conventional synchronous 1901 * write approach. 1902 * 1903 * The ffs implementation with which we worked double-checks 1904 * the state of the block pointers and file size as it reduces 1905 * a file's length. Some of this code is replicated here in our 1906 * soft updates implementation. The freeblks->fb_chkcnt field is 1907 * used to transfer a part of this information to the procedure 1908 * that eventually de-allocates the blocks. 1909 * 1910 * This routine should be called from the routine that shortens 1911 * a file's length, before the inode's size or block pointers 1912 * are modified. It will save the block pointer information for 1913 * later release and zero the inode so that the calling routine 1914 * can release it. 
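 *
 * A hedged sketch of the expected call site (the truncation routine itself
 * is not part of this file): with soft updates enabled, a truncation to
 * zero length hands the work off before the inode is touched, e.g.
 *
 *	if (DOINGSOFTDEP(vp) && length == 0)
 *		softdep_setup_freeblocks(ip, length, IO_EXT | IO_NORMAL);
 *
 * after which the caller may simply mark the inode changed and release it;
 * the blocks themselves are released later by handle_workitem_freeblocks(),
 * normally after the zero'ed inode has reached the disk.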
1915 */ 1916 void 1917 softdep_setup_freeblocks(ip, length, flags) 1918 struct inode *ip; /* The inode whose length is to be reduced */ 1919 off_t length; /* The new length for the file */ 1920 int flags; /* IO_EXT and/or IO_NORMAL */ 1921 { 1922 struct freeblks *freeblks; 1923 struct inodedep *inodedep; 1924 struct allocdirect *adp; 1925 struct vnode *vp; 1926 struct buf *bp; 1927 struct fs *fs; 1928 ufs2_daddr_t extblocks, datablocks; 1929 int i, delay, error; 1930 1931 fs = ip->i_fs; 1932 if (length != 0) 1933 panic("softdep_setup_freeblocks: non-zero length"); 1934 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks), 1935 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 1936 freeblks->fb_list.wk_type = D_FREEBLKS; 1937 freeblks->fb_uid = ip->i_uid; 1938 freeblks->fb_previousinum = ip->i_number; 1939 freeblks->fb_devvp = ip->i_devvp; 1940 freeblks->fb_mnt = ITOV(ip)->v_mount; 1941 extblocks = 0; 1942 if (fs->fs_magic == FS_UFS2_MAGIC) 1943 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 1944 datablocks = DIP(ip, i_blocks) - extblocks; 1945 if ((flags & IO_NORMAL) == 0) { 1946 freeblks->fb_oldsize = 0; 1947 freeblks->fb_chkcnt = 0; 1948 } else { 1949 freeblks->fb_oldsize = ip->i_size; 1950 ip->i_size = 0; 1951 DIP(ip, i_size) = 0; 1952 freeblks->fb_chkcnt = datablocks; 1953 for (i = 0; i < NDADDR; i++) { 1954 freeblks->fb_dblks[i] = DIP(ip, i_db[i]); 1955 DIP(ip, i_db[i]) = 0; 1956 } 1957 for (i = 0; i < NIADDR; i++) { 1958 freeblks->fb_iblks[i] = DIP(ip, i_ib[i]); 1959 DIP(ip, i_ib[i]) = 0; 1960 } 1961 /* 1962 * If the file was removed, then the space being freed was 1963 * accounted for then (see softdep_filereleased()). If the 1964 * file is merely being truncated, then we account for it now. 1965 */ 1966 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 1967 fs->fs_pendingblocks += datablocks; 1968 } 1969 if ((flags & IO_EXT) == 0) { 1970 freeblks->fb_oldextsize = 0; 1971 } else { 1972 freeblks->fb_oldextsize = ip->i_din2->di_extsize; 1973 ip->i_din2->di_extsize = 0; 1974 freeblks->fb_chkcnt += extblocks; 1975 for (i = 0; i < NXADDR; i++) { 1976 freeblks->fb_eblks[i] = ip->i_din2->di_extb[i]; 1977 ip->i_din2->di_extb[i] = 0; 1978 } 1979 } 1980 DIP(ip, i_blocks) -= freeblks->fb_chkcnt; 1981 /* 1982 * Push the zero'ed inode to to its disk buffer so that we are free 1983 * to delete its dependencies below. Once the dependencies are gone 1984 * the buffer can be safely released. 1985 */ 1986 if ((error = bread(ip->i_devvp, 1987 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 1988 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 1989 brelse(bp); 1990 softdep_error("softdep_setup_freeblocks", error); 1991 } 1992 if (ip->i_ump->um_fstype == UFS1) 1993 *((struct ufs1_dinode *)bp->b_data + 1994 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 1995 else 1996 *((struct ufs2_dinode *)bp->b_data + 1997 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 1998 /* 1999 * Find and eliminate any inode dependencies. 2000 */ 2001 ACQUIRE_LOCK(&lk); 2002 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 2003 if ((inodedep->id_state & IOSTARTED) != 0) { 2004 FREE_LOCK(&lk); 2005 panic("softdep_setup_freeblocks: inode busy"); 2006 } 2007 /* 2008 * Add the freeblks structure to the list of operations that 2009 * must await the zero'ed inode being written to disk. If we 2010 * still have a bitmap dependency (delay == 0), then the inode 2011 * has never been written to disk, so we can process the 2012 * freeblks below once we have deleted the dependencies. 
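 *
 * Restating the two cases handled below:
 *	delay != 0 (DEPCOMPLETE set, no bitmap dependency): the freeblks is
 *		queued on id_bufwait and is run only after the zero'ed
 *		inode block has been written.
 *	delay == 0 (bitmap dependency still pending, inode never written):
 *		the freeblks is handled directly at the bottom of this
 *		routine, once the dependencies have been deleted.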
2013 */ 2014 delay = (inodedep->id_state & DEPCOMPLETE); 2015 if (delay) 2016 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 2017 /* 2018 * Because the file length has been truncated to zero, any 2019 * pending block allocation dependency structures associated 2020 * with this inode are obsolete and can simply be de-allocated. 2021 * We must first merge the two dependency lists to get rid of 2022 * any duplicate freefrag structures, then purge the merged list. 2023 * If we still have a bitmap dependency, then the inode has never 2024 * been written to disk, so we can free any fragments without delay. 2025 */ 2026 if (flags & IO_NORMAL) { 2027 merge_inode_lists(&inodedep->id_newinoupdt, 2028 &inodedep->id_inoupdt); 2029 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 2030 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 2031 } 2032 if (flags & IO_EXT) { 2033 merge_inode_lists(&inodedep->id_newextupdt, 2034 &inodedep->id_extupdt); 2035 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 2036 free_allocdirect(&inodedep->id_extupdt, adp, delay); 2037 } 2038 FREE_LOCK(&lk); 2039 bdwrite(bp); 2040 /* 2041 * We must wait for any I/O in progress to finish so that 2042 * all potential buffers on the dirty list will be visible. 2043 * Once they are all there, walk the list and get rid of 2044 * any dependencies. 2045 */ 2046 vp = ITOV(ip); 2047 ACQUIRE_LOCK(&lk); 2048 drain_output(vp, 1); 2049 restart: 2050 TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 2051 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 2052 ((flags & IO_NORMAL) == 0 && 2053 (bp->b_xflags & BX_ALTDATA) == 0)) 2054 continue; 2055 if (getdirtybuf(&bp, MNT_WAIT) == 0) 2056 goto restart; 2057 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep); 2058 deallocate_dependencies(bp, inodedep); 2059 bp->b_flags |= B_INVAL | B_NOCACHE; 2060 FREE_LOCK(&lk); 2061 brelse(bp); 2062 ACQUIRE_LOCK(&lk); 2063 goto restart; 2064 } 2065 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 2066 (void) free_inodedep(inodedep); 2067 FREE_LOCK(&lk); 2068 /* 2069 * If the inode has never been written to disk (delay == 0), 2070 * then we can process the freeblks now that we have deleted 2071 * the dependencies. 2072 */ 2073 if (!delay) 2074 handle_workitem_freeblocks(freeblks, 0); 2075 } 2076 2077 /* 2078 * Reclaim any dependency structures from a buffer that is about to 2079 * be reallocated to a new vnode. The buffer must be locked, thus, 2080 * no I/O completion operations can occur while we are manipulating 2081 * its associated dependencies. The mutex is held so that other I/O's 2082 * associated with related dependencies do not occur. 2083 */ 2084 static void 2085 deallocate_dependencies(bp, inodedep) 2086 struct buf *bp; 2087 struct inodedep *inodedep; 2088 { 2089 struct worklist *wk; 2090 struct indirdep *indirdep; 2091 struct allocindir *aip; 2092 struct pagedep *pagedep; 2093 struct dirrem *dirrem; 2094 struct diradd *dap; 2095 int i; 2096 2097 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2098 switch (wk->wk_type) { 2099 2100 case D_INDIRDEP: 2101 indirdep = WK_INDIRDEP(wk); 2102 /* 2103 * None of the indirect pointers will ever be visible, 2104 * so they can simply be tossed. GOINGAWAY ensures 2105 * that allocated pointers will be saved in the buffer 2106 * cache until they are freed. Note that they will 2107 * only be able to be found by their physical address 2108 * since the inode mapping the logical address will 2109 * be gone. 
The save buffer used for the safe copy 2110 * was allocated in setup_allocindir_phase2 using 2111 * the physical address so it could be used for this 2112 * purpose. Hence we swap the safe copy with the real 2113 * copy, allowing the safe copy to be freed and holding 2114 * on to the real copy for later use in indir_trunc. 2115 */ 2116 if (indirdep->ir_state & GOINGAWAY) { 2117 FREE_LOCK(&lk); 2118 panic("deallocate_dependencies: already gone"); 2119 } 2120 indirdep->ir_state |= GOINGAWAY; 2121 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 2122 free_allocindir(aip, inodedep); 2123 if (bp->b_lblkno >= 0 || 2124 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 2125 FREE_LOCK(&lk); 2126 panic("deallocate_dependencies: not indir"); 2127 } 2128 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2129 bp->b_bcount); 2130 WORKLIST_REMOVE(wk); 2131 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 2132 continue; 2133 2134 case D_PAGEDEP: 2135 pagedep = WK_PAGEDEP(wk); 2136 /* 2137 * None of the directory additions will ever be 2138 * visible, so they can simply be tossed. 2139 */ 2140 for (i = 0; i < DAHASHSZ; i++) 2141 while ((dap = 2142 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2143 free_diradd(dap); 2144 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 2145 free_diradd(dap); 2146 /* 2147 * Copy any directory remove dependencies to the list 2148 * to be processed after the zero'ed inode is written. 2149 * If the inode has already been written, then they 2150 * can be dumped directly onto the work list. 2151 */ 2152 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2153 LIST_REMOVE(dirrem, dm_next); 2154 dirrem->dm_dirinum = pagedep->pd_ino; 2155 if (inodedep == NULL || 2156 (inodedep->id_state & ALLCOMPLETE) == 2157 ALLCOMPLETE) 2158 add_to_worklist(&dirrem->dm_list); 2159 else 2160 WORKLIST_INSERT(&inodedep->id_bufwait, 2161 &dirrem->dm_list); 2162 } 2163 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2164 LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list) 2165 if (wk->wk_type == D_NEWDIRBLK && 2166 WK_NEWDIRBLK(wk)->db_pagedep == 2167 pagedep) 2168 break; 2169 if (wk != NULL) { 2170 WORKLIST_REMOVE(wk); 2171 free_newdirblk(WK_NEWDIRBLK(wk)); 2172 } else { 2173 FREE_LOCK(&lk); 2174 panic("deallocate_dependencies: " 2175 "lost pagedep"); 2176 } 2177 } 2178 WORKLIST_REMOVE(&pagedep->pd_list); 2179 LIST_REMOVE(pagedep, pd_hash); 2180 WORKITEM_FREE(pagedep, D_PAGEDEP); 2181 continue; 2182 2183 case D_ALLOCINDIR: 2184 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2185 continue; 2186 2187 case D_ALLOCDIRECT: 2188 case D_INODEDEP: 2189 FREE_LOCK(&lk); 2190 panic("deallocate_dependencies: Unexpected type %s", 2191 TYPENAME(wk->wk_type)); 2192 /* NOTREACHED */ 2193 2194 default: 2195 FREE_LOCK(&lk); 2196 panic("deallocate_dependencies: Unknown type %s", 2197 TYPENAME(wk->wk_type)); 2198 /* NOTREACHED */ 2199 } 2200 } 2201 } 2202 2203 /* 2204 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2205 * This routine must be called with splbio interrupts blocked. 
2206 */ 2207 static void 2208 free_allocdirect(adphead, adp, delay) 2209 struct allocdirectlst *adphead; 2210 struct allocdirect *adp; 2211 int delay; 2212 { 2213 struct newdirblk *newdirblk; 2214 struct worklist *wk; 2215 2216 #ifdef DEBUG 2217 if (lk.lkt_held == NOHOLDER) 2218 panic("free_allocdirect: lock not held"); 2219 #endif 2220 if ((adp->ad_state & DEPCOMPLETE) == 0) 2221 LIST_REMOVE(adp, ad_deps); 2222 TAILQ_REMOVE(adphead, adp, ad_next); 2223 if ((adp->ad_state & COMPLETE) == 0) 2224 WORKLIST_REMOVE(&adp->ad_list); 2225 if (adp->ad_freefrag != NULL) { 2226 if (delay) 2227 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2228 &adp->ad_freefrag->ff_list); 2229 else 2230 add_to_worklist(&adp->ad_freefrag->ff_list); 2231 } 2232 if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) { 2233 newdirblk = WK_NEWDIRBLK(wk); 2234 WORKLIST_REMOVE(&newdirblk->db_list); 2235 if (LIST_FIRST(&adp->ad_newdirblk) != NULL) 2236 panic("free_allocdirect: extra newdirblk"); 2237 if (delay) 2238 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2239 &newdirblk->db_list); 2240 else 2241 free_newdirblk(newdirblk); 2242 } 2243 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2244 } 2245 2246 /* 2247 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 2248 * This routine must be called with splbio interrupts blocked. 2249 */ 2250 static void 2251 free_newdirblk(newdirblk) 2252 struct newdirblk *newdirblk; 2253 { 2254 struct pagedep *pagedep; 2255 struct diradd *dap; 2256 int i; 2257 2258 #ifdef DEBUG 2259 if (lk.lkt_held == NOHOLDER) 2260 panic("free_newdirblk: lock not held"); 2261 #endif 2262 /* 2263 * If the pagedep is still linked onto the directory buffer 2264 * dependency chain, then some of the entries on the 2265 * pd_pendinghd list may not be committed to disk yet. In 2266 * this case, we will simply clear the NEWBLOCK flag and 2267 * let the pd_pendinghd list be processed when the pagedep 2268 * is next written. If the pagedep is no longer on the buffer 2269 * dependency chain, then all the entries on the pd_pending 2270 * list are committed to disk and we can free them here. 2271 */ 2272 pagedep = newdirblk->db_pagedep; 2273 pagedep->pd_state &= ~NEWBLOCK; 2274 if ((pagedep->pd_state & ONWORKLIST) == 0) 2275 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2276 free_diradd(dap); 2277 /* 2278 * If no dependencies remain, the pagedep will be freed. 2279 */ 2280 for (i = 0; i < DAHASHSZ; i++) 2281 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 2282 break; 2283 if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) { 2284 LIST_REMOVE(pagedep, pd_hash); 2285 WORKITEM_FREE(pagedep, D_PAGEDEP); 2286 } 2287 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2288 } 2289 2290 /* 2291 * Prepare an inode to be freed. The actual free operation is not 2292 * done until the zero'ed inode has been written to disk. 2293 */ 2294 void 2295 softdep_freefile(pvp, ino, mode) 2296 struct vnode *pvp; 2297 ino_t ino; 2298 int mode; 2299 { 2300 struct inode *ip = VTOI(pvp); 2301 struct inodedep *inodedep; 2302 struct freefile *freefile; 2303 2304 /* 2305 * This sets up the inode de-allocation dependency. 
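 *
 * For context, a hedged sketch of how this routine is reached (the caller
 * is an assumption, not part of this file): the inode-free path defers to
 * soft updates instead of clearing the cylinder group bitmap at once,
 * roughly
 *
 *	if (DOINGSOFTDEP(pvp)) {
 *		softdep_freefile(pvp, ino, mode);
 *		return (0);
 *	}
 *	return (ffs_freefile(fs, devvp, ino, mode));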
2306 */ 2307 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2308 M_FREEFILE, M_SOFTDEP_FLAGS); 2309 freefile->fx_list.wk_type = D_FREEFILE; 2310 freefile->fx_list.wk_state = 0; 2311 freefile->fx_mode = mode; 2312 freefile->fx_oldinum = ino; 2313 freefile->fx_devvp = ip->i_devvp; 2314 freefile->fx_mnt = ITOV(ip)->v_mount; 2315 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 2316 ip->i_fs->fs_pendinginodes += 1; 2317 2318 /* 2319 * If the inodedep does not exist, then the zero'ed inode has 2320 * been written to disk. If the allocated inode has never been 2321 * written to disk, then the on-disk inode is zero'ed. In either 2322 * case we can free the file immediately. 2323 */ 2324 ACQUIRE_LOCK(&lk); 2325 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2326 check_inode_unwritten(inodedep)) { 2327 FREE_LOCK(&lk); 2328 handle_workitem_freefile(freefile); 2329 return; 2330 } 2331 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2332 FREE_LOCK(&lk); 2333 } 2334 2335 /* 2336 * Check to see if an inode has never been written to disk. If 2337 * so free the inodedep and return success, otherwise return failure. 2338 * This routine must be called with splbio interrupts blocked. 2339 * 2340 * If we still have a bitmap dependency, then the inode has never 2341 * been written to disk. Drop the dependency as it is no longer 2342 * necessary since the inode is being deallocated. We set the 2343 * ALLCOMPLETE flags since the bitmap now properly shows that the 2344 * inode is not allocated. Even if the inode is actively being 2345 * written, it has been rolled back to its zero'ed state, so we 2346 * are ensured that a zero inode is what is on the disk. For short 2347 * lived files, this change will usually result in removing all the 2348 * dependencies from the inode so that it can be freed immediately. 2349 */ 2350 static int 2351 check_inode_unwritten(inodedep) 2352 struct inodedep *inodedep; 2353 { 2354 2355 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2356 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2357 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2358 LIST_FIRST(&inodedep->id_inowait) != NULL || 2359 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2360 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2361 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 2362 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 2363 inodedep->id_nlinkdelta != 0) 2364 return (0); 2365 inodedep->id_state |= ALLCOMPLETE; 2366 LIST_REMOVE(inodedep, id_deps); 2367 inodedep->id_buf = NULL; 2368 if (inodedep->id_state & ONWORKLIST) 2369 WORKLIST_REMOVE(&inodedep->id_list); 2370 if (inodedep->id_savedino1 != NULL) { 2371 FREE(inodedep->id_savedino1, M_INODEDEP); 2372 inodedep->id_savedino1 = NULL; 2373 } 2374 if (free_inodedep(inodedep) == 0) { 2375 FREE_LOCK(&lk); 2376 panic("check_inode_unwritten: busy inode"); 2377 } 2378 return (1); 2379 } 2380 2381 /* 2382 * Try to free an inodedep structure. Return 1 if it could be freed. 
2383 */ 2384 static int 2385 free_inodedep(inodedep) 2386 struct inodedep *inodedep; 2387 { 2388 2389 if ((inodedep->id_state & ONWORKLIST) != 0 || 2390 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2391 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2392 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2393 LIST_FIRST(&inodedep->id_inowait) != NULL || 2394 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2395 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2396 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 2397 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 2398 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL) 2399 return (0); 2400 LIST_REMOVE(inodedep, id_hash); 2401 WORKITEM_FREE(inodedep, D_INODEDEP); 2402 num_inodedep -= 1; 2403 return (1); 2404 } 2405 2406 /* 2407 * This workitem routine performs the block de-allocation. 2408 * The workitem is added to the pending list after the updated 2409 * inode block has been written to disk. As mentioned above, 2410 * checks regarding the number of blocks de-allocated (compared 2411 * to the number of blocks allocated for the file) are also 2412 * performed in this function. 2413 */ 2414 static void 2415 handle_workitem_freeblocks(freeblks, flags) 2416 struct freeblks *freeblks; 2417 int flags; 2418 { 2419 struct inode *ip; 2420 struct vnode *vp; 2421 struct fs *fs; 2422 int i, nblocks, level, bsize; 2423 ufs2_daddr_t bn, blocksreleased = 0; 2424 int error, allerror = 0; 2425 ufs_lbn_t baselbns[NIADDR], tmpval; 2426 2427 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2428 tmpval = 1; 2429 baselbns[0] = NDADDR; 2430 for (i = 1; i < NIADDR; i++) { 2431 tmpval *= NINDIR(fs); 2432 baselbns[i] = baselbns[i - 1] + tmpval; 2433 } 2434 nblocks = btodb(fs->fs_bsize); 2435 blocksreleased = 0; 2436 /* 2437 * Release all extended attribute blocks or frags. 2438 */ 2439 if (freeblks->fb_oldextsize > 0) { 2440 for (i = (NXADDR - 1); i >= 0; i--) { 2441 if ((bn = freeblks->fb_eblks[i]) == 0) 2442 continue; 2443 bsize = sblksize(fs, freeblks->fb_oldextsize, i); 2444 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize, 2445 freeblks->fb_previousinum); 2446 blocksreleased += btodb(bsize); 2447 } 2448 } 2449 /* 2450 * Release all data blocks or frags. 2451 */ 2452 if (freeblks->fb_oldsize > 0) { 2453 /* 2454 * Indirect blocks first. 2455 */ 2456 for (level = (NIADDR - 1); level >= 0; level--) { 2457 if ((bn = freeblks->fb_iblks[level]) == 0) 2458 continue; 2459 if ((error = indir_trunc(freeblks, fsbtodb(fs, bn), 2460 level, baselbns[level], &blocksreleased)) != 0) 2461 allerror = error; 2462 ffs_blkfree(fs, freeblks->fb_devvp, bn, fs->fs_bsize, 2463 freeblks->fb_previousinum); 2464 fs->fs_pendingblocks -= nblocks; 2465 blocksreleased += nblocks; 2466 } 2467 /* 2468 * All direct blocks or frags. 2469 */ 2470 for (i = (NDADDR - 1); i >= 0; i--) { 2471 if ((bn = freeblks->fb_dblks[i]) == 0) 2472 continue; 2473 bsize = sblksize(fs, freeblks->fb_oldsize, i); 2474 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize, 2475 freeblks->fb_previousinum); 2476 fs->fs_pendingblocks -= btodb(bsize); 2477 blocksreleased += btodb(bsize); 2478 } 2479 } 2480 /* 2481 * If we still have not finished background cleanup, then check 2482 * to see if the block count needs to be adjusted.
2483 */ 2484 if (freeblks->fb_chkcnt != blocksreleased && 2485 (fs->fs_flags & FS_UNCLEAN) != 0 && 2486 VFS_VGET(freeblks->fb_mnt, freeblks->fb_previousinum, 2487 (flags & LK_NOWAIT) | LK_EXCLUSIVE, &vp) == 0) { 2488 ip = VTOI(vp); 2489 DIP(ip, i_blocks) += freeblks->fb_chkcnt - blocksreleased; 2490 ip->i_flag |= IN_CHANGE; 2491 vput(vp); 2492 } 2493 2494 #ifdef DIAGNOSTIC 2495 if (freeblks->fb_chkcnt != blocksreleased && 2496 ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0)) 2497 printf("handle_workitem_freeblocks: block count"); 2498 if (allerror) 2499 softdep_error("handle_workitem_freeblks", allerror); 2500 #endif /* DIAGNOSTIC */ 2501 2502 WORKITEM_FREE(freeblks, D_FREEBLKS); 2503 } 2504 2505 /* 2506 * Release blocks associated with the inode ip and stored in the indirect 2507 * block dbn. If level is greater than SINGLE, the block is an indirect block 2508 * and recursive calls to indirtrunc must be used to cleanse other indirect 2509 * blocks. 2510 */ 2511 static int 2512 indir_trunc(freeblks, dbn, level, lbn, countp) 2513 struct freeblks *freeblks; 2514 ufs2_daddr_t dbn; 2515 int level; 2516 ufs_lbn_t lbn; 2517 ufs2_daddr_t *countp; 2518 { 2519 struct buf *bp; 2520 struct fs *fs; 2521 struct worklist *wk; 2522 struct indirdep *indirdep; 2523 ufs1_daddr_t *bap1 = 0; 2524 ufs2_daddr_t nb, *bap2 = 0; 2525 ufs_lbn_t lbnadd; 2526 int i, nblocks, ufs1fmt; 2527 int error, allerror = 0; 2528 2529 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2530 lbnadd = 1; 2531 for (i = level; i > 0; i--) 2532 lbnadd *= NINDIR(fs); 2533 /* 2534 * Get buffer of block pointers to be freed. This routine is not 2535 * called until the zero'ed inode has been written, so it is safe 2536 * to free blocks as they are encountered. Because the inode has 2537 * been zero'ed, calls to bmap on these blocks will fail. So, we 2538 * have to use the on-disk address and the block device for the 2539 * filesystem to look them up. If the file was deleted before its 2540 * indirect blocks were all written to disk, the routine that set 2541 * us up (deallocate_dependencies) will have arranged to leave 2542 * a complete copy of the indirect block in memory for our use. 2543 * Otherwise we have to read the blocks in from the disk. 2544 */ 2545 ACQUIRE_LOCK(&lk); 2546 if ((bp = incore(freeblks->fb_devvp, dbn)) != NULL && 2547 (wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2548 if (wk->wk_type != D_INDIRDEP || 2549 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2550 (indirdep->ir_state & GOINGAWAY) == 0) { 2551 FREE_LOCK(&lk); 2552 panic("indir_trunc: lost indirdep"); 2553 } 2554 WORKLIST_REMOVE(wk); 2555 WORKITEM_FREE(indirdep, D_INDIRDEP); 2556 if (LIST_FIRST(&bp->b_dep) != NULL) { 2557 FREE_LOCK(&lk); 2558 panic("indir_trunc: dangling dep"); 2559 } 2560 FREE_LOCK(&lk); 2561 } else { 2562 FREE_LOCK(&lk); 2563 error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 2564 NOCRED, &bp); 2565 if (error) { 2566 brelse(bp); 2567 return (error); 2568 } 2569 } 2570 /* 2571 * Recursively free indirect blocks. 
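 *
 * Worked example of the lbn arithmetic set up above (geometry chosen only
 * for illustration: 16K blocks with 4-byte UFS1 pointers, so NINDIR(fs) is
 * 4096): lbnadd is NINDIR(fs) raised to the power "level", so for a double
 * indirect block (level 1) the pointer at index i covers file blocks
 * starting at lbn + i * 4096, and each such pointer is recursed on below
 * with level 0.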
2572 */ 2573 if (VFSTOUFS(freeblks->fb_mnt)->um_fstype == UFS1) { 2574 ufs1fmt = 1; 2575 bap1 = (ufs1_daddr_t *)bp->b_data; 2576 } else { 2577 ufs1fmt = 0; 2578 bap2 = (ufs2_daddr_t *)bp->b_data; 2579 } 2580 nblocks = btodb(fs->fs_bsize); 2581 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2582 if (ufs1fmt) 2583 nb = bap1[i]; 2584 else 2585 nb = bap2[i]; 2586 if (nb == 0) 2587 continue; 2588 if (level != 0) { 2589 if ((error = indir_trunc(freeblks, fsbtodb(fs, nb), 2590 level - 1, lbn + (i * lbnadd), countp)) != 0) 2591 allerror = error; 2592 } 2593 ffs_blkfree(fs, freeblks->fb_devvp, nb, fs->fs_bsize, 2594 freeblks->fb_previousinum); 2595 fs->fs_pendingblocks -= nblocks; 2596 *countp += nblocks; 2597 } 2598 bp->b_flags |= B_INVAL | B_NOCACHE; 2599 brelse(bp); 2600 return (allerror); 2601 } 2602 2603 /* 2604 * Free an allocindir. 2605 * This routine must be called with splbio interrupts blocked. 2606 */ 2607 static void 2608 free_allocindir(aip, inodedep) 2609 struct allocindir *aip; 2610 struct inodedep *inodedep; 2611 { 2612 struct freefrag *freefrag; 2613 2614 #ifdef DEBUG 2615 if (lk.lkt_held == NOHOLDER) 2616 panic("free_allocindir: lock not held"); 2617 #endif 2618 if ((aip->ai_state & DEPCOMPLETE) == 0) 2619 LIST_REMOVE(aip, ai_deps); 2620 if (aip->ai_state & ONWORKLIST) 2621 WORKLIST_REMOVE(&aip->ai_list); 2622 LIST_REMOVE(aip, ai_next); 2623 if ((freefrag = aip->ai_freefrag) != NULL) { 2624 if (inodedep == NULL) 2625 add_to_worklist(&freefrag->ff_list); 2626 else 2627 WORKLIST_INSERT(&inodedep->id_bufwait, 2628 &freefrag->ff_list); 2629 } 2630 WORKITEM_FREE(aip, D_ALLOCINDIR); 2631 } 2632 2633 /* 2634 * Directory entry addition dependencies. 2635 * 2636 * When adding a new directory entry, the inode (with its incremented link 2637 * count) must be written to disk before the directory entry's pointer to it. 2638 * Also, if the inode is newly allocated, the corresponding freemap must be 2639 * updated (on disk) before the directory entry's pointer. These requirements 2640 * are met via undo/redo on the directory entry's pointer, which consists 2641 * simply of the inode number. 2642 * 2643 * As directory entries are added and deleted, the free space within a 2644 * directory block can become fragmented. The ufs filesystem will compact 2645 * a fragmented directory block to make space for a new entry. When this 2646 * occurs, the offsets of previously added entries change. Any "diradd" 2647 * dependency structures corresponding to these entries must be updated with 2648 * the new offsets. 2649 */ 2650 2651 /* 2652 * This routine is called after the in-memory inode's link 2653 * count has been incremented, but before the directory entry's 2654 * pointer to the inode has been set. 
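 *
 * A hedged sketch of the expected call site (the directory-entry writer is
 * an assumption, not part of this file): the new inode's link count has
 * already been raised and recorded, the entry bytes have been copied into
 * the directory buffer, and then
 *
 *	error = softdep_setup_directory_add(bp, dp, dp->i_offset,
 *	    ip->i_number, newdirbp, isnewblk);
 *
 * where a non-zero return asks the caller to write the directory block
 * synchronously (see the indirect-block case below).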
2655 */ 2656 int 2657 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 2658 struct buf *bp; /* buffer containing directory block */ 2659 struct inode *dp; /* inode for directory */ 2660 off_t diroffset; /* offset of new entry in directory */ 2661 ino_t newinum; /* inode referenced by new directory entry */ 2662 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2663 int isnewblk; /* entry is in a newly allocated block */ 2664 { 2665 int offset; /* offset of new entry within directory block */ 2666 ufs_lbn_t lbn; /* block in directory containing new entry */ 2667 struct fs *fs; 2668 struct diradd *dap; 2669 struct allocdirect *adp; 2670 struct pagedep *pagedep; 2671 struct inodedep *inodedep; 2672 struct newdirblk *newdirblk = 0; 2673 struct mkdir *mkdir1, *mkdir2; 2674 2675 /* 2676 * Whiteouts have no dependencies. 2677 */ 2678 if (newinum == WINO) { 2679 if (newdirbp != NULL) 2680 bdwrite(newdirbp); 2681 return (0); 2682 } 2683 2684 fs = dp->i_fs; 2685 lbn = lblkno(fs, diroffset); 2686 offset = blkoff(fs, diroffset); 2687 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2688 M_SOFTDEP_FLAGS|M_ZERO); 2689 dap->da_list.wk_type = D_DIRADD; 2690 dap->da_offset = offset; 2691 dap->da_newinum = newinum; 2692 dap->da_state = ATTACHED; 2693 if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) { 2694 MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk), 2695 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 2696 newdirblk->db_list.wk_type = D_NEWDIRBLK; 2697 newdirblk->db_state = 0; 2698 } 2699 if (newdirbp == NULL) { 2700 dap->da_state |= DEPCOMPLETE; 2701 ACQUIRE_LOCK(&lk); 2702 } else { 2703 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2704 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2705 M_SOFTDEP_FLAGS); 2706 mkdir1->md_list.wk_type = D_MKDIR; 2707 mkdir1->md_state = MKDIR_BODY; 2708 mkdir1->md_diradd = dap; 2709 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2710 M_SOFTDEP_FLAGS); 2711 mkdir2->md_list.wk_type = D_MKDIR; 2712 mkdir2->md_state = MKDIR_PARENT; 2713 mkdir2->md_diradd = dap; 2714 /* 2715 * Dependency on "." and ".." being written to disk. 2716 */ 2717 mkdir1->md_buf = newdirbp; 2718 ACQUIRE_LOCK(&lk); 2719 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2720 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2721 FREE_LOCK(&lk); 2722 bdwrite(newdirbp); 2723 /* 2724 * Dependency on link count increase for parent directory 2725 */ 2726 ACQUIRE_LOCK(&lk); 2727 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0 2728 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2729 dap->da_state &= ~MKDIR_PARENT; 2730 WORKITEM_FREE(mkdir2, D_MKDIR); 2731 } else { 2732 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2733 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2734 } 2735 } 2736 /* 2737 * Link into parent directory pagedep to await its being written. 2738 */ 2739 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2740 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2741 dap->da_pagedep = pagedep; 2742 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2743 da_pdlist); 2744 /* 2745 * Link into its inodedep. Put it on the id_bufwait list if the inode 2746 * is not yet written. If it is written, do the post-inode write 2747 * processing to put it on the id_pendinghd list. 
2748 */ 2749 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2750 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2751 diradd_inode_written(dap, inodedep); 2752 else 2753 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2754 if (isnewblk) { 2755 /* 2756 * Directories growing into indirect blocks are rare 2757 * enough and the frequency of new block allocation 2758 * in those cases even more rare, that we choose not 2759 * to bother tracking them. Rather we simply force the 2760 * new directory entry to disk. 2761 */ 2762 if (lbn >= NDADDR) { 2763 FREE_LOCK(&lk); 2764 /* 2765 * We only have a new allocation when at the 2766 * beginning of a new block, not when we are 2767 * expanding into an existing block. 2768 */ 2769 if (blkoff(fs, diroffset) == 0) 2770 return (1); 2771 return (0); 2772 } 2773 /* 2774 * We only have a new allocation when at the beginning 2775 * of a new fragment, not when we are expanding into an 2776 * existing fragment. Also, there is nothing to do if we 2777 * are already tracking this block. 2778 */ 2779 if (fragoff(fs, diroffset) != 0) { 2780 FREE_LOCK(&lk); 2781 return (0); 2782 } 2783 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2784 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2785 FREE_LOCK(&lk); 2786 return (0); 2787 } 2788 /* 2789 * Find our associated allocdirect and have it track us. 2790 */ 2791 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0) 2792 panic("softdep_setup_directory_add: lost inodedep"); 2793 adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst); 2794 if (adp == NULL || adp->ad_lbn != lbn) { 2795 FREE_LOCK(&lk); 2796 panic("softdep_setup_directory_add: lost entry"); 2797 } 2798 pagedep->pd_state |= NEWBLOCK; 2799 newdirblk->db_pagedep = pagedep; 2800 WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list); 2801 } 2802 FREE_LOCK(&lk); 2803 return (0); 2804 } 2805 2806 /* 2807 * This procedure is called to change the offset of a directory 2808 * entry when compacting a directory block which must be owned 2809 * exclusively by the caller. Note that the actual entry movement 2810 * must be done in this procedure to ensure that no I/O completions 2811 * occur while the move is in progress. 
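 *
 * Illustrative call from a compaction loop (the identifier names are
 * placeholders, not from this file): rather than moving the entry with a
 * bare bcopy(), the caller lets this routine do the copy so any diradd
 * recorded for the old offset is retargeted to the new one:
 *
 *	softdep_change_directoryentry_offset(dp, dirbuf,
 *	    (caddr_t)oldep, (caddr_t)newep, entrysize);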
2812 */ 2813 void 2814 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2815 struct inode *dp; /* inode for directory */ 2816 caddr_t base; /* address of dp->i_offset */ 2817 caddr_t oldloc; /* address of old directory location */ 2818 caddr_t newloc; /* address of new directory location */ 2819 int entrysize; /* size of directory entry */ 2820 { 2821 int offset, oldoffset, newoffset; 2822 struct pagedep *pagedep; 2823 struct diradd *dap; 2824 ufs_lbn_t lbn; 2825 2826 ACQUIRE_LOCK(&lk); 2827 lbn = lblkno(dp->i_fs, dp->i_offset); 2828 offset = blkoff(dp->i_fs, dp->i_offset); 2829 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2830 goto done; 2831 oldoffset = offset + (oldloc - base); 2832 newoffset = offset + (newloc - base); 2833 2834 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2835 if (dap->da_offset != oldoffset) 2836 continue; 2837 dap->da_offset = newoffset; 2838 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2839 break; 2840 LIST_REMOVE(dap, da_pdlist); 2841 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2842 dap, da_pdlist); 2843 break; 2844 } 2845 if (dap == NULL) { 2846 2847 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2848 if (dap->da_offset == oldoffset) { 2849 dap->da_offset = newoffset; 2850 break; 2851 } 2852 } 2853 } 2854 done: 2855 bcopy(oldloc, newloc, entrysize); 2856 FREE_LOCK(&lk); 2857 } 2858 2859 /* 2860 * Free a diradd dependency structure. This routine must be called 2861 * with splbio interrupts blocked. 2862 */ 2863 static void 2864 free_diradd(dap) 2865 struct diradd *dap; 2866 { 2867 struct dirrem *dirrem; 2868 struct pagedep *pagedep; 2869 struct inodedep *inodedep; 2870 struct mkdir *mkdir, *nextmd; 2871 2872 #ifdef DEBUG 2873 if (lk.lkt_held == NOHOLDER) 2874 panic("free_diradd: lock not held"); 2875 #endif 2876 WORKLIST_REMOVE(&dap->da_list); 2877 LIST_REMOVE(dap, da_pdlist); 2878 if ((dap->da_state & DIRCHG) == 0) { 2879 pagedep = dap->da_pagedep; 2880 } else { 2881 dirrem = dap->da_previous; 2882 pagedep = dirrem->dm_pagedep; 2883 dirrem->dm_dirinum = pagedep->pd_ino; 2884 add_to_worklist(&dirrem->dm_list); 2885 } 2886 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2887 0, &inodedep) != 0) 2888 (void) free_inodedep(inodedep); 2889 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2890 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2891 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2892 if (mkdir->md_diradd != dap) 2893 continue; 2894 dap->da_state &= ~mkdir->md_state; 2895 WORKLIST_REMOVE(&mkdir->md_list); 2896 LIST_REMOVE(mkdir, md_mkdirs); 2897 WORKITEM_FREE(mkdir, D_MKDIR); 2898 } 2899 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2900 FREE_LOCK(&lk); 2901 panic("free_diradd: unfound ref"); 2902 } 2903 } 2904 WORKITEM_FREE(dap, D_DIRADD); 2905 } 2906 2907 /* 2908 * Directory entry removal dependencies. 2909 * 2910 * When removing a directory entry, the entry's inode pointer must be 2911 * zero'ed on disk before the corresponding inode's link count is decremented 2912 * (possibly freeing the inode for re-use). This dependency is handled by 2913 * updating the directory entry but delaying the inode count reduction until 2914 * after the directory block has been written to disk. After this point, the 2915 * inode count can be decremented whenever it is convenient. 2916 */ 2917 2918 /* 2919 * This routine should be called immediately after removing 2920 * a directory entry. 
The inode's link count should not be 2921 * decremented by the calling procedure -- the soft updates 2922 * code will do this task when it is safe. 2923 */ 2924 void 2925 softdep_setup_remove(bp, dp, ip, isrmdir) 2926 struct buf *bp; /* buffer containing directory block */ 2927 struct inode *dp; /* inode for the directory being modified */ 2928 struct inode *ip; /* inode for directory entry being removed */ 2929 int isrmdir; /* indicates if doing RMDIR */ 2930 { 2931 struct dirrem *dirrem, *prevdirrem; 2932 2933 /* 2934 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2935 */ 2936 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2937 2938 /* 2939 * If the COMPLETE flag is clear, then there were no active 2940 * entries and we want to roll back to a zeroed entry until 2941 * the new inode is committed to disk. If the COMPLETE flag is 2942 * set then we have deleted an entry that never made it to 2943 * disk. If the entry we deleted resulted from a name change, 2944 * then the old name still resides on disk. We cannot delete 2945 * its inode (returned to us in prevdirrem) until the zeroed 2946 * directory entry gets to disk. The new inode has never been 2947 * referenced on the disk, so can be deleted immediately. 2948 */ 2949 if ((dirrem->dm_state & COMPLETE) == 0) { 2950 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2951 dm_next); 2952 FREE_LOCK(&lk); 2953 } else { 2954 if (prevdirrem != NULL) 2955 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2956 prevdirrem, dm_next); 2957 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2958 FREE_LOCK(&lk); 2959 handle_workitem_remove(dirrem, NULL); 2960 } 2961 } 2962 2963 /* 2964 * Allocate a new dirrem if appropriate and return it along with 2965 * its associated pagedep. Called without a lock, returns with lock. 2966 */ 2967 static long num_dirrem; /* number of dirrem allocated */ 2968 static struct dirrem * 2969 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2970 struct buf *bp; /* buffer containing directory block */ 2971 struct inode *dp; /* inode for the directory being modified */ 2972 struct inode *ip; /* inode for directory entry being removed */ 2973 int isrmdir; /* indicates if doing RMDIR */ 2974 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2975 { 2976 int offset; 2977 ufs_lbn_t lbn; 2978 struct diradd *dap; 2979 struct dirrem *dirrem; 2980 struct pagedep *pagedep; 2981 2982 /* 2983 * Whiteouts have no deletion dependencies. 2984 */ 2985 if (ip == NULL) 2986 panic("newdirrem: whiteout"); 2987 /* 2988 * If we are over our limit, try to improve the situation. 2989 * Limiting the number of dirrem structures will also limit 2990 * the number of freefile and freeblks structures. 2991 */ 2992 if (num_dirrem > max_softdeps / 2) 2993 (void) request_cleanup(FLUSH_REMOVE, 0); 2994 num_dirrem += 1; 2995 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2996 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 2997 dirrem->dm_list.wk_type = D_DIRREM; 2998 dirrem->dm_state = isrmdir ? RMDIR : 0; 2999 dirrem->dm_mnt = ITOV(ip)->v_mount; 3000 dirrem->dm_oldinum = ip->i_number; 3001 *prevdirremp = NULL; 3002 3003 ACQUIRE_LOCK(&lk); 3004 lbn = lblkno(dp->i_fs, dp->i_offset); 3005 offset = blkoff(dp->i_fs, dp->i_offset); 3006 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 3007 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 3008 dirrem->dm_pagedep = pagedep; 3009 /* 3010 * Check for a diradd dependency for the same directory entry. 
3011 * If present, then both dependencies become obsolete and can 3012 * be de-allocated. Check for an entry on both the pd_dirraddhd 3013 * list and the pd_pendinghd list. 3014 */ 3015 3016 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 3017 if (dap->da_offset == offset) 3018 break; 3019 if (dap == NULL) { 3020 3021 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 3022 if (dap->da_offset == offset) 3023 break; 3024 if (dap == NULL) 3025 return (dirrem); 3026 } 3027 /* 3028 * Must be ATTACHED at this point. 3029 */ 3030 if ((dap->da_state & ATTACHED) == 0) { 3031 FREE_LOCK(&lk); 3032 panic("newdirrem: not ATTACHED"); 3033 } 3034 if (dap->da_newinum != ip->i_number) { 3035 FREE_LOCK(&lk); 3036 panic("newdirrem: inum %d should be %d", 3037 ip->i_number, dap->da_newinum); 3038 } 3039 /* 3040 * If we are deleting a changed name that never made it to disk, 3041 * then return the dirrem describing the previous inode (which 3042 * represents the inode currently referenced from this entry on disk). 3043 */ 3044 if ((dap->da_state & DIRCHG) != 0) { 3045 *prevdirremp = dap->da_previous; 3046 dap->da_state &= ~DIRCHG; 3047 dap->da_pagedep = pagedep; 3048 } 3049 /* 3050 * We are deleting an entry that never made it to disk. 3051 * Mark it COMPLETE so we can delete its inode immediately. 3052 */ 3053 dirrem->dm_state |= COMPLETE; 3054 free_diradd(dap); 3055 return (dirrem); 3056 } 3057 3058 /* 3059 * Directory entry change dependencies. 3060 * 3061 * Changing an existing directory entry requires that an add operation 3062 * be completed first followed by a deletion. The semantics for the addition 3063 * are identical to the description of adding a new entry above except 3064 * that the rollback is to the old inode number rather than zero. Once 3065 * the addition dependency is completed, the removal is done as described 3066 * in the removal routine above. 3067 */ 3068 3069 /* 3070 * This routine should be called immediately after changing 3071 * a directory entry. The inode's link count should not be 3072 * decremented by the calling procedure -- the soft updates 3073 * code will perform this task when it is safe. 3074 */ 3075 void 3076 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 3077 struct buf *bp; /* buffer containing directory block */ 3078 struct inode *dp; /* inode for the directory being modified */ 3079 struct inode *ip; /* inode for directory entry being removed */ 3080 ino_t newinum; /* new inode number for changed entry */ 3081 int isrmdir; /* indicates if doing RMDIR */ 3082 { 3083 int offset; 3084 struct diradd *dap = NULL; 3085 struct dirrem *dirrem, *prevdirrem; 3086 struct pagedep *pagedep; 3087 struct inodedep *inodedep; 3088 3089 offset = blkoff(dp->i_fs, dp->i_offset); 3090 3091 /* 3092 * Whiteouts do not need diradd dependencies. 3093 */ 3094 if (newinum != WINO) { 3095 MALLOC(dap, struct diradd *, sizeof(struct diradd), 3096 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 3097 dap->da_list.wk_type = D_DIRADD; 3098 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 3099 dap->da_offset = offset; 3100 dap->da_newinum = newinum; 3101 } 3102 3103 /* 3104 * Allocate a new dirrem and ACQUIRE_LOCK. 
3105 */ 3106 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 3107 pagedep = dirrem->dm_pagedep; 3108 /* 3109 * The possible values for isrmdir: 3110 * 0 - non-directory file rename 3111 * 1 - directory rename within same directory 3112 * inum - directory rename to new directory of given inode number 3113 * When renaming to a new directory, we are both deleting and 3114 * creating a new directory entry, so the link count on the new 3115 * directory should not change. Thus we do not need the followup 3116 * dirrem which is usually done in handle_workitem_remove. We set 3117 * the DIRCHG flag to tell handle_workitem_remove to skip the 3118 * followup dirrem. 3119 */ 3120 if (isrmdir > 1) 3121 dirrem->dm_state |= DIRCHG; 3122 3123 /* 3124 * Whiteouts have no additional dependencies, 3125 * so just put the dirrem on the correct list. 3126 */ 3127 if (newinum == WINO) { 3128 if ((dirrem->dm_state & COMPLETE) == 0) { 3129 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 3130 dm_next); 3131 } else { 3132 dirrem->dm_dirinum = pagedep->pd_ino; 3133 add_to_worklist(&dirrem->dm_list); 3134 } 3135 FREE_LOCK(&lk); 3136 return; 3137 } 3138 3139 /* 3140 * If the COMPLETE flag is clear, then there were no active 3141 * entries and we want to roll back to the previous inode until 3142 * the new inode is committed to disk. If the COMPLETE flag is 3143 * set, then we have deleted an entry that never made it to disk. 3144 * If the entry we deleted resulted from a name change, then the old 3145 * inode reference still resides on disk. Any rollback that we do 3146 * needs to be to that old inode (returned to us in prevdirrem). If 3147 * the entry we deleted resulted from a create, then there is 3148 * no entry on the disk, so we want to roll back to zero rather 3149 * than the uncommitted inode. In either of the COMPLETE cases we 3150 * want to immediately free the unwritten and unreferenced inode. 3151 */ 3152 if ((dirrem->dm_state & COMPLETE) == 0) { 3153 dap->da_previous = dirrem; 3154 } else { 3155 if (prevdirrem != NULL) { 3156 dap->da_previous = prevdirrem; 3157 } else { 3158 dap->da_state &= ~DIRCHG; 3159 dap->da_pagedep = pagedep; 3160 } 3161 dirrem->dm_dirinum = pagedep->pd_ino; 3162 add_to_worklist(&dirrem->dm_list); 3163 } 3164 /* 3165 * Link into its inodedep. Put it on the id_bufwait list if the inode 3166 * is not yet written. If it is written, do the post-inode write 3167 * processing to put it on the id_pendinghd list. 3168 */ 3169 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 3170 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 3171 dap->da_state |= COMPLETE; 3172 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3173 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3174 } else { 3175 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 3176 dap, da_pdlist); 3177 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 3178 } 3179 FREE_LOCK(&lk); 3180 } 3181 3182 /* 3183 * Called whenever the link count on an inode is changed. 3184 * It creates an inode dependency so that the new reference(s) 3185 * to the inode cannot be committed to disk until the updated 3186 * inode has been written. 
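 *
 * Typical usage sketch (the caller is assumed; the pattern mirrors
 * handle_workitem_remove() below): when a name is removed, the effective
 * link count drops immediately while i_nlink itself is left for the soft
 * updates code to lower later, once it is safe to do so:
 *
 *	ip->i_effnlink--;
 *	softdep_change_linkcnt(ip);	records i_nlink - i_effnlink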
3187 */ 3188 void 3189 softdep_change_linkcnt(ip) 3190 struct inode *ip; /* the inode with the increased link count */ 3191 { 3192 struct inodedep *inodedep; 3193 3194 ACQUIRE_LOCK(&lk); 3195 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 3196 if (ip->i_nlink < ip->i_effnlink) { 3197 FREE_LOCK(&lk); 3198 panic("softdep_change_linkcnt: bad delta"); 3199 } 3200 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3201 FREE_LOCK(&lk); 3202 } 3203 3204 /* 3205 * Called when the effective link count and the reference count 3206 * on an inode drops to zero. At this point there are no names 3207 * referencing the file in the filesystem and no active file 3208 * references. The space associated with the file will be freed 3209 * as soon as the necessary soft dependencies are cleared. 3210 */ 3211 void 3212 softdep_releasefile(ip) 3213 struct inode *ip; /* inode with the zero effective link count */ 3214 { 3215 struct inodedep *inodedep; 3216 struct fs *fs; 3217 int extblocks; 3218 3219 if (ip->i_effnlink > 0) 3220 panic("softdep_filerelease: file still referenced"); 3221 /* 3222 * We may be called several times as the real reference count 3223 * drops to zero. We only want to account for the space once. 3224 */ 3225 if (ip->i_flag & IN_SPACECOUNTED) 3226 return; 3227 /* 3228 * We have to deactivate a snapshot otherwise copyonwrites may 3229 * add blocks and the cleanup may remove blocks after we have 3230 * tried to account for them. 3231 */ 3232 if ((ip->i_flags & SF_SNAPSHOT) != 0) 3233 ffs_snapremove(ITOV(ip)); 3234 /* 3235 * If we are tracking an nlinkdelta, we have to also remember 3236 * whether we accounted for the freed space yet. 3237 */ 3238 ACQUIRE_LOCK(&lk); 3239 if ((inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep))) 3240 inodedep->id_state |= SPACECOUNTED; 3241 FREE_LOCK(&lk); 3242 fs = ip->i_fs; 3243 extblocks = 0; 3244 if (fs->fs_magic == FS_UFS2_MAGIC) 3245 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 3246 ip->i_fs->fs_pendingblocks += DIP(ip, i_blocks) - extblocks; 3247 ip->i_fs->fs_pendinginodes += 1; 3248 ip->i_flag |= IN_SPACECOUNTED; 3249 } 3250 3251 /* 3252 * This workitem decrements the inode's link count. 3253 * If the link count reaches zero, the file is removed. 3254 */ 3255 static void 3256 handle_workitem_remove(dirrem, xp) 3257 struct dirrem *dirrem; 3258 struct vnode *xp; 3259 { 3260 struct thread *td = curthread; 3261 struct inodedep *inodedep; 3262 struct vnode *vp; 3263 struct inode *ip; 3264 ino_t oldinum; 3265 int error; 3266 3267 if ((vp = xp) == NULL && 3268 (error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, LK_EXCLUSIVE, 3269 &vp)) != 0) { 3270 softdep_error("handle_workitem_remove: vget", error); 3271 return; 3272 } 3273 ip = VTOI(vp); 3274 ACQUIRE_LOCK(&lk); 3275 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 3276 FREE_LOCK(&lk); 3277 panic("handle_workitem_remove: lost inodedep"); 3278 } 3279 /* 3280 * Normal file deletion. 3281 */ 3282 if ((dirrem->dm_state & RMDIR) == 0) { 3283 ip->i_nlink--; 3284 DIP(ip, i_nlink) = ip->i_nlink; 3285 ip->i_flag |= IN_CHANGE; 3286 if (ip->i_nlink < ip->i_effnlink) { 3287 FREE_LOCK(&lk); 3288 panic("handle_workitem_remove: bad file delta"); 3289 } 3290 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3291 FREE_LOCK(&lk); 3292 vput(vp); 3293 num_dirrem -= 1; 3294 WORKITEM_FREE(dirrem, D_DIRREM); 3295 return; 3296 } 3297 /* 3298 * Directory deletion. 
Decrement reference count for both the 3299 * just deleted parent directory entry and the reference for ".". 3300 * Next truncate the directory to length zero. When the 3301 * truncation completes, arrange to have the reference count on 3302 * the parent decremented to account for the loss of "..". 3303 */ 3304 ip->i_nlink -= 2; 3305 DIP(ip, i_nlink) = ip->i_nlink; 3306 ip->i_flag |= IN_CHANGE; 3307 if (ip->i_nlink < ip->i_effnlink) { 3308 FREE_LOCK(&lk); 3309 panic("handle_workitem_remove: bad dir delta"); 3310 } 3311 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3312 FREE_LOCK(&lk); 3313 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_ucred, td)) != 0) 3314 softdep_error("handle_workitem_remove: truncate", error); 3315 /* 3316 * Rename a directory to a new parent. Since, we are both deleting 3317 * and creating a new directory entry, the link count on the new 3318 * directory should not change. Thus we skip the followup dirrem. 3319 */ 3320 if (dirrem->dm_state & DIRCHG) { 3321 vput(vp); 3322 num_dirrem -= 1; 3323 WORKITEM_FREE(dirrem, D_DIRREM); 3324 return; 3325 } 3326 /* 3327 * If the inodedep does not exist, then the zero'ed inode has 3328 * been written to disk. If the allocated inode has never been 3329 * written to disk, then the on-disk inode is zero'ed. In either 3330 * case we can remove the file immediately. 3331 */ 3332 ACQUIRE_LOCK(&lk); 3333 dirrem->dm_state = 0; 3334 oldinum = dirrem->dm_oldinum; 3335 dirrem->dm_oldinum = dirrem->dm_dirinum; 3336 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 3337 check_inode_unwritten(inodedep)) { 3338 FREE_LOCK(&lk); 3339 vput(vp); 3340 handle_workitem_remove(dirrem, NULL); 3341 return; 3342 } 3343 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 3344 FREE_LOCK(&lk); 3345 vput(vp); 3346 } 3347 3348 /* 3349 * Inode de-allocation dependencies. 3350 * 3351 * When an inode's link count is reduced to zero, it can be de-allocated. We 3352 * found it convenient to postpone de-allocation until after the inode is 3353 * written to disk with its new link count (zero). At this point, all of the 3354 * on-disk inode's block pointers are nullified and, with careful dependency 3355 * list ordering, all dependencies related to the inode will be satisfied and 3356 * the corresponding dependency structures de-allocated. So, if/when the 3357 * inode is reused, there will be no mixing of old dependencies with new 3358 * ones. This artificial dependency is set up by the block de-allocation 3359 * procedure above (softdep_setup_freeblocks) and completed by the 3360 * following procedure. 3361 */ 3362 static void 3363 handle_workitem_freefile(freefile) 3364 struct freefile *freefile; 3365 { 3366 struct fs *fs; 3367 struct inodedep *idp; 3368 int error; 3369 3370 fs = VFSTOUFS(freefile->fx_mnt)->um_fs; 3371 #ifdef DEBUG 3372 ACQUIRE_LOCK(&lk); 3373 error = inodedep_lookup(fs, freefile->fx_oldinum, 0, &idp); 3374 FREE_LOCK(&lk); 3375 if (error) 3376 panic("handle_workitem_freefile: inodedep survived"); 3377 #endif 3378 fs->fs_pendinginodes -= 1; 3379 if ((error = ffs_freefile(fs, freefile->fx_devvp, freefile->fx_oldinum, 3380 freefile->fx_mode)) != 0) 3381 softdep_error("handle_workitem_freefile", error); 3382 WORKITEM_FREE(freefile, D_FREEFILE); 3383 } 3384 3385 /* 3386 * Disk writes. 3387 * 3388 * The dependency structures constructed above are most actively used when file 3389 * system blocks are written to disk. 
No constraints are placed on when a 3390 * block can be written, but unsatisfied update dependencies are made safe by 3391 * modifying (or replacing) the source memory for the duration of the disk 3392 * write. When the disk write completes, the memory block is again brought 3393 * up-to-date. 3394 * 3395 * In-core inode structure reclamation. 3396 * 3397 * Because there are a finite number of "in-core" inode structures, they are 3398 * reused regularly. By transferring all inode-related dependencies to the 3399 * in-memory inode block and indexing them separately (via "inodedep"s), we 3400 * can allow "in-core" inode structures to be reused at any time and avoid 3401 * any increase in contention. 3402 * 3403 * Called just before entering the device driver to initiate a new disk I/O. 3404 * The buffer must be locked, thus, no I/O completion operations can occur 3405 * while we are manipulating its associated dependencies. 3406 */ 3407 static void 3408 softdep_disk_io_initiation(bp) 3409 struct buf *bp; /* structure describing disk write to occur */ 3410 { 3411 struct worklist *wk, *nextwk; 3412 struct indirdep *indirdep; 3413 struct inodedep *inodedep; 3414 3415 /* 3416 * We only care about write operations. There should never 3417 * be dependencies for reads. 3418 */ 3419 if (bp->b_iocmd == BIO_READ) 3420 panic("softdep_disk_io_initiation: read"); 3421 /* 3422 * Do any necessary pre-I/O processing. 3423 */ 3424 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) { 3425 nextwk = LIST_NEXT(wk, wk_list); 3426 switch (wk->wk_type) { 3427 3428 case D_PAGEDEP: 3429 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3430 continue; 3431 3432 case D_INODEDEP: 3433 inodedep = WK_INODEDEP(wk); 3434 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) 3435 initiate_write_inodeblock_ufs1(inodedep, bp); 3436 else 3437 initiate_write_inodeblock_ufs2(inodedep, bp); 3438 continue; 3439 3440 case D_INDIRDEP: 3441 indirdep = WK_INDIRDEP(wk); 3442 if (indirdep->ir_state & GOINGAWAY) 3443 panic("disk_io_initiation: indirdep gone"); 3444 /* 3445 * If there are no remaining dependencies, this 3446 * will be writing the real pointers, so the 3447 * dependency can be freed. 3448 */ 3449 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3450 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3451 brelse(indirdep->ir_savebp); 3452 /* inline expand WORKLIST_REMOVE(wk); */ 3453 wk->wk_state &= ~ONWORKLIST; 3454 LIST_REMOVE(wk, wk_list); 3455 WORKITEM_FREE(indirdep, D_INDIRDEP); 3456 continue; 3457 } 3458 /* 3459 * Replace up-to-date version with safe version. 3460 */ 3461 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3462 M_INDIRDEP, M_SOFTDEP_FLAGS); 3463 ACQUIRE_LOCK(&lk); 3464 indirdep->ir_state &= ~ATTACHED; 3465 indirdep->ir_state |= UNDONE; 3466 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3467 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3468 bp->b_bcount); 3469 FREE_LOCK(&lk); 3470 continue; 3471 3472 case D_MKDIR: 3473 case D_BMSAFEMAP: 3474 case D_ALLOCDIRECT: 3475 case D_ALLOCINDIR: 3476 continue; 3477 3478 default: 3479 panic("handle_disk_io_initiation: Unexpected type %s", 3480 TYPENAME(wk->wk_type)); 3481 /* NOTREACHED */ 3482 } 3483 } 3484 } 3485 3486 /* 3487 * Called from within the procedure above to deal with unsatisfied 3488 * allocation dependencies in a directory. The buffer must be locked, 3489 * thus, no I/O completion operations can occur while we are 3490 * manipulating its associated dependencies. 
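 * In short: each directory entry that names a not-yet-safe inode is
 * rolled back in the buffer about to be written, either to the previous
 * inode number (for a DIRCHG rename) or to zero, and the diradd is
 * marked UNDONE so the entry can be restored when the write completes.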
3491 */ 3492 static void 3493 initiate_write_filepage(pagedep, bp) 3494 struct pagedep *pagedep; 3495 struct buf *bp; 3496 { 3497 struct diradd *dap; 3498 struct direct *ep; 3499 int i; 3500 3501 if (pagedep->pd_state & IOSTARTED) { 3502 /* 3503 * This can only happen if there is a driver that does not 3504 * understand chaining. Here biodone will reissue the call 3505 * to strategy for the incomplete buffers. 3506 */ 3507 printf("initiate_write_filepage: already started\n"); 3508 return; 3509 } 3510 pagedep->pd_state |= IOSTARTED; 3511 ACQUIRE_LOCK(&lk); 3512 for (i = 0; i < DAHASHSZ; i++) { 3513 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3514 ep = (struct direct *) 3515 ((char *)bp->b_data + dap->da_offset); 3516 if (ep->d_ino != dap->da_newinum) { 3517 FREE_LOCK(&lk); 3518 panic("%s: dir inum %d != new %d", 3519 "initiate_write_filepage", 3520 ep->d_ino, dap->da_newinum); 3521 } 3522 if (dap->da_state & DIRCHG) 3523 ep->d_ino = dap->da_previous->dm_oldinum; 3524 else 3525 ep->d_ino = 0; 3526 dap->da_state &= ~ATTACHED; 3527 dap->da_state |= UNDONE; 3528 } 3529 } 3530 FREE_LOCK(&lk); 3531 } 3532 3533 /* 3534 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 3535 * Note that any bug fixes made to this routine must be done in the 3536 * version found below. 3537 * 3538 * Called from within the procedure above to deal with unsatisfied 3539 * allocation dependencies in an inodeblock. The buffer must be 3540 * locked, thus, no I/O completion operations can occur while we 3541 * are manipulating its associated dependencies. 3542 */ 3543 static void 3544 initiate_write_inodeblock_ufs1(inodedep, bp) 3545 struct inodedep *inodedep; 3546 struct buf *bp; /* The inode block */ 3547 { 3548 struct allocdirect *adp, *lastadp; 3549 struct ufs1_dinode *dp; 3550 struct fs *fs; 3551 ufs_lbn_t i, prevlbn = 0; 3552 int deplist; 3553 3554 if (inodedep->id_state & IOSTARTED) 3555 panic("initiate_write_inodeblock_ufs1: already started"); 3556 inodedep->id_state |= IOSTARTED; 3557 fs = inodedep->id_fs; 3558 dp = (struct ufs1_dinode *)bp->b_data + 3559 ino_to_fsbo(fs, inodedep->id_ino); 3560 /* 3561 * If the bitmap is not yet written, then the allocated 3562 * inode cannot be written to disk. 3563 */ 3564 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3565 if (inodedep->id_savedino1 != NULL) 3566 panic("initiate_write_inodeblock_ufs1: I/O underway"); 3567 MALLOC(inodedep->id_savedino1, struct ufs1_dinode *, 3568 sizeof(struct ufs1_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3569 *inodedep->id_savedino1 = *dp; 3570 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3571 return; 3572 } 3573 /* 3574 * If no dependencies, then there is nothing to roll back. 3575 */ 3576 inodedep->id_savedsize = dp->di_size; 3577 inodedep->id_savedextsize = 0; 3578 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3579 return; 3580 /* 3581 * Set the dependencies to busy. 
3582 */ 3583 ACQUIRE_LOCK(&lk); 3584 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3585 adp = TAILQ_NEXT(adp, ad_next)) { 3586 #ifdef DIAGNOSTIC 3587 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3588 FREE_LOCK(&lk); 3589 panic("softdep_write_inodeblock: lbn order"); 3590 } 3591 prevlbn = adp->ad_lbn; 3592 if (adp->ad_lbn < NDADDR && 3593 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3594 FREE_LOCK(&lk); 3595 panic("%s: direct pointer #%jd mismatch %d != %jd", 3596 "softdep_write_inodeblock", 3597 (intmax_t)adp->ad_lbn, 3598 dp->di_db[adp->ad_lbn], 3599 (intmax_t)adp->ad_newblkno); 3600 } 3601 if (adp->ad_lbn >= NDADDR && 3602 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3603 FREE_LOCK(&lk); 3604 panic("%s: indirect pointer #%jd mismatch %d != %jd", 3605 "softdep_write_inodeblock", 3606 (intmax_t)adp->ad_lbn - NDADDR, 3607 dp->di_ib[adp->ad_lbn - NDADDR], 3608 (intmax_t)adp->ad_newblkno); 3609 } 3610 deplist |= 1 << adp->ad_lbn; 3611 if ((adp->ad_state & ATTACHED) == 0) { 3612 FREE_LOCK(&lk); 3613 panic("softdep_write_inodeblock: Unknown state 0x%x", 3614 adp->ad_state); 3615 } 3616 #endif /* DIAGNOSTIC */ 3617 adp->ad_state &= ~ATTACHED; 3618 adp->ad_state |= UNDONE; 3619 } 3620 /* 3621 * The on-disk inode cannot claim to be any larger than the last 3622 * fragment that has been written. Otherwise, the on-disk inode 3623 * might have fragments that were not the last block in the file 3624 * which would corrupt the filesystem. 3625 */ 3626 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3627 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3628 if (adp->ad_lbn >= NDADDR) 3629 break; 3630 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3631 /* keep going until hitting a rollback to a frag */ 3632 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3633 continue; 3634 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3635 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3636 #ifdef DIAGNOSTIC 3637 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3638 FREE_LOCK(&lk); 3639 panic("softdep_write_inodeblock: lost dep1"); 3640 } 3641 #endif /* DIAGNOSTIC */ 3642 dp->di_db[i] = 0; 3643 } 3644 for (i = 0; i < NIADDR; i++) { 3645 #ifdef DIAGNOSTIC 3646 if (dp->di_ib[i] != 0 && 3647 (deplist & ((1 << NDADDR) << i)) == 0) { 3648 FREE_LOCK(&lk); 3649 panic("softdep_write_inodeblock: lost dep2"); 3650 } 3651 #endif /* DIAGNOSTIC */ 3652 dp->di_ib[i] = 0; 3653 } 3654 FREE_LOCK(&lk); 3655 return; 3656 } 3657 /* 3658 * If we have zero'ed out the last allocated block of the file, 3659 * roll back the size to the last currently allocated block. 3660 * We know that this last allocated block is a full-sized as 3661 * we already checked for fragments in the loop above. 3662 */ 3663 if (lastadp != NULL && 3664 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3665 for (i = lastadp->ad_lbn; i >= 0; i--) 3666 if (dp->di_db[i] != 0) 3667 break; 3668 dp->di_size = (i + 1) * fs->fs_bsize; 3669 } 3670 /* 3671 * The only dependencies are for indirect blocks. 3672 * 3673 * The file size for indirect block additions is not guaranteed. 3674 * Such a guarantee would be non-trivial to achieve. The conventional 3675 * synchronous write implementation also does not make this guarantee. 3676 * Fsck should catch and fix discrepancies. Arguably, the file size 3677 * can be over-estimated without destroying integrity when the file 3678 * moves into the indirect blocks (i.e., is large). If we want to 3679 * postpone fsck, we are stuck with this argument. 
3680 */ 3681 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3682 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3683 FREE_LOCK(&lk); 3684 } 3685 3686 /* 3687 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 3688 * Note that any bug fixes made to this routine must be done in the 3689 * version found above. 3690 * 3691 * Called from within the procedure above to deal with unsatisfied 3692 * allocation dependencies in an inodeblock. The buffer must be 3693 * locked, thus, no I/O completion operations can occur while we 3694 * are manipulating its associated dependencies. 3695 */ 3696 static void 3697 initiate_write_inodeblock_ufs2(inodedep, bp) 3698 struct inodedep *inodedep; 3699 struct buf *bp; /* The inode block */ 3700 { 3701 struct allocdirect *adp, *lastadp; 3702 struct ufs2_dinode *dp; 3703 struct fs *fs; 3704 ufs_lbn_t i, prevlbn = 0; 3705 int deplist; 3706 3707 if (inodedep->id_state & IOSTARTED) 3708 panic("initiate_write_inodeblock_ufs2: already started"); 3709 inodedep->id_state |= IOSTARTED; 3710 fs = inodedep->id_fs; 3711 dp = (struct ufs2_dinode *)bp->b_data + 3712 ino_to_fsbo(fs, inodedep->id_ino); 3713 /* 3714 * If the bitmap is not yet written, then the allocated 3715 * inode cannot be written to disk. 3716 */ 3717 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3718 if (inodedep->id_savedino2 != NULL) 3719 panic("initiate_write_inodeblock_ufs2: I/O underway"); 3720 MALLOC(inodedep->id_savedino2, struct ufs2_dinode *, 3721 sizeof(struct ufs2_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3722 *inodedep->id_savedino2 = *dp; 3723 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 3724 return; 3725 } 3726 /* 3727 * If no dependencies, then there is nothing to roll back. 3728 */ 3729 inodedep->id_savedsize = dp->di_size; 3730 inodedep->id_savedextsize = dp->di_extsize; 3731 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL && 3732 TAILQ_FIRST(&inodedep->id_extupdt) == NULL) 3733 return; 3734 /* 3735 * Set the ext data dependencies to busy. 3736 */ 3737 ACQUIRE_LOCK(&lk); 3738 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3739 adp = TAILQ_NEXT(adp, ad_next)) { 3740 #ifdef DIAGNOSTIC 3741 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3742 FREE_LOCK(&lk); 3743 panic("softdep_write_inodeblock: lbn order"); 3744 } 3745 prevlbn = adp->ad_lbn; 3746 if (dp->di_extb[adp->ad_lbn] != adp->ad_newblkno) { 3747 FREE_LOCK(&lk); 3748 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3749 "softdep_write_inodeblock", 3750 (intmax_t)adp->ad_lbn, 3751 (intmax_t)dp->di_extb[adp->ad_lbn], 3752 (intmax_t)adp->ad_newblkno); 3753 } 3754 deplist |= 1 << adp->ad_lbn; 3755 if ((adp->ad_state & ATTACHED) == 0) { 3756 FREE_LOCK(&lk); 3757 panic("softdep_write_inodeblock: Unknown state 0x%x", 3758 adp->ad_state); 3759 } 3760 #endif /* DIAGNOSTIC */ 3761 adp->ad_state &= ~ATTACHED; 3762 adp->ad_state |= UNDONE; 3763 } 3764 /* 3765 * The on-disk inode cannot claim to be any larger than the last 3766 * fragment that has been written. Otherwise, the on-disk inode 3767 * might have fragments that were not the last block in the ext 3768 * data which would corrupt the filesystem. 
3769 */ 3770 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3771 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3772 dp->di_extb[adp->ad_lbn] = adp->ad_oldblkno; 3773 /* keep going until hitting a rollback to a frag */ 3774 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3775 continue; 3776 dp->di_extsize = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3777 for (i = adp->ad_lbn + 1; i < NXADDR; i++) { 3778 #ifdef DIAGNOSTIC 3779 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) { 3780 FREE_LOCK(&lk); 3781 panic("softdep_write_inodeblock: lost dep1"); 3782 } 3783 #endif /* DIAGNOSTIC */ 3784 dp->di_extb[i] = 0; 3785 } 3786 lastadp = NULL; 3787 break; 3788 } 3789 /* 3790 * If we have zero'ed out the last allocated block of the ext 3791 * data, roll back the size to the last currently allocated block. 3792 * We know that this last allocated block is full-sized, as 3793 * we already checked for fragments in the loop above. 3794 */ 3795 if (lastadp != NULL && 3796 dp->di_extsize <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3797 for (i = lastadp->ad_lbn; i >= 0; i--) 3798 if (dp->di_extb[i] != 0) 3799 break; 3800 dp->di_extsize = (i + 1) * fs->fs_bsize; 3801 } 3802 /* 3803 * Set the file data dependencies to busy. 3804 */ 3805 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3806 adp = TAILQ_NEXT(adp, ad_next)) { 3807 #ifdef DIAGNOSTIC 3808 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3809 FREE_LOCK(&lk); 3810 panic("softdep_write_inodeblock: lbn order"); 3811 } 3812 prevlbn = adp->ad_lbn; 3813 if (adp->ad_lbn < NDADDR && 3814 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3815 FREE_LOCK(&lk); 3816 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3817 "softdep_write_inodeblock", 3818 (intmax_t)adp->ad_lbn, 3819 (intmax_t)dp->di_db[adp->ad_lbn], 3820 (intmax_t)adp->ad_newblkno); 3821 } 3822 if (adp->ad_lbn >= NDADDR && 3823 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3824 FREE_LOCK(&lk); 3825 panic("%s: indirect pointer #%jd mismatch %jd != %jd", 3826 "softdep_write_inodeblock", 3827 (intmax_t)adp->ad_lbn - NDADDR, 3828 (intmax_t)dp->di_ib[adp->ad_lbn - NDADDR], 3829 (intmax_t)adp->ad_newblkno); 3830 } 3831 deplist |= 1 << adp->ad_lbn; 3832 if ((adp->ad_state & ATTACHED) == 0) { 3833 FREE_LOCK(&lk); 3834 panic("softdep_write_inodeblock: Unknown state 0x%x", 3835 adp->ad_state); 3836 } 3837 #endif /* DIAGNOSTIC */ 3838 adp->ad_state &= ~ATTACHED; 3839 adp->ad_state |= UNDONE; 3840 } 3841 /* 3842 * The on-disk inode cannot claim to be any larger than the last 3843 * fragment that has been written. Otherwise, the on-disk inode 3844 * might have fragments that were not the last block in the file 3845 * which would corrupt the filesystem.
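 * For example (illustrative numbers): with an 8192-byte fs_bsize, if the
 * allocation at ad_lbn 5 rolls back to a 2048-byte fragment, the size in
 * the copy being written becomes 5 * 8192 + 2048 = 43008 and the block
 * pointers beyond it are cleared until their own writes are safe.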
3846 */ 3847 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3848 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3849 if (adp->ad_lbn >= NDADDR) 3850 break; 3851 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3852 /* keep going until hitting a rollback to a frag */ 3853 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3854 continue; 3855 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3856 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3857 #ifdef DIAGNOSTIC 3858 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3859 FREE_LOCK(&lk); 3860 panic("softdep_write_inodeblock: lost dep2"); 3861 } 3862 #endif /* DIAGNOSTIC */ 3863 dp->di_db[i] = 0; 3864 } 3865 for (i = 0; i < NIADDR; i++) { 3866 #ifdef DIAGNOSTIC 3867 if (dp->di_ib[i] != 0 && 3868 (deplist & ((1 << NDADDR) << i)) == 0) { 3869 FREE_LOCK(&lk); 3870 panic("softdep_write_inodeblock: lost dep3"); 3871 } 3872 #endif /* DIAGNOSTIC */ 3873 dp->di_ib[i] = 0; 3874 } 3875 FREE_LOCK(&lk); 3876 return; 3877 } 3878 /* 3879 * If we have zero'ed out the last allocated block of the file, 3880 * roll back the size to the last currently allocated block. 3881 * We know that this last allocated block is a full-sized as 3882 * we already checked for fragments in the loop above. 3883 */ 3884 if (lastadp != NULL && 3885 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3886 for (i = lastadp->ad_lbn; i >= 0; i--) 3887 if (dp->di_db[i] != 0) 3888 break; 3889 dp->di_size = (i + 1) * fs->fs_bsize; 3890 } 3891 /* 3892 * The only dependencies are for indirect blocks. 3893 * 3894 * The file size for indirect block additions is not guaranteed. 3895 * Such a guarantee would be non-trivial to achieve. The conventional 3896 * synchronous write implementation also does not make this guarantee. 3897 * Fsck should catch and fix discrepancies. Arguably, the file size 3898 * can be over-estimated without destroying integrity when the file 3899 * moves into the indirect blocks (i.e., is large). If we want to 3900 * postpone fsck, we are stuck with this argument. 3901 */ 3902 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3903 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3904 FREE_LOCK(&lk); 3905 } 3906 3907 /* 3908 * This routine is called during the completion interrupt 3909 * service routine for a disk write (from the procedure called 3910 * by the device driver to inform the filesystem caches of 3911 * a request completion). It should be called early in this 3912 * procedure, before the block is made available to other 3913 * processes or other routines are called. 
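 * In outline, the routine below walks the buffer's b_dep list, marks each
 * dependency as written (dispatching on its type), and gathers on a local
 * list any work items that must be reattached because the buffer was
 * written with rolled-back contents and so needs another write.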
3914 */ 3915 static void 3916 softdep_disk_write_complete(bp) 3917 struct buf *bp; /* describes the completed disk write */ 3918 { 3919 struct worklist *wk; 3920 struct workhead reattach; 3921 struct newblk *newblk; 3922 struct allocindir *aip; 3923 struct allocdirect *adp; 3924 struct indirdep *indirdep; 3925 struct inodedep *inodedep; 3926 struct bmsafemap *bmsafemap; 3927 3928 #ifdef DEBUG 3929 if (lk.lkt_held != NOHOLDER) 3930 panic("softdep_disk_write_complete: lock is held"); 3931 lk.lkt_held = SPECIAL_FLAG; 3932 #endif 3933 LIST_INIT(&reattach); 3934 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3935 WORKLIST_REMOVE(wk); 3936 switch (wk->wk_type) { 3937 3938 case D_PAGEDEP: 3939 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3940 WORKLIST_INSERT(&reattach, wk); 3941 continue; 3942 3943 case D_INODEDEP: 3944 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3945 WORKLIST_INSERT(&reattach, wk); 3946 continue; 3947 3948 case D_BMSAFEMAP: 3949 bmsafemap = WK_BMSAFEMAP(wk); 3950 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3951 newblk->nb_state |= DEPCOMPLETE; 3952 newblk->nb_bmsafemap = NULL; 3953 LIST_REMOVE(newblk, nb_deps); 3954 } 3955 while ((adp = 3956 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3957 adp->ad_state |= DEPCOMPLETE; 3958 adp->ad_buf = NULL; 3959 LIST_REMOVE(adp, ad_deps); 3960 handle_allocdirect_partdone(adp); 3961 } 3962 while ((aip = 3963 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3964 aip->ai_state |= DEPCOMPLETE; 3965 aip->ai_buf = NULL; 3966 LIST_REMOVE(aip, ai_deps); 3967 handle_allocindir_partdone(aip); 3968 } 3969 while ((inodedep = 3970 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3971 inodedep->id_state |= DEPCOMPLETE; 3972 LIST_REMOVE(inodedep, id_deps); 3973 inodedep->id_buf = NULL; 3974 } 3975 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3976 continue; 3977 3978 case D_MKDIR: 3979 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3980 continue; 3981 3982 case D_ALLOCDIRECT: 3983 adp = WK_ALLOCDIRECT(wk); 3984 adp->ad_state |= COMPLETE; 3985 handle_allocdirect_partdone(adp); 3986 continue; 3987 3988 case D_ALLOCINDIR: 3989 aip = WK_ALLOCINDIR(wk); 3990 aip->ai_state |= COMPLETE; 3991 handle_allocindir_partdone(aip); 3992 continue; 3993 3994 case D_INDIRDEP: 3995 indirdep = WK_INDIRDEP(wk); 3996 if (indirdep->ir_state & GOINGAWAY) { 3997 lk.lkt_held = NOHOLDER; 3998 panic("disk_write_complete: indirdep gone"); 3999 } 4000 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 4001 FREE(indirdep->ir_saveddata, M_INDIRDEP); 4002 indirdep->ir_saveddata = 0; 4003 indirdep->ir_state &= ~UNDONE; 4004 indirdep->ir_state |= ATTACHED; 4005 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 4006 handle_allocindir_partdone(aip); 4007 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 4008 lk.lkt_held = NOHOLDER; 4009 panic("disk_write_complete: not gone"); 4010 } 4011 } 4012 WORKLIST_INSERT(&reattach, wk); 4013 if ((bp->b_flags & B_DELWRI) == 0) 4014 stat_indir_blk_ptrs++; 4015 bdirty(bp); 4016 continue; 4017 4018 default: 4019 lk.lkt_held = NOHOLDER; 4020 panic("handle_disk_write_complete: Unknown type %s", 4021 TYPENAME(wk->wk_type)); 4022 /* NOTREACHED */ 4023 } 4024 } 4025 /* 4026 * Reattach any requests that must be redone. 
4027 */ 4028 while ((wk = LIST_FIRST(&reattach)) != NULL) { 4029 WORKLIST_REMOVE(wk); 4030 WORKLIST_INSERT(&bp->b_dep, wk); 4031 } 4032 #ifdef DEBUG 4033 if (lk.lkt_held != SPECIAL_FLAG) 4034 panic("softdep_disk_write_complete: lock lost"); 4035 lk.lkt_held = NOHOLDER; 4036 #endif 4037 } 4038 4039 /* 4040 * Called from within softdep_disk_write_complete above. Note that 4041 * this routine is always called from interrupt level with further 4042 * splbio interrupts blocked. 4043 */ 4044 static void 4045 handle_allocdirect_partdone(adp) 4046 struct allocdirect *adp; /* the completed allocdirect */ 4047 { 4048 struct allocdirectlst *listhead; 4049 struct allocdirect *listadp; 4050 struct inodedep *inodedep; 4051 long bsize, delay; 4052 4053 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4054 return; 4055 if (adp->ad_buf != NULL) { 4056 lk.lkt_held = NOHOLDER; 4057 panic("handle_allocdirect_partdone: dangling dep"); 4058 } 4059 /* 4060 * The on-disk inode cannot claim to be any larger than the last 4061 * fragment that has been written. Otherwise, the on-disk inode 4062 * might have fragments that were not the last block in the file 4063 * which would corrupt the filesystem. Thus, we cannot free any 4064 * allocdirects after one whose ad_oldblkno claims a fragment as 4065 * these blocks must be rolled back to zero before writing the inode. 4066 * We check the currently active set of allocdirects in id_inoupdt 4067 * or id_extupdt as appropriate. 4068 */ 4069 inodedep = adp->ad_inodedep; 4070 bsize = inodedep->id_fs->fs_bsize; 4071 if (adp->ad_state & EXTDATA) 4072 listhead = &inodedep->id_extupdt; 4073 else 4074 listhead = &inodedep->id_inoupdt; 4075 TAILQ_FOREACH(listadp, listhead, ad_next) { 4076 /* found our block */ 4077 if (listadp == adp) 4078 break; 4079 /* continue if ad_oldlbn is not a fragment */ 4080 if (listadp->ad_oldsize == 0 || 4081 listadp->ad_oldsize == bsize) 4082 continue; 4083 /* hit a fragment */ 4084 return; 4085 } 4086 /* 4087 * If we have reached the end of the current list without 4088 * finding the just finished dependency, then it must be 4089 * on the future dependency list. Future dependencies cannot 4090 * be freed until they are moved to the current list. 4091 */ 4092 if (listadp == NULL) { 4093 #ifdef DEBUG 4094 if (adp->ad_state & EXTDATA) 4095 listhead = &inodedep->id_newextupdt; 4096 else 4097 listhead = &inodedep->id_newinoupdt; 4098 TAILQ_FOREACH(listadp, listhead, ad_next) 4099 /* found our block */ 4100 if (listadp == adp) 4101 break; 4102 if (listadp == NULL) { 4103 lk.lkt_held = NOHOLDER; 4104 panic("handle_allocdirect_partdone: lost dep"); 4105 } 4106 #endif /* DEBUG */ 4107 return; 4108 } 4109 /* 4110 * If we have found the just finished dependency, then free 4111 * it along with anything that follows it that is complete. 4112 * If the inode still has a bitmap dependency, then it has 4113 * never been written to disk, hence the on-disk inode cannot 4114 * reference the old fragment so we can free it without delay. 4115 */ 4116 delay = (inodedep->id_state & DEPCOMPLETE); 4117 for (; adp; adp = listadp) { 4118 listadp = TAILQ_NEXT(adp, ad_next); 4119 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4120 return; 4121 free_allocdirect(listhead, adp, delay); 4122 } 4123 } 4124 4125 /* 4126 * Called from within softdep_disk_write_complete above. Note that 4127 * this routine is always called from interrupt level with further 4128 * splbio interrupts blocked. 
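 * If the indirect block is in the middle of a write with rolled-back
 * ("safe") contents (its indirdep is UNDONE), the new pointer cannot be
 * installed yet; the allocindir is parked on ir_donehd and is installed
 * into the saved block once that write completes.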
4129 */ 4130 static void 4131 handle_allocindir_partdone(aip) 4132 struct allocindir *aip; /* the completed allocindir */ 4133 { 4134 struct indirdep *indirdep; 4135 4136 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 4137 return; 4138 if (aip->ai_buf != NULL) { 4139 lk.lkt_held = NOHOLDER; 4140 panic("handle_allocindir_partdone: dangling dependency"); 4141 } 4142 indirdep = aip->ai_indirdep; 4143 if (indirdep->ir_state & UNDONE) { 4144 LIST_REMOVE(aip, ai_next); 4145 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 4146 return; 4147 } 4148 if (indirdep->ir_state & UFS1FMT) 4149 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4150 aip->ai_newblkno; 4151 else 4152 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4153 aip->ai_newblkno; 4154 LIST_REMOVE(aip, ai_next); 4155 if (aip->ai_freefrag != NULL) 4156 add_to_worklist(&aip->ai_freefrag->ff_list); 4157 WORKITEM_FREE(aip, D_ALLOCINDIR); 4158 } 4159 4160 /* 4161 * Called from within softdep_disk_write_complete above to restore 4162 * in-memory inode block contents to their most up-to-date state. Note 4163 * that this routine is always called from interrupt level with further 4164 * splbio interrupts blocked. 4165 */ 4166 static int 4167 handle_written_inodeblock(inodedep, bp) 4168 struct inodedep *inodedep; 4169 struct buf *bp; /* buffer containing the inode block */ 4170 { 4171 struct worklist *wk, *filefree; 4172 struct allocdirect *adp, *nextadp; 4173 struct ufs1_dinode *dp1 = NULL; 4174 struct ufs2_dinode *dp2 = NULL; 4175 int hadchanges, fstype; 4176 4177 if ((inodedep->id_state & IOSTARTED) == 0) { 4178 lk.lkt_held = NOHOLDER; 4179 panic("handle_written_inodeblock: not started"); 4180 } 4181 inodedep->id_state &= ~IOSTARTED; 4182 inodedep->id_state |= COMPLETE; 4183 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 4184 fstype = UFS1; 4185 dp1 = (struct ufs1_dinode *)bp->b_data + 4186 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4187 } else { 4188 fstype = UFS2; 4189 dp2 = (struct ufs2_dinode *)bp->b_data + 4190 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4191 } 4192 /* 4193 * If we had to rollback the inode allocation because of 4194 * bitmaps being incomplete, then simply restore it. 4195 * Keep the block dirty so that it will not be reclaimed until 4196 * all associated dependencies have been cleared and the 4197 * corresponding updates written to disk. 4198 */ 4199 if (inodedep->id_savedino1 != NULL) { 4200 if (fstype == UFS1) 4201 *dp1 = *inodedep->id_savedino1; 4202 else 4203 *dp2 = *inodedep->id_savedino2; 4204 FREE(inodedep->id_savedino1, M_INODEDEP); 4205 inodedep->id_savedino1 = NULL; 4206 if ((bp->b_flags & B_DELWRI) == 0) 4207 stat_inode_bitmap++; 4208 bdirty(bp); 4209 return (1); 4210 } 4211 /* 4212 * Roll forward anything that had to be rolled back before 4213 * the inode could be updated. 
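 * The buffer now holds the safe (rolled-back) contents that just went to
 * disk; restore the up-to-date block pointers and sizes and mark the
 * buffer dirty again so that the real values reach the disk on a later
 * write.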
4214 */ 4215 hadchanges = 0; 4216 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 4217 nextadp = TAILQ_NEXT(adp, ad_next); 4218 if (adp->ad_state & ATTACHED) { 4219 lk.lkt_held = NOHOLDER; 4220 panic("handle_written_inodeblock: new entry"); 4221 } 4222 if (fstype == UFS1) { 4223 if (adp->ad_lbn < NDADDR) { 4224 if (dp1->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4225 lk.lkt_held = NOHOLDER; 4226 panic("%s %s #%jd mismatch %d != %jd", 4227 "handle_written_inodeblock:", 4228 "direct pointer", 4229 (intmax_t)adp->ad_lbn, 4230 dp1->di_db[adp->ad_lbn], 4231 (intmax_t)adp->ad_oldblkno); 4232 } 4233 dp1->di_db[adp->ad_lbn] = adp->ad_newblkno; 4234 } else { 4235 if (dp1->di_ib[adp->ad_lbn - NDADDR] != 0) { 4236 lk.lkt_held = NOHOLDER; 4237 panic("%s: %s #%jd allocated as %d", 4238 "handle_written_inodeblock", 4239 "indirect pointer", 4240 (intmax_t)adp->ad_lbn - NDADDR, 4241 dp1->di_ib[adp->ad_lbn - NDADDR]); 4242 } 4243 dp1->di_ib[adp->ad_lbn - NDADDR] = 4244 adp->ad_newblkno; 4245 } 4246 } else { 4247 if (adp->ad_lbn < NDADDR) { 4248 if (dp2->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4249 lk.lkt_held = NOHOLDER; 4250 panic("%s: %s #%jd %s %jd != %jd", 4251 "handle_written_inodeblock", 4252 "direct pointer", 4253 (intmax_t)adp->ad_lbn, "mismatch", 4254 (intmax_t)dp2->di_db[adp->ad_lbn], 4255 (intmax_t)adp->ad_oldblkno); 4256 } 4257 dp2->di_db[adp->ad_lbn] = adp->ad_newblkno; 4258 } else { 4259 if (dp2->di_ib[adp->ad_lbn - NDADDR] != 0) { 4260 lk.lkt_held = NOHOLDER; 4261 panic("%s: %s #%jd allocated as %jd", 4262 "handle_written_inodeblock", 4263 "indirect pointer", 4264 (intmax_t)adp->ad_lbn - NDADDR, 4265 (intmax_t) 4266 dp2->di_ib[adp->ad_lbn - NDADDR]); 4267 } 4268 dp2->di_ib[adp->ad_lbn - NDADDR] = 4269 adp->ad_newblkno; 4270 } 4271 } 4272 adp->ad_state &= ~UNDONE; 4273 adp->ad_state |= ATTACHED; 4274 hadchanges = 1; 4275 } 4276 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 4277 nextadp = TAILQ_NEXT(adp, ad_next); 4278 if (adp->ad_state & ATTACHED) { 4279 lk.lkt_held = NOHOLDER; 4280 panic("handle_written_inodeblock: new entry"); 4281 } 4282 if (dp2->di_extb[adp->ad_lbn] != adp->ad_oldblkno) { 4283 lk.lkt_held = NOHOLDER; 4284 panic("%s: direct pointers #%jd %s %jd != %jd", 4285 "handle_written_inodeblock", 4286 (intmax_t)adp->ad_lbn, "mismatch", 4287 (intmax_t)dp2->di_extb[adp->ad_lbn], 4288 (intmax_t)adp->ad_oldblkno); 4289 } 4290 dp2->di_extb[adp->ad_lbn] = adp->ad_newblkno; 4291 adp->ad_state &= ~UNDONE; 4292 adp->ad_state |= ATTACHED; 4293 hadchanges = 1; 4294 } 4295 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 4296 stat_direct_blk_ptrs++; 4297 /* 4298 * Reset the file size to its most up-to-date value. 4299 */ 4300 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) { 4301 lk.lkt_held = NOHOLDER; 4302 panic("handle_written_inodeblock: bad size"); 4303 } 4304 if (fstype == UFS1) { 4305 if (dp1->di_size != inodedep->id_savedsize) { 4306 dp1->di_size = inodedep->id_savedsize; 4307 hadchanges = 1; 4308 } 4309 } else { 4310 if (dp2->di_size != inodedep->id_savedsize) { 4311 dp2->di_size = inodedep->id_savedsize; 4312 hadchanges = 1; 4313 } 4314 if (dp2->di_extsize != inodedep->id_savedextsize) { 4315 dp2->di_extsize = inodedep->id_savedextsize; 4316 hadchanges = 1; 4317 } 4318 } 4319 inodedep->id_savedsize = -1; 4320 inodedep->id_savedextsize = -1; 4321 /* 4322 * If there were any rollbacks in the inode block, then it must be 4323 * marked dirty so that its will eventually get written back in 4324 * its correct form. 
4325 */ 4326 if (hadchanges) 4327 bdirty(bp); 4328 /* 4329 * Process any allocdirects that completed during the update. 4330 */ 4331 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 4332 handle_allocdirect_partdone(adp); 4333 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 4334 handle_allocdirect_partdone(adp); 4335 /* 4336 * Process deallocations that were held pending until the 4337 * inode had been written to disk. Freeing of the inode 4338 * is delayed until after all blocks have been freed to 4339 * avoid creation of new <vfsid, inum, lbn> triples 4340 * before the old ones have been deleted. 4341 */ 4342 filefree = NULL; 4343 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 4344 WORKLIST_REMOVE(wk); 4345 switch (wk->wk_type) { 4346 4347 case D_FREEFILE: 4348 /* 4349 * We defer adding filefree to the worklist until 4350 * all other additions have been made to ensure 4351 * that it will be done after all the old blocks 4352 * have been freed. 4353 */ 4354 if (filefree != NULL) { 4355 lk.lkt_held = NOHOLDER; 4356 panic("handle_written_inodeblock: filefree"); 4357 } 4358 filefree = wk; 4359 continue; 4360 4361 case D_MKDIR: 4362 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 4363 continue; 4364 4365 case D_DIRADD: 4366 diradd_inode_written(WK_DIRADD(wk), inodedep); 4367 continue; 4368 4369 case D_FREEBLKS: 4370 case D_FREEFRAG: 4371 case D_DIRREM: 4372 add_to_worklist(wk); 4373 continue; 4374 4375 case D_NEWDIRBLK: 4376 free_newdirblk(WK_NEWDIRBLK(wk)); 4377 continue; 4378 4379 default: 4380 lk.lkt_held = NOHOLDER; 4381 panic("handle_written_inodeblock: Unknown type %s", 4382 TYPENAME(wk->wk_type)); 4383 /* NOTREACHED */ 4384 } 4385 } 4386 if (filefree != NULL) { 4387 if (free_inodedep(inodedep) == 0) { 4388 lk.lkt_held = NOHOLDER; 4389 panic("handle_written_inodeblock: live inodedep"); 4390 } 4391 add_to_worklist(filefree); 4392 return (0); 4393 } 4394 4395 /* 4396 * If no outstanding dependencies, free it. 4397 */ 4398 if (free_inodedep(inodedep) || 4399 (TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 4400 TAILQ_FIRST(&inodedep->id_extupdt) == 0)) 4401 return (0); 4402 return (hadchanges); 4403 } 4404 4405 /* 4406 * Process a diradd entry after its dependent inode has been written. 4407 * This routine must be called with splbio interrupts blocked. 4408 */ 4409 static void 4410 diradd_inode_written(dap, inodedep) 4411 struct diradd *dap; 4412 struct inodedep *inodedep; 4413 { 4414 struct pagedep *pagedep; 4415 4416 dap->da_state |= COMPLETE; 4417 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4418 if (dap->da_state & DIRCHG) 4419 pagedep = dap->da_previous->dm_pagedep; 4420 else 4421 pagedep = dap->da_pagedep; 4422 LIST_REMOVE(dap, da_pdlist); 4423 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4424 } 4425 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 4426 } 4427 4428 /* 4429 * Handle the completion of a mkdir dependency. 
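 * A mkdir carries two such dependencies: MKDIR_BODY (the block holding the
 * new directory's "." and ".." entries) and MKDIR_PARENT (the parent's
 * updated link count). The diradd for the new name cannot be committed
 * until both have been written; once all of its dependencies are complete
 * it is moved to the pagedep's pending list.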
4430 */ 4431 static void 4432 handle_written_mkdir(mkdir, type) 4433 struct mkdir *mkdir; 4434 int type; 4435 { 4436 struct diradd *dap; 4437 struct pagedep *pagedep; 4438 4439 if (mkdir->md_state != type) { 4440 lk.lkt_held = NOHOLDER; 4441 panic("handle_written_mkdir: bad type"); 4442 } 4443 dap = mkdir->md_diradd; 4444 dap->da_state &= ~type; 4445 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 4446 dap->da_state |= DEPCOMPLETE; 4447 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4448 if (dap->da_state & DIRCHG) 4449 pagedep = dap->da_previous->dm_pagedep; 4450 else 4451 pagedep = dap->da_pagedep; 4452 LIST_REMOVE(dap, da_pdlist); 4453 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4454 } 4455 LIST_REMOVE(mkdir, md_mkdirs); 4456 WORKITEM_FREE(mkdir, D_MKDIR); 4457 } 4458 4459 /* 4460 * Called from within softdep_disk_write_complete above. 4461 * A write operation was just completed. Removed inodes can 4462 * now be freed and associated block pointers may be committed. 4463 * Note that this routine is always called from interrupt level 4464 * with further splbio interrupts blocked. 4465 */ 4466 static int 4467 handle_written_filepage(pagedep, bp) 4468 struct pagedep *pagedep; 4469 struct buf *bp; /* buffer containing the written page */ 4470 { 4471 struct dirrem *dirrem; 4472 struct diradd *dap, *nextdap; 4473 struct direct *ep; 4474 int i, chgs; 4475 4476 if ((pagedep->pd_state & IOSTARTED) == 0) { 4477 lk.lkt_held = NOHOLDER; 4478 panic("handle_written_filepage: not started"); 4479 } 4480 pagedep->pd_state &= ~IOSTARTED; 4481 /* 4482 * Process any directory removals that have been committed. 4483 */ 4484 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 4485 LIST_REMOVE(dirrem, dm_next); 4486 dirrem->dm_dirinum = pagedep->pd_ino; 4487 add_to_worklist(&dirrem->dm_list); 4488 } 4489 /* 4490 * Free any directory additions that have been committed. 4491 * If it is a newly allocated block, we have to wait until 4492 * the on-disk directory inode claims the new block. 4493 */ 4494 if ((pagedep->pd_state & NEWBLOCK) == 0) 4495 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 4496 free_diradd(dap); 4497 /* 4498 * Uncommitted directory entries must be restored. 4499 */ 4500 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 4501 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 4502 dap = nextdap) { 4503 nextdap = LIST_NEXT(dap, da_pdlist); 4504 if (dap->da_state & ATTACHED) { 4505 lk.lkt_held = NOHOLDER; 4506 panic("handle_written_filepage: attached"); 4507 } 4508 ep = (struct direct *) 4509 ((char *)bp->b_data + dap->da_offset); 4510 ep->d_ino = dap->da_newinum; 4511 dap->da_state &= ~UNDONE; 4512 dap->da_state |= ATTACHED; 4513 chgs = 1; 4514 /* 4515 * If the inode referenced by the directory has 4516 * been written out, then the dependency can be 4517 * moved to the pending list. 4518 */ 4519 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4520 LIST_REMOVE(dap, da_pdlist); 4521 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 4522 da_pdlist); 4523 } 4524 } 4525 } 4526 /* 4527 * If there were any rollbacks in the directory, then it must be 4528 * marked dirty so that its will eventually get written back in 4529 * its correct form. 4530 */ 4531 if (chgs) { 4532 if ((bp->b_flags & B_DELWRI) == 0) 4533 stat_dir_entry++; 4534 bdirty(bp); 4535 return (1); 4536 } 4537 /* 4538 * If we are not waiting for a new directory block to be 4539 * claimed by its inode, then the pagedep will be freed. 
4540 * Otherwise it will remain to track any new entries on 4541 * the page in case they are fsync'ed. 4542 */ 4543 if ((pagedep->pd_state & NEWBLOCK) == 0) { 4544 LIST_REMOVE(pagedep, pd_hash); 4545 WORKITEM_FREE(pagedep, D_PAGEDEP); 4546 } 4547 return (0); 4548 } 4549 4550 /* 4551 * Writing back in-core inode structures. 4552 * 4553 * The filesystem only accesses an inode's contents when it occupies an 4554 * "in-core" inode structure. These "in-core" structures are separate from 4555 * the page frames used to cache inode blocks. Only the latter are 4556 * transferred to/from the disk. So, when the updated contents of the 4557 * "in-core" inode structure are copied to the corresponding in-memory inode 4558 * block, the dependencies are also transferred. The following procedure is 4559 * called when copying a dirty "in-core" inode to a cached inode block. 4560 */ 4561 4562 /* 4563 * Called when an inode is loaded from disk. If the effective link count 4564 * differed from the actual link count when it was last flushed, then we 4565 * need to ensure that the correct effective link count is put back. 4566 */ 4567 void 4568 softdep_load_inodeblock(ip) 4569 struct inode *ip; /* the "in_core" copy of the inode */ 4570 { 4571 struct inodedep *inodedep; 4572 4573 /* 4574 * Check for alternate nlink count. 4575 */ 4576 ip->i_effnlink = ip->i_nlink; 4577 ACQUIRE_LOCK(&lk); 4578 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4579 FREE_LOCK(&lk); 4580 return; 4581 } 4582 ip->i_effnlink -= inodedep->id_nlinkdelta; 4583 if (inodedep->id_state & SPACECOUNTED) 4584 ip->i_flag |= IN_SPACECOUNTED; 4585 FREE_LOCK(&lk); 4586 } 4587 4588 /* 4589 * This routine is called just before the "in-core" inode 4590 * information is to be copied to the in-memory inode block. 4591 * Recall that an inode block contains several inodes. If 4592 * the force flag is set, then the dependencies will be 4593 * cleared so that the update can always be made. Note that 4594 * the buffer is locked when this routine is called, so we 4595 * will never be in the middle of writing the inode block 4596 * to disk. 4597 */ 4598 void 4599 softdep_update_inodeblock(ip, bp, waitfor) 4600 struct inode *ip; /* the "in_core" copy of the inode */ 4601 struct buf *bp; /* the buffer containing the inode block */ 4602 int waitfor; /* nonzero => update must be allowed */ 4603 { 4604 struct inodedep *inodedep; 4605 struct worklist *wk; 4606 int error, gotit; 4607 4608 /* 4609 * If the effective link count is not equal to the actual link 4610 * count, then we must track the difference in an inodedep while 4611 * the inode is (potentially) tossed out of the cache. Otherwise, 4612 * if there is no existing inodedep, then there are no dependencies 4613 * to track. 4614 */ 4615 ACQUIRE_LOCK(&lk); 4616 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4617 FREE_LOCK(&lk); 4618 if (ip->i_effnlink != ip->i_nlink) 4619 panic("softdep_update_inodeblock: bad link count"); 4620 return; 4621 } 4622 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 4623 FREE_LOCK(&lk); 4624 panic("softdep_update_inodeblock: bad delta"); 4625 } 4626 /* 4627 * Changes have been initiated. Anything depending on these 4628 * changes cannot occur until this inode has been written. 
4629 */ 4630 inodedep->id_state &= ~COMPLETE; 4631 if ((inodedep->id_state & ONWORKLIST) == 0) 4632 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 4633 /* 4634 * Any new dependencies associated with the incore inode must 4635 * now be moved to the list associated with the buffer holding 4636 * the in-memory copy of the inode. Once merged, process any 4637 * allocdirects that are completed by the merger. 4638 */ 4639 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 4640 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL) 4641 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt)); 4642 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 4643 if (TAILQ_FIRST(&inodedep->id_extupdt) != NULL) 4644 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt)); 4645 /* 4646 * Now that the inode has been pushed into the buffer, the 4647 * operations dependent on the inode being written to disk 4648 * can be moved to the id_bufwait so that they will be 4649 * processed when the buffer I/O completes. 4650 */ 4651 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 4652 WORKLIST_REMOVE(wk); 4653 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 4654 } 4655 /* 4656 * Newly allocated inodes cannot be written until the bitmap 4657 * that allocates them has been written (indicated by 4658 * DEPCOMPLETE being set in id_state). If we are doing a 4659 * forced sync (e.g., an fsync on a file), we force the bitmap 4660 * to be written so that the update can be done. 4661 */ 4662 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) { 4663 FREE_LOCK(&lk); 4664 return; 4665 } 4666 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4667 FREE_LOCK(&lk); 4668 if (gotit && 4669 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4670 softdep_error("softdep_update_inodeblock: bwrite", error); 4671 if ((inodedep->id_state & DEPCOMPLETE) == 0) 4672 panic("softdep_update_inodeblock: update failed"); 4673 } 4674 4675 /* 4676 * Merge a new inode dependency list (such as id_newinoupdt) into an 4677 * old inode dependency list (such as id_inoupdt). This routine must be 4678 * called with splbio interrupts blocked. 4679 */ 4680 static void 4681 merge_inode_lists(newlisthead, oldlisthead) 4682 struct allocdirectlst *newlisthead; 4683 struct allocdirectlst *oldlisthead; 4684 { 4685 struct allocdirect *listadp, *newadp; 4686 4687 newadp = TAILQ_FIRST(newlisthead); 4688 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { 4689 if (listadp->ad_lbn < newadp->ad_lbn) { 4690 listadp = TAILQ_NEXT(listadp, ad_next); 4691 continue; 4692 } 4693 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4694 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4695 if (listadp->ad_lbn == newadp->ad_lbn) { 4696 allocdirect_merge(oldlisthead, newadp, 4697 listadp); 4698 listadp = newadp; 4699 } 4700 newadp = TAILQ_FIRST(newlisthead); 4701 } 4702 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { 4703 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4704 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); 4705 } 4706 } 4707 4708 /* 4709 * If we are doing an fsync, then we must ensure that any directory 4710 * entries for the inode have been written after the inode gets to disk.
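 * For example, after a create followed by fsync(2), the new inode may
 * already be safe on disk while the directory block naming it is not; the
 * routine below walks the inodedep's id_pendinghd diradd list and flushes
 * the parent directory (and, if needed, its inode) so that the name is
 * durable before the fsync returns.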
4711 */ 4712 int 4713 softdep_fsync(vp) 4714 struct vnode *vp; /* the "in_core" copy of the inode */ 4715 { 4716 struct inodedep *inodedep; 4717 struct pagedep *pagedep; 4718 struct worklist *wk; 4719 struct diradd *dap; 4720 struct mount *mnt; 4721 struct vnode *pvp; 4722 struct inode *ip; 4723 struct buf *bp; 4724 struct fs *fs; 4725 struct thread *td = curthread; 4726 int error, flushparent; 4727 ino_t parentino; 4728 ufs_lbn_t lbn; 4729 4730 ip = VTOI(vp); 4731 fs = ip->i_fs; 4732 ACQUIRE_LOCK(&lk); 4733 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4734 FREE_LOCK(&lk); 4735 return (0); 4736 } 4737 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4738 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4739 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 4740 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 4741 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4742 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4743 FREE_LOCK(&lk); 4744 panic("softdep_fsync: pending ops"); 4745 } 4746 for (error = 0, flushparent = 0; ; ) { 4747 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4748 break; 4749 if (wk->wk_type != D_DIRADD) { 4750 FREE_LOCK(&lk); 4751 panic("softdep_fsync: Unexpected type %s", 4752 TYPENAME(wk->wk_type)); 4753 } 4754 dap = WK_DIRADD(wk); 4755 /* 4756 * Flush our parent if this directory entry has a MKDIR_PARENT 4757 * dependency or is contained in a newly allocated block. 4758 */ 4759 if (dap->da_state & DIRCHG) 4760 pagedep = dap->da_previous->dm_pagedep; 4761 else 4762 pagedep = dap->da_pagedep; 4763 mnt = pagedep->pd_mnt; 4764 parentino = pagedep->pd_ino; 4765 lbn = pagedep->pd_lbn; 4766 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4767 FREE_LOCK(&lk); 4768 panic("softdep_fsync: dirty"); 4769 } 4770 if ((dap->da_state & MKDIR_PARENT) || 4771 (pagedep->pd_state & NEWBLOCK)) 4772 flushparent = 1; 4773 else 4774 flushparent = 0; 4775 /* 4776 * If we are being fsync'ed as part of vgone'ing this vnode, 4777 * then we will not be able to release and recover the 4778 * vnode below, so we just have to give up on writing its 4779 * directory entry out. It will eventually be written, just 4780 * not now, but then the user was not asking to have it 4781 * written, so we are not breaking any promises. 4782 */ 4783 mp_fixme("This operation is not atomic wrt the rest of the code"); 4784 VI_LOCK(vp); 4785 if (vp->v_iflag & VI_XLOCK) { 4786 VI_UNLOCK(vp); 4787 break; 4788 } else 4789 VI_UNLOCK(vp); 4790 /* 4791 * We prevent deadlock by always fetching inodes from the 4792 * root, moving down the directory tree. Thus, when fetching 4793 * our parent directory, we first try to get the lock. If 4794 * that fails, we must unlock ourselves before requesting 4795 * the lock on our parent. See the comment in ufs_lookup 4796 * for details on possible races. 4797 */ 4798 FREE_LOCK(&lk); 4799 if (VFS_VGET(mnt, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp)) { 4800 VOP_UNLOCK(vp, 0, td); 4801 error = VFS_VGET(mnt, parentino, LK_EXCLUSIVE, &pvp); 4802 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4803 if (error != 0) 4804 return (error); 4805 } 4806 /* 4807 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 4808 * that are contained in direct blocks will be resolved by 4809 * doing a UFS_UPDATE. Pagedeps contained in indirect blocks 4810 * may require a complete sync'ing of the directory. So, we 4811 * try the cheap and fast UFS_UPDATE first, and if that fails, 4812 * then we do the slower VOP_FSYNC of the directory. 
4813 */ 4814 if (flushparent) { 4815 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4816 vput(pvp); 4817 return (error); 4818 } 4819 if ((pagedep->pd_state & NEWBLOCK) && 4820 (error = VOP_FSYNC(pvp, td->td_ucred, MNT_WAIT, td))) { 4821 vput(pvp); 4822 return (error); 4823 } 4824 } 4825 /* 4826 * Flush directory page containing the inode's name. 4827 */ 4828 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 4829 &bp); 4830 if (error == 0) 4831 error = BUF_WRITE(bp); 4832 else 4833 brelse(bp); 4834 vput(pvp); 4835 if (error != 0) 4836 return (error); 4837 ACQUIRE_LOCK(&lk); 4838 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4839 break; 4840 } 4841 FREE_LOCK(&lk); 4842 return (0); 4843 } 4844 4845 /* 4846 * Flush all the dirty bitmaps associated with the block device 4847 * before flushing the rest of the dirty blocks so as to reduce 4848 * the number of dependencies that will have to be rolled back. 4849 */ 4850 void 4851 softdep_fsync_mountdev(vp) 4852 struct vnode *vp; 4853 { 4854 struct buf *bp, *nbp; 4855 struct worklist *wk; 4856 4857 if (!vn_isdisk(vp, NULL)) 4858 panic("softdep_fsync_mountdev: vnode not a disk"); 4859 ACQUIRE_LOCK(&lk); 4860 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 4861 nbp = TAILQ_NEXT(bp, b_vnbufs); 4862 /* 4863 * If it is already scheduled, skip to the next buffer. 4864 */ 4865 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4866 continue; 4867 if ((bp->b_flags & B_DELWRI) == 0) { 4868 FREE_LOCK(&lk); 4869 panic("softdep_fsync_mountdev: not dirty"); 4870 } 4871 /* 4872 * We are only interested in bitmaps with outstanding 4873 * dependencies. 4874 */ 4875 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4876 wk->wk_type != D_BMSAFEMAP || 4877 (bp->b_xflags & BX_BKGRDINPROG)) { 4878 BUF_UNLOCK(bp); 4879 continue; 4880 } 4881 bremfree(bp); 4882 FREE_LOCK(&lk); 4883 (void) bawrite(bp); 4884 ACQUIRE_LOCK(&lk); 4885 /* 4886 * Since we may have slept during the I/O, we need 4887 * to start from a known point. 4888 */ 4889 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4890 } 4891 drain_output(vp, 1); 4892 FREE_LOCK(&lk); 4893 } 4894 4895 /* 4896 * This routine is called when we are trying to synchronously flush a 4897 * file. This routine must eliminate any filesystem metadata dependencies 4898 * so that the syncing routine can succeed by pushing the dirty blocks 4899 * associated with the file. If any I/O errors occur, they are returned. 4900 */ 4901 int 4902 softdep_sync_metadata(ap) 4903 struct vop_fsync_args /* { 4904 struct vnode *a_vp; 4905 struct ucred *a_cred; 4906 int a_waitfor; 4907 struct thread *a_td; 4908 } */ *ap; 4909 { 4910 struct vnode *vp = ap->a_vp; 4911 struct pagedep *pagedep; 4912 struct allocdirect *adp; 4913 struct allocindir *aip; 4914 struct buf *bp, *nbp; 4915 struct worklist *wk; 4916 int i, error, waitfor; 4917 4918 /* 4919 * Check whether this vnode is involved in a filesystem 4920 * that is doing soft dependency processing. 4921 */ 4922 if (!vn_isdisk(vp, NULL)) { 4923 if (!DOINGSOFTDEP(vp)) 4924 return (0); 4925 } else 4926 if (vp->v_rdev->si_mountpoint == NULL || 4927 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4928 return (0); 4929 /* 4930 * Ensure that any direct block dependencies have been cleared. 4931 */ 4932 ACQUIRE_LOCK(&lk); 4933 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4934 FREE_LOCK(&lk); 4935 return (error); 4936 } 4937 /* 4938 * For most files, the only metadata dependencies are the 4939 * cylinder group maps that allocate their inode or blocks. 
4940 * The block allocation dependencies can be found by traversing 4941 * the dependency lists for any buffers that remain on their 4942 * dirty buffer list. The inode allocation dependency will 4943 * be resolved when the inode is updated with MNT_WAIT. 4944 * This work is done in two passes. The first pass grabs most 4945 * of the buffers and begins asynchronously writing them. The 4946 * only way to wait for these asynchronous writes is to sleep 4947 * on the filesystem vnode which may stay busy for a long time 4948 * if the filesystem is active. So, instead, we make a second 4949 * pass over the dependencies blocking on each write. In the 4950 * usual case we will be blocking against a write that we 4951 * initiated, so when it is done the dependency will have been 4952 * resolved. Thus the second pass is expected to end quickly. 4953 */ 4954 waitfor = MNT_NOWAIT; 4955 top: 4956 /* 4957 * We must wait for any I/O in progress to finish so that 4958 * all potential buffers on the dirty list will be visible. 4959 */ 4960 drain_output(vp, 1); 4961 if (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT) == 0) { 4962 FREE_LOCK(&lk); 4963 return (0); 4964 } 4965 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4966 /* While syncing snapshots, we must allow recursive lookups */ 4967 bp->b_lock.lk_flags |= LK_CANRECURSE; 4968 loop: 4969 /* 4970 * As we hold the buffer locked, none of its dependencies 4971 * will disappear. 4972 */ 4973 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4974 switch (wk->wk_type) { 4975 4976 case D_ALLOCDIRECT: 4977 adp = WK_ALLOCDIRECT(wk); 4978 if (adp->ad_state & DEPCOMPLETE) 4979 continue; 4980 nbp = adp->ad_buf; 4981 if (getdirtybuf(&nbp, waitfor) == 0) 4982 continue; 4983 FREE_LOCK(&lk); 4984 if (waitfor == MNT_NOWAIT) { 4985 bawrite(nbp); 4986 } else if ((error = BUF_WRITE(nbp)) != 0) { 4987 break; 4988 } 4989 ACQUIRE_LOCK(&lk); 4990 continue; 4991 4992 case D_ALLOCINDIR: 4993 aip = WK_ALLOCINDIR(wk); 4994 if (aip->ai_state & DEPCOMPLETE) 4995 continue; 4996 nbp = aip->ai_buf; 4997 if (getdirtybuf(&nbp, waitfor) == 0) 4998 continue; 4999 FREE_LOCK(&lk); 5000 if (waitfor == MNT_NOWAIT) { 5001 bawrite(nbp); 5002 } else if ((error = BUF_WRITE(nbp)) != 0) { 5003 break; 5004 } 5005 ACQUIRE_LOCK(&lk); 5006 continue; 5007 5008 case D_INDIRDEP: 5009 restart: 5010 5011 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 5012 if (aip->ai_state & DEPCOMPLETE) 5013 continue; 5014 nbp = aip->ai_buf; 5015 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 5016 goto restart; 5017 FREE_LOCK(&lk); 5018 if ((error = BUF_WRITE(nbp)) != 0) { 5019 break; 5020 } 5021 ACQUIRE_LOCK(&lk); 5022 goto restart; 5023 } 5024 continue; 5025 5026 case D_INODEDEP: 5027 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 5028 WK_INODEDEP(wk)->id_ino)) != 0) { 5029 FREE_LOCK(&lk); 5030 break; 5031 } 5032 continue; 5033 5034 case D_PAGEDEP: 5035 /* 5036 * We are trying to sync a directory that may 5037 * have dependencies on both its own metadata 5038 * and/or dependencies on the inodes of any 5039 * recently allocated files. We walk its diradd 5040 * lists pushing out the associated inode. 5041 */ 5042 pagedep = WK_PAGEDEP(wk); 5043 for (i = 0; i < DAHASHSZ; i++) { 5044 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 5045 continue; 5046 if ((error = 5047 flush_pagedep_deps(vp, pagedep->pd_mnt, 5048 &pagedep->pd_diraddhd[i]))) { 5049 FREE_LOCK(&lk); 5050 break; 5051 } 5052 } 5053 continue; 5054 5055 case D_MKDIR: 5056 /* 5057 * This case should never happen if the vnode has 5058 * been properly sync'ed. 
However, if this function 5059 * is used at a place where the vnode has not yet 5060 * been sync'ed, this dependency can show up. So, 5061 * rather than panic, just flush it. 5062 */ 5063 nbp = WK_MKDIR(wk)->md_buf; 5064 if (getdirtybuf(&nbp, waitfor) == 0) 5065 continue; 5066 FREE_LOCK(&lk); 5067 if (waitfor == MNT_NOWAIT) { 5068 bawrite(nbp); 5069 } else if ((error = BUF_WRITE(nbp)) != 0) { 5070 break; 5071 } 5072 ACQUIRE_LOCK(&lk); 5073 continue; 5074 5075 case D_BMSAFEMAP: 5076 /* 5077 * This case should never happen if the vnode has 5078 * been properly sync'ed. However, if this function 5079 * is used at a place where the vnode has not yet 5080 * been sync'ed, this dependency can show up. So, 5081 * rather than panic, just flush it. 5082 */ 5083 nbp = WK_BMSAFEMAP(wk)->sm_buf; 5084 if (getdirtybuf(&nbp, waitfor) == 0) 5085 continue; 5086 FREE_LOCK(&lk); 5087 if (waitfor == MNT_NOWAIT) { 5088 bawrite(nbp); 5089 } else if ((error = BUF_WRITE(nbp)) != 0) { 5090 break; 5091 } 5092 ACQUIRE_LOCK(&lk); 5093 continue; 5094 5095 default: 5096 FREE_LOCK(&lk); 5097 panic("softdep_sync_metadata: Unknown type %s", 5098 TYPENAME(wk->wk_type)); 5099 /* NOTREACHED */ 5100 } 5101 /* We reach here only in error and unlocked */ 5102 if (error == 0) 5103 panic("softdep_sync_metadata: zero error"); 5104 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5105 bawrite(bp); 5106 return (error); 5107 } 5108 (void) getdirtybuf(&TAILQ_NEXT(bp, b_vnbufs), MNT_WAIT); 5109 nbp = TAILQ_NEXT(bp, b_vnbufs); 5110 FREE_LOCK(&lk); 5111 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5112 bawrite(bp); 5113 ACQUIRE_LOCK(&lk); 5114 if (nbp != NULL) { 5115 bp = nbp; 5116 goto loop; 5117 } 5118 /* 5119 * The brief unlock is to allow any pent up dependency 5120 * processing to be done. Then proceed with the second pass. 5121 */ 5122 if (waitfor == MNT_NOWAIT) { 5123 waitfor = MNT_WAIT; 5124 FREE_LOCK(&lk); 5125 ACQUIRE_LOCK(&lk); 5126 goto top; 5127 } 5128 5129 /* 5130 * If we have managed to get rid of all the dirty buffers, 5131 * then we are done. For certain directories and block 5132 * devices, we may need to do further work. 5133 * 5134 * We must wait for any I/O in progress to finish so that 5135 * all potential buffers on the dirty list will be visible. 5136 */ 5137 drain_output(vp, 1); 5138 if (TAILQ_FIRST(&vp->v_dirtyblkhd) == NULL) { 5139 FREE_LOCK(&lk); 5140 return (0); 5141 } 5142 5143 FREE_LOCK(&lk); 5144 /* 5145 * If we are trying to sync a block device, some of its buffers may 5146 * contain metadata that cannot be written until the contents of some 5147 * partially written files have been written to disk. The only easy 5148 * way to accomplish this is to sync the entire filesystem (luckily 5149 * this happens rarely). 5150 */ 5151 if (vn_isdisk(vp, NULL) && 5152 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 5153 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred, 5154 ap->a_td)) != 0) 5155 return (error); 5156 return (0); 5157 } 5158 5159 /* 5160 * Flush the dependencies associated with an inodedep. 5161 * Called with splbio blocked. 5162 */ 5163 static int 5164 flush_inodedep_deps(fs, ino) 5165 struct fs *fs; 5166 ino_t ino; 5167 { 5168 struct inodedep *inodedep; 5169 int error, waitfor; 5170 5171 /* 5172 * This work is done in two passes. The first pass grabs most 5173 * of the buffers and begins asynchronously writing them. The 5174 * only way to wait for these asynchronous writes is to sleep 5175 * on the filesystem vnode which may stay busy for a long time 5176 * if the filesystem is active. 
So, instead, we make a second 5177 * pass over the dependencies blocking on each write. In the 5178 * usual case we will be blocking against a write that we 5179 * initiated, so when it is done the dependency will have been 5180 * resolved. Thus the second pass is expected to end quickly. 5181 * We give a brief window at the top of the loop to allow 5182 * any pending I/O to complete. 5183 */ 5184 for (error = 0, waitfor = MNT_NOWAIT; ; ) { 5185 if (error) 5186 return (error); 5187 FREE_LOCK(&lk); 5188 ACQUIRE_LOCK(&lk); 5189 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5190 return (0); 5191 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) || 5192 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) || 5193 flush_deplist(&inodedep->id_extupdt, waitfor, &error) || 5194 flush_deplist(&inodedep->id_newextupdt, waitfor, &error)) 5195 continue; 5196 /* 5197 * If pass2, we are done, otherwise do pass 2. 5198 */ 5199 if (waitfor == MNT_WAIT) 5200 break; 5201 waitfor = MNT_WAIT; 5202 } 5203 /* 5204 * Try freeing inodedep in case all dependencies have been removed. 5205 */ 5206 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0) 5207 (void) free_inodedep(inodedep); 5208 return (0); 5209 } 5210 5211 /* 5212 * Flush an inode dependency list. 5213 * Called with splbio blocked. 5214 */ 5215 static int 5216 flush_deplist(listhead, waitfor, errorp) 5217 struct allocdirectlst *listhead; 5218 int waitfor; 5219 int *errorp; 5220 { 5221 struct allocdirect *adp; 5222 struct buf *bp; 5223 5224 TAILQ_FOREACH(adp, listhead, ad_next) { 5225 if (adp->ad_state & DEPCOMPLETE) 5226 continue; 5227 bp = adp->ad_buf; 5228 if (getdirtybuf(&bp, waitfor) == 0) { 5229 if (waitfor == MNT_NOWAIT) 5230 continue; 5231 return (1); 5232 } 5233 FREE_LOCK(&lk); 5234 if (waitfor == MNT_NOWAIT) { 5235 bawrite(bp); 5236 } else if ((*errorp = BUF_WRITE(bp)) != 0) { 5237 ACQUIRE_LOCK(&lk); 5238 return (1); 5239 } 5240 ACQUIRE_LOCK(&lk); 5241 return (1); 5242 } 5243 return (0); 5244 } 5245 5246 /* 5247 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 5248 * Called with splbio blocked. 5249 */ 5250 static int 5251 flush_pagedep_deps(pvp, mp, diraddhdp) 5252 struct vnode *pvp; 5253 struct mount *mp; 5254 struct diraddhd *diraddhdp; 5255 { 5256 struct thread *td = curthread; 5257 struct inodedep *inodedep; 5258 struct ufsmount *ump; 5259 struct diradd *dap; 5260 struct vnode *vp; 5261 int gotit, error = 0; 5262 struct buf *bp; 5263 ino_t inum; 5264 5265 ump = VFSTOUFS(mp); 5266 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 5267 /* 5268 * Flush ourselves if this directory entry 5269 * has a MKDIR_PARENT dependency. 5270 */ 5271 if (dap->da_state & MKDIR_PARENT) { 5272 FREE_LOCK(&lk); 5273 if ((error = UFS_UPDATE(pvp, 1)) != 0) 5274 break; 5275 ACQUIRE_LOCK(&lk); 5276 /* 5277 * If that cleared dependencies, go on to next. 5278 */ 5279 if (dap != LIST_FIRST(diraddhdp)) 5280 continue; 5281 if (dap->da_state & MKDIR_PARENT) { 5282 FREE_LOCK(&lk); 5283 panic("flush_pagedep_deps: MKDIR_PARENT"); 5284 } 5285 } 5286 /* 5287 * A newly allocated directory must have its "." and 5288 * ".." entries written out before its name can be 5289 * committed in its parent. We do not want or need 5290 * the full semantics of a synchronous VOP_FSYNC as 5291 * that may end up here again, once for each directory 5292 * level in the filesystem. Instead, we push the blocks 5293 * and wait for them to clear. 
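 * (Pushing the blocks here means calling VOP_FSYNC with MNT_NOWAIT, which
 * only starts the writes; the drain_output() call below is what actually
 * waits for them to finish.)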
We have to fsync twice 5294 * because the first call may choose to defer blocks 5295 * that still have dependencies, but deferral will 5296 * happen at most once. 5297 */ 5298 inum = dap->da_newinum; 5299 if (dap->da_state & MKDIR_BODY) { 5300 FREE_LOCK(&lk); 5301 if ((error = VFS_VGET(mp, inum, LK_EXCLUSIVE, &vp))) 5302 break; 5303 if ((error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)) || 5304 (error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) { 5305 vput(vp); 5306 break; 5307 } 5308 drain_output(vp, 0); 5309 vput(vp); 5310 ACQUIRE_LOCK(&lk); 5311 /* 5312 * If that cleared dependencies, go on to next. 5313 */ 5314 if (dap != LIST_FIRST(diraddhdp)) 5315 continue; 5316 if (dap->da_state & MKDIR_BODY) { 5317 FREE_LOCK(&lk); 5318 panic("flush_pagedep_deps: MKDIR_BODY"); 5319 } 5320 } 5321 /* 5322 * Flush the inode on which the directory entry depends. 5323 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 5324 * the only remaining dependency is that the updated inode 5325 * count must get pushed to disk. The inode has already 5326 * been pushed into its inode buffer (via VOP_UPDATE) at 5327 * the time of the reference count change. So we need only 5328 * locate that buffer, ensure that there will be no rollback 5329 * caused by a bitmap dependency, then write the inode buffer. 5330 */ 5331 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 5332 FREE_LOCK(&lk); 5333 panic("flush_pagedep_deps: lost inode"); 5334 } 5335 /* 5336 * If the inode still has bitmap dependencies, 5337 * push them to disk. 5338 */ 5339 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5340 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 5341 FREE_LOCK(&lk); 5342 if (gotit && 5343 (error = BUF_WRITE(inodedep->id_buf)) != 0) 5344 break; 5345 ACQUIRE_LOCK(&lk); 5346 if (dap != LIST_FIRST(diraddhdp)) 5347 continue; 5348 } 5349 /* 5350 * If the inode is still sitting in a buffer waiting 5351 * to be written, push it to disk. 5352 */ 5353 FREE_LOCK(&lk); 5354 if ((error = bread(ump->um_devvp, 5355 fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 5356 (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) { 5357 brelse(bp); 5358 break; 5359 } 5360 if ((error = BUF_WRITE(bp)) != 0) 5361 break; 5362 ACQUIRE_LOCK(&lk); 5363 /* 5364 * If we have failed to get rid of all the dependencies 5365 * then something is seriously wrong. 5366 */ 5367 if (dap == LIST_FIRST(diraddhdp)) { 5368 FREE_LOCK(&lk); 5369 panic("flush_pagedep_deps: flush failed"); 5370 } 5371 } 5372 if (error) 5373 ACQUIRE_LOCK(&lk); 5374 return (error); 5375 } 5376 5377 /* 5378 * A large burst of file addition or deletion activity can drive the 5379 * memory load excessively high. First attempt to slow things down 5380 * using the techniques below. If that fails, this routine requests 5381 * the offending operations to fall back to running synchronously 5382 * until the memory load returns to a reasonable level. 5383 */ 5384 int 5385 softdep_slowdown(vp) 5386 struct vnode *vp; 5387 { 5388 int max_softdeps_hard; 5389 5390 max_softdeps_hard = max_softdeps * 11 / 10; 5391 if (num_dirrem < max_softdeps_hard / 2 && 5392 num_inodedep < max_softdeps_hard) 5393 return (0); 5394 stat_sync_limit_hit += 1; 5395 return (1); 5396 } 5397 5398 /* 5399 * Called by the allocation routines when they are about to fail 5400 * in the hope that we can free up some disk space. 5401 * 5402 * First check to see if the work list has anything on it. If it has, 5403 * clean up entries until we successfully free some space. 
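 * (Processing worklist items is what actually releases space: freeblks and
 * freefile items return the freed blocks and inodes to the filesystem.)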
Because this 5404 * process holds inodes locked, we cannot handle any remove requests 5405 * that might block on a locked inode as that could lead to deadlock. 5406 * If the worklist yields no free space, encourage the syncer daemon 5407 * to help us. In no event will we try for longer than tickdelay seconds. 5408 */ 5409 int 5410 softdep_request_cleanup(fs, vp) 5411 struct fs *fs; 5412 struct vnode *vp; 5413 { 5414 long starttime; 5415 ufs2_daddr_t needed; 5416 5417 needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize; 5418 starttime = time_second + tickdelay; 5419 if (UFS_UPDATE(vp, 1) != 0) 5420 return (0); 5421 while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) { 5422 if (time_second > starttime) 5423 return (0); 5424 if (num_on_worklist > 0 && 5425 process_worklist_item(NULL, LK_NOWAIT) != -1) { 5426 stat_worklist_push += 1; 5427 continue; 5428 } 5429 request_cleanup(FLUSH_REMOVE_WAIT, 0); 5430 } 5431 return (1); 5432 } 5433 5434 /* 5435 * If memory utilization has gotten too high, deliberately slow things 5436 * down and speed up the I/O processing. 5437 */ 5438 static int 5439 request_cleanup(resource, islocked) 5440 int resource; 5441 int islocked; 5442 { 5443 struct thread *td = curthread; 5444 5445 /* 5446 * We never hold up the filesystem syncer process. 5447 */ 5448 if (td == filesys_syncer) 5449 return (0); 5450 /* 5451 * First check to see if the work list has gotten backlogged. 5452 * If it has, co-opt this process to help clean up two entries. 5453 * Because this process may hold inodes locked, we cannot 5454 * handle any remove requests that might block on a locked 5455 * inode as that could lead to deadlock. 5456 */ 5457 if (num_on_worklist > max_softdeps / 10) { 5458 if (islocked) 5459 FREE_LOCK(&lk); 5460 process_worklist_item(NULL, LK_NOWAIT); 5461 process_worklist_item(NULL, LK_NOWAIT); 5462 stat_worklist_push += 2; 5463 if (islocked) 5464 ACQUIRE_LOCK(&lk); 5465 return(1); 5466 } 5467 /* 5468 * Next, we attempt to speed up the syncer process. If that 5469 * is successful, then we allow the process to continue. 5470 */ 5471 if (speedup_syncer() && resource != FLUSH_REMOVE_WAIT) 5472 return(0); 5473 /* 5474 * If we are resource constrained on inode dependencies, try 5475 * flushing some dirty inodes. Otherwise, we are constrained 5476 * by file deletions, so try accelerating flushes of directories 5477 * with removal dependencies. We would like to do the cleanup 5478 * here, but we probably hold an inode locked at this point and 5479 * that might deadlock against one that we try to clean. So, 5480 * the best that we can do is request the syncer daemon to do 5481 * the cleanup for us. 5482 */ 5483 switch (resource) { 5484 5485 case FLUSH_INODES: 5486 stat_ino_limit_push += 1; 5487 req_clear_inodedeps += 1; 5488 stat_countp = &stat_ino_limit_hit; 5489 break; 5490 5491 case FLUSH_REMOVE: 5492 case FLUSH_REMOVE_WAIT: 5493 stat_blk_limit_push += 1; 5494 req_clear_remove += 1; 5495 stat_countp = &stat_blk_limit_hit; 5496 break; 5497 5498 default: 5499 if (islocked) 5500 FREE_LOCK(&lk); 5501 panic("request_cleanup: unknown type"); 5502 } 5503 /* 5504 * Hopefully the syncer daemon will catch up and awaken us. 5505 * We wait at most tickdelay before proceeding in any case. 5506 */ 5507 if (islocked == 0) 5508 ACQUIRE_LOCK(&lk); 5509 proc_waiting += 1; 5510 if (handle.callout == NULL) 5511 handle = timeout(pause_timer, 0, tickdelay > 2 ? 
tickdelay : 2); 5512 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, NULL, PPAUSE, 5513 "softupdate", 0); 5514 proc_waiting -= 1; 5515 if (islocked == 0) 5516 FREE_LOCK(&lk); 5517 return (1); 5518 } 5519 5520 /* 5521 * Awaken processes pausing in request_cleanup and clear proc_waiting 5522 * to indicate that there is no longer a timer running. 5523 */ 5524 void 5525 pause_timer(arg) 5526 void *arg; 5527 { 5528 5529 *stat_countp += 1; 5530 wakeup_one(&proc_waiting); 5531 if (proc_waiting > 0) 5532 handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2); 5533 else 5534 handle.callout = NULL; 5535 } 5536 5537 /* 5538 * Flush out a directory with at least one removal dependency in an effort to 5539 * reduce the number of dirrem, freefile, and freeblks dependency structures. 5540 */ 5541 static void 5542 clear_remove(td) 5543 struct thread *td; 5544 { 5545 struct pagedep_hashhead *pagedephd; 5546 struct pagedep *pagedep; 5547 static int next = 0; 5548 struct mount *mp; 5549 struct vnode *vp; 5550 int error, cnt; 5551 ino_t ino; 5552 5553 ACQUIRE_LOCK(&lk); 5554 for (cnt = 0; cnt < pagedep_hash; cnt++) { 5555 pagedephd = &pagedep_hashtbl[next++]; 5556 if (next >= pagedep_hash) 5557 next = 0; 5558 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 5559 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 5560 continue; 5561 mp = pagedep->pd_mnt; 5562 ino = pagedep->pd_ino; 5563 FREE_LOCK(&lk); 5564 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5565 continue; 5566 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp))) { 5567 softdep_error("clear_remove: vget", error); 5568 vn_finished_write(mp); 5569 return; 5570 } 5571 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5572 softdep_error("clear_remove: fsync", error); 5573 drain_output(vp, 0); 5574 vput(vp); 5575 vn_finished_write(mp); 5576 return; 5577 } 5578 } 5579 FREE_LOCK(&lk); 5580 } 5581 5582 /* 5583 * Clear out a block of dirty inodes in an effort to reduce 5584 * the number of inodedep dependency structures. 5585 */ 5586 static void 5587 clear_inodedeps(td) 5588 struct thread *td; 5589 { 5590 struct inodedep_hashhead *inodedephd; 5591 struct inodedep *inodedep; 5592 static int next = 0; 5593 struct mount *mp; 5594 struct vnode *vp; 5595 struct fs *fs; 5596 int error, cnt; 5597 ino_t firstino, lastino, ino; 5598 5599 ACQUIRE_LOCK(&lk); 5600 /* 5601 * Pick a random inode dependency to be cleared. 5602 * We will then gather up all the inodes in its block 5603 * that have dependencies and flush them out. 5604 */ 5605 for (cnt = 0; cnt < inodedep_hash; cnt++) { 5606 inodedephd = &inodedep_hashtbl[next++]; 5607 if (next >= inodedep_hash) 5608 next = 0; 5609 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 5610 break; 5611 } 5612 if (inodedep == NULL) 5613 return; 5614 /* 5615 * Ugly code to find mount point given pointer to superblock. 5616 */ 5617 fs = inodedep->id_fs; 5618 TAILQ_FOREACH(mp, &mountlist, mnt_list) 5619 if ((mp->mnt_flag & MNT_SOFTDEP) && fs == VFSTOUFS(mp)->um_fs) 5620 break; 5621 /* 5622 * Find the last inode in the block with dependencies. 5623 */ 5624 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5625 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5626 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5627 break; 5628 /* 5629 * Asynchronously push all but the last inode with dependencies. 5630 * Synchronously push the last inode with dependencies to ensure 5631 * that the inode block gets written to free up the inodedeps. 
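 * Because the softdep lock is dropped while each vnode is fetched and
 * flushed, every iteration re-checks the inode with inodedep_lookup()
 * before doing any work on it.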
5632 */ 5633 for (ino = firstino; ino <= lastino; ino++) { 5634 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5635 continue; 5636 FREE_LOCK(&lk); 5637 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5638 continue; 5639 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp)) != 0) { 5640 softdep_error("clear_inodedeps: vget", error); 5641 vn_finished_write(mp); 5642 return; 5643 } 5644 if (ino == lastino) { 5645 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_WAIT, td))) 5646 softdep_error("clear_inodedeps: fsync1", error); 5647 } else { 5648 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5649 softdep_error("clear_inodedeps: fsync2", error); 5650 drain_output(vp, 0); 5651 } 5652 vput(vp); 5653 vn_finished_write(mp); 5654 ACQUIRE_LOCK(&lk); 5655 } 5656 FREE_LOCK(&lk); 5657 } 5658 5659 /* 5660 * Function to determine if the buffer has outstanding dependencies 5661 * that will cause a roll-back if the buffer is written. If wantcount 5662 * is set, return number of dependencies, otherwise just yes or no. 5663 */ 5664 static int 5665 softdep_count_dependencies(bp, wantcount) 5666 struct buf *bp; 5667 int wantcount; 5668 { 5669 struct worklist *wk; 5670 struct inodedep *inodedep; 5671 struct indirdep *indirdep; 5672 struct allocindir *aip; 5673 struct pagedep *pagedep; 5674 struct diradd *dap; 5675 int i, retval; 5676 5677 retval = 0; 5678 ACQUIRE_LOCK(&lk); 5679 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5680 switch (wk->wk_type) { 5681 5682 case D_INODEDEP: 5683 inodedep = WK_INODEDEP(wk); 5684 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5685 /* bitmap allocation dependency */ 5686 retval += 1; 5687 if (!wantcount) 5688 goto out; 5689 } 5690 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5691 /* direct block pointer dependency */ 5692 retval += 1; 5693 if (!wantcount) 5694 goto out; 5695 } 5696 if (TAILQ_FIRST(&inodedep->id_extupdt)) { 5697 /* direct block pointer dependency */ 5698 retval += 1; 5699 if (!wantcount) 5700 goto out; 5701 } 5702 continue; 5703 5704 case D_INDIRDEP: 5705 indirdep = WK_INDIRDEP(wk); 5706 5707 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5708 /* indirect block pointer dependency */ 5709 retval += 1; 5710 if (!wantcount) 5711 goto out; 5712 } 5713 continue; 5714 5715 case D_PAGEDEP: 5716 pagedep = WK_PAGEDEP(wk); 5717 for (i = 0; i < DAHASHSZ; i++) { 5718 5719 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5720 /* directory entry dependency */ 5721 retval += 1; 5722 if (!wantcount) 5723 goto out; 5724 } 5725 } 5726 continue; 5727 5728 case D_BMSAFEMAP: 5729 case D_ALLOCDIRECT: 5730 case D_ALLOCINDIR: 5731 case D_MKDIR: 5732 /* never a dependency on these blocks */ 5733 continue; 5734 5735 default: 5736 FREE_LOCK(&lk); 5737 panic("softdep_check_for_rollback: Unexpected type %s", 5738 TYPENAME(wk->wk_type)); 5739 /* NOTREACHED */ 5740 } 5741 } 5742 out: 5743 FREE_LOCK(&lk); 5744 return retval; 5745 } 5746 5747 /* 5748 * Acquire exclusive access to a buffer. 5749 * Must be called with splbio blocked. 5750 * Return 1 if buffer was acquired. 
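 * Only delayed-write (B_DELWRI) buffers are of interest; on success the
 * buffer has been removed from its queue with bremfree() and is ready to
 * be handed to bawrite() or BUF_WRITE() by the caller.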
5751 */ 5752 static int 5753 getdirtybuf(bpp, waitfor) 5754 struct buf **bpp; 5755 int waitfor; 5756 { 5757 struct buf *bp; 5758 int error; 5759 5760 for (;;) { 5761 if ((bp = *bpp) == NULL) 5762 return (0); 5763 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 5764 if ((bp->b_xflags & BX_BKGRDINPROG) == 0) 5765 break; 5766 BUF_UNLOCK(bp); 5767 if (waitfor != MNT_WAIT) 5768 return (0); 5769 bp->b_xflags |= BX_BKGRDWAIT; 5770 interlocked_sleep(&lk, SLEEP, &bp->b_xflags, NULL, 5771 PRIBIO, "getbuf", 0); 5772 continue; 5773 } 5774 if (waitfor != MNT_WAIT) 5775 return (0); 5776 error = interlocked_sleep(&lk, LOCKBUF, bp, NULL, 5777 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5778 if (error != ENOLCK) { 5779 FREE_LOCK(&lk); 5780 panic("getdirtybuf: inconsistent lock"); 5781 } 5782 } 5783 if ((bp->b_flags & B_DELWRI) == 0) { 5784 BUF_UNLOCK(bp); 5785 return (0); 5786 } 5787 bremfree(bp); 5788 return (1); 5789 } 5790 5791 /* 5792 * Wait for pending output on a vnode to complete. 5793 * Must be called with vnode locked. 5794 */ 5795 static void 5796 drain_output(vp, islocked) 5797 struct vnode *vp; 5798 int islocked; 5799 { 5800 5801 if (!islocked) 5802 ACQUIRE_LOCK(&lk); 5803 VI_LOCK(vp); 5804 while (vp->v_numoutput) { 5805 vp->v_iflag |= VI_BWAIT; 5806 interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput, 5807 VI_MTX(vp), PRIBIO + 1, "drainvp", 0); 5808 } 5809 VI_UNLOCK(vp); 5810 if (!islocked) 5811 FREE_LOCK(&lk); 5812 } 5813 5814 /* 5815 * Called whenever a buffer that is being invalidated or reallocated 5816 * contains dependencies. This should only happen if an I/O error has 5817 * occurred. The routine is called with the buffer locked. 5818 */ 5819 static void 5820 softdep_deallocate_dependencies(bp) 5821 struct buf *bp; 5822 { 5823 5824 if ((bp->b_ioflags & BIO_ERROR) == 0) 5825 panic("softdep_deallocate_dependencies: dangling deps"); 5826 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 5827 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5828 } 5829 5830 /* 5831 * Function to handle asynchronous write errors in the filesystem. 5832 */ 5833 void 5834 softdep_error(func, error) 5835 char *func; 5836 int error; 5837 { 5838 5839 /* XXX should do something better! */ 5840 printf("%s: got error %d while accessing filesystem\n", func, error); 5841 } 5842
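/*
 * Illustrative sketch (not part of the original source): how a block
 * allocator that is about to fail with ENOSPC might give the code above a
 * chance to flush pending frees before giving up.  The helper name
 * ffs_alloc_retry_example() and its exact placement are hypothetical;
 * only softdep_request_cleanup() and the DOINGSOFTDEP() macro are real
 * interfaces.
 */
#if 0
static int
ffs_alloc_retry_example(struct fs *fs, struct vnode *vp)
{

	/*
	 * No free blocks are available.  If this vnode is on a soft
	 * updates filesystem, pending freeblks/freefile work items may
	 * still hold blocks that have not yet been returned to the
	 * filesystem, so ask softdep_request_cleanup() to push them.
	 * A non-zero return means it made progress and the allocation
	 * should be retried; zero means the caller should fail with
	 * ENOSPC as usual.
	 */
	if (fs->fs_cstotal.cs_nbfree == 0 && DOINGSOFTDEP(vp))
		return (softdep_request_cleanup(fs, vp));
	return (0);
}
#endif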