/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
static MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
static MALLOC_DEFINE(M_INODEDEP, "inodedep", "Inode dependencies");
static MALLOC_DEFINE(M_NEWBLK, "newblk", "New block allocation");
static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap", "Block or frag allocated from cyl group map");
static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect", "Block or frag dependency for an inode");
static MALLOC_DEFINE(M_INDIRDEP, "indirdep", "Indirect block dependencies");
static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir", "Block dependency for an indirect block");
static MALLOC_DEFINE(M_FREEFRAG, "freefrag", "Previously used frag for an inode");
static MALLOC_DEFINE(M_FREEBLKS, "freeblks", "Blocks freed from an inode");
static MALLOC_DEFINE(M_FREEFILE, "freefile", "Inode deallocated");
static MALLOC_DEFINE(M_DIRADD, "diradd", "New directory entry");
static MALLOC_DEFINE(M_MKDIR, "mkdir", "New directory");
static MALLOC_DEFINE(M_DIRREM, "dirrem", "Directory entry deleted");
static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk", "Unclaimed new directory block");

#define M_SOFTDEP_FLAGS	(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_NEWDIRBLK	13
#define	D_LAST		D_NEWDIRBLK

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.  Note that D_LAST itself is a valid type, so
 * the bound check must be inclusive.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
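/*
 * The memtype[] table above must track the D_* constants one for one;
 * if a new dependency type is ever added, a compile-time cross-check
 * along the lines of
 *
 *	CTASSERT(sizeof(memtype) / sizeof(memtype[0]) == D_LAST + 1);
 *
 * would catch a table that has fallen out of step with the defines.
 */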
/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *, int);
static	int getdirtybuf(struct buf **, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct fs *, ino_t);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *, struct vnode *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	void free_newdirblk(struct newdirblk *);
static	int indir_trunc(struct freeblks *, ufs_daddr_t, int, ufs_lbn_t, long *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *, int);
static	void merge_inode_lists(struct inodedep *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct buf *);
static	int newblk_lookup(struct fs *, ufs_daddr_t, int, struct newblk **);
static	int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(int, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	void softdep_move_dependencies(struct buf *, struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts. For a multiprocessor, this lock would have to be
 * a mutex. A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete. In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls. Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
	int	lkt_spl;
} lk = { 0 };
#define ACQUIRE_LOCK(lk)		(lk)->lkt_spl = splbio()
#define FREE_LOCK(lk)			splx((lk)->lkt_spl)

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct	thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static	void acquire_lock(struct lockit *);
static	void free_lock(struct lockit *);
void	softdep_panic(char *);

#define ACQUIRE_LOCK(lk)		acquire_lock(lk)
#define FREE_LOCK(lk)			free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	struct thread *holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	lk->lkt_spl = splbio();
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	splx(lk->lkt_spl);
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */

static	int interlocked_sleep(struct lockit *, int, void *, int,
	    const char *, int);

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping. We restore it after we have slept. This routine
 * wraps the interlocking with functions that sleep. The list
 * below enumerates the available set of operations.
 */
#define UNKNOWN		0
#define SLEEP		1
#define LOCKBUF		2

static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	int flags;
	const char *wmesg;
	int timo;
{
	struct thread *holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}
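/*
 * For example, sema_get() below sleeps on a semaphore while holding the
 * soft updates lock by calling
 *
 *	interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
 *	    semap->prio, semap->name, semap->timo);
 *
 * which gives up the DEBUG ownership of the lock for the duration of
 * the sleep and restores the saved SPL in the lock before returning.
 */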
/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	struct	thread *holder;
	char	*name;
	int	prio;
	int	timo;
};
static	void sema_init(struct sema *, char *, int, int);
static	int sema_get(struct sema *, struct lockit *);
static	void sema_release(struct sema *);

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);
static	void workitem_free(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */
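/*
 * A work item is linked onto at most one list at a time.  The pattern
 * used throughout this file is to attach a dependency to the buffer it
 * rides on, e.g. WORKLIST_INSERT(&bp->b_dep, &adp->ad_list), and to
 * WORKLIST_REMOVE() it once the dependency has been resolved; both must
 * be done with the soft updates lock held.
 */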
/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout_handle handle; /* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
#define FLUSH_REMOVE_WAIT	3
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */
/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	struct thread *td = curthread;
	int cnt, matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return (-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		if ((cnt = process_worklist_item(matchmnt, 0)) == -1)
			break;
		else
			matchcnt += cnt;

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		softdep_worklist_busy -= 1;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct mount *mp;
	struct vnode *vp;
	int matchcnt = 0;

	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	vp = NULL;
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if (wk->wk_state & INPROGRESS)
			continue;
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		wk->wk_state |= INPROGRESS;
		FREE_LOCK(&lk);
		VFS_VGET(WK_DIRREM(wk)->dm_mnt, WK_DIRREM(wk)->dm_oldinum,
		    LK_NOWAIT | LK_EXCLUSIVE, &vp);
		ACQUIRE_LOCK(&lk);
		wk->wk_state &= ~INPROGRESS;
		if (vp != NULL)
			break;
	}
	if (wk == 0) {
		FREE_LOCK(&lk);
		return (-1);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		mp = WK_DIRREM(wk)->dm_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: dirrem on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk), vp);
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		mp = WK_FREEBLKS(wk)->fb_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freeblks on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT);
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		mp = WK_FREEFRAG(wk)->ff_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefrag on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		mp = WK_FREEFILE(wk)->fx_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefile on suspended filesystem",
			    "process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = 0;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}
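/*
 * softdep_move_dependencies() is installed as the bioops io_movedeps
 * hook in softdep_initialize() below, so it runs when the buffer cache
 * transfers the contents of one buffer to another (for example during
 * clustered writes) and the attached work items must follow the data.
 */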
/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	while ((count = softdep_process_worklist(oldmnt)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, td->td_ucred, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);
	return (error);
}

/*
 * Flush all vnodes and worklist items associated with a specified mount point.
 */
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{
	int error, count, loopcnt;

	error = 0;

	/*
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra passes just to be sure.
	 */
	for (loopcnt = 10; loopcnt > 0; loopcnt--) {
		/*
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		 */
		if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
			break;
		if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 ||
		    count == 0)
			break;
	}
	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;

/*
 * Look up a pagedep. Return 1 if found, 0 if not found or found
 * when asked to allocate but not associated with any buffer.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ip->i_number == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		if ((flags & DEPALLOC) != 0 &&
		    (pagedep->pd_state & ONWORKLIST) == 0)
			return (0);
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*pagedeppp = NULL;
		return (0);
	}
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS|M_ZERO);
	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*inodedeppp = NULL;
		return (0);
	}
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	num_inodedep += 1;
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*newblkpp = NULL;
		return (0);
	}
	if (sema_get(&newblk_in_progress, 0) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS);
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}
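/*
 * The common calling pattern for these lookup routines is to take the
 * soft updates lock and request allocation on a miss, e.g.
 *
 *	ACQUIRE_LOCK(&lk);
 *	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
 *
 * A return of 1 means an existing entry was found; 0 means it was
 * absent (and, when DEPALLOC is passed, has just been created).
 */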
/*
 * Executed during filesystem initialization before
 * mounting any file systems.
 */
void
softdep_initialize()
{

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", PRIBIO, 0);

	/* initialise bioops hack */
	bioops.io_start = softdep_disk_io_initiation;
	bioops.io_complete = softdep_disk_write_complete;
	bioops.io_deallocate = softdep_deallocate_dependencies;
	bioops.io_movedeps = softdep_move_dependencies;
	bioops.io_countdeps = softdep_count_dependencies;
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, cred, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}
/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a file system
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicates that a live inode or block is
 * free. So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers. When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset. The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation. The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs file system maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps. These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector. If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not. (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */

/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}
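/*
 * Concretely: when the inode allocator hands out inode number "newinum",
 * the cylinder group buffer has already been modified, and
 * softdep_setup_inomapdep() clears DEPCOMPLETE on the new inodedep and
 * ties it to that buffer.  DEPCOMPLETE is only set again once the bitmap
 * buffer reaches the disk, so anything made dependent on the inodedep
 * (such as a new directory entry naming the inode) cannot be committed
 * while the on-disk bitmap still shows the inode as free.
 */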
/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one. The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them. Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer. These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode. Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures. These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded). All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended). In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated. In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete). The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains. This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
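/*
 * For instance, growing a file whose last block is a 2KB fragment into
 * a full 8KB block may force the data to move to a new disk address.
 * The old 2KB piece becomes a "freefrag" that is only released after
 * the inode naming the new block has been written, so a crash can never
 * leave the on-disk inode pointing at space that has been freed and
 * possibly reused.
 */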
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	LIST_INIT(&adp->ad_newdirblk);
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
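	/*
	 * For example, if allocdirects for lbns 0-11 have committed but
	 * one for lbn 12 has not, the rollback code writes the inode
	 * with a size that ends at lbn 11, so the on-disk inode never
	 * claims a block whose dependencies are still outstanding.
	 */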
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct worklist *wk;
	struct freefrag *freefrag;
	struct newdirblk *newdirblk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect. It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free. This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	/*
	 * If we are tracking a new directory-block allocation,
	 * move it from the old allocdirect to the new allocdirect.
	 */
	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
		newdirblk = WK_NEWDIRBLK(wk);
		WORKLIST_REMOVE(&newdirblk->db_list);
		if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL)
			panic("allocdirect_merge: extra newdirblk");
		WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list);
	}
	free_allocdirect(adphead, oldadp, 0);
}
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = 0;
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_mnt = ITOV(ip)->v_mount;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{

	ffs_blkfree(VFSTOUFS(freefrag->ff_mnt)->um_fs, freefrag->ff_devvp,
	    freefrag->ff_blkno, freefrag->ff_fragsize, freefrag->ff_inum);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers. The undo/redo states described above are also
 * used here. Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept. The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies). The second copy is freed when all pointers
 * are safe. The cache is not allowed to replace indirect blocks with
 * pending update dependencies. If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk. Three
 * procedures are used when setting up indirect block pointer
 * dependencies. The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */
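/*
 * For example, while an indirect block has entries whose new blocks are
 * not yet committed, the copy kept in ir_savebp holds the old (safe)
 * pointer values; it is that copy, not the up-to-date buffer cache
 * copy, that supplies the data if the indirect block must be written
 * before its dependencies complete.
 */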
/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
{
	struct allocindir *aip;

	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
	    M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO);
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 */
void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;	/* newly allocated indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct buf *bp;		/* indirect block referencing allocated block */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
1649 */ 1650 static void 1651 setup_allocindir_phase2(bp, ip, aip) 1652 struct buf *bp; /* in-memory copy of the indirect block */ 1653 struct inode *ip; /* inode for file being extended */ 1654 struct allocindir *aip; /* allocindir allocated by the above routines */ 1655 { 1656 struct worklist *wk; 1657 struct indirdep *indirdep, *newindirdep; 1658 struct bmsafemap *bmsafemap; 1659 struct allocindir *oldaip; 1660 struct freefrag *freefrag; 1661 struct newblk *newblk; 1662 daddr_t blkno; 1663 1664 if (bp->b_lblkno >= 0) 1665 panic("setup_allocindir_phase2: not indir blk"); 1666 for (indirdep = NULL, newindirdep = NULL; ; ) { 1667 ACQUIRE_LOCK(&lk); 1668 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1669 if (wk->wk_type != D_INDIRDEP) 1670 continue; 1671 indirdep = WK_INDIRDEP(wk); 1672 break; 1673 } 1674 if (indirdep == NULL && newindirdep) { 1675 indirdep = newindirdep; 1676 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1677 newindirdep = NULL; 1678 } 1679 FREE_LOCK(&lk); 1680 if (indirdep) { 1681 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1682 &newblk) == 0) 1683 panic("setup_allocindir: lost block"); 1684 ACQUIRE_LOCK(&lk); 1685 if (newblk->nb_state == DEPCOMPLETE) { 1686 aip->ai_state |= DEPCOMPLETE; 1687 aip->ai_buf = NULL; 1688 } else { 1689 bmsafemap = newblk->nb_bmsafemap; 1690 aip->ai_buf = bmsafemap->sm_buf; 1691 LIST_REMOVE(newblk, nb_deps); 1692 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1693 aip, ai_deps); 1694 } 1695 LIST_REMOVE(newblk, nb_hash); 1696 FREE(newblk, M_NEWBLK); 1697 aip->ai_indirdep = indirdep; 1698 /* 1699 * Check to see if there is an existing dependency 1700 * for this block. If there is, merge the old 1701 * dependency into the new one. 1702 */ 1703 if (aip->ai_oldblkno == 0) 1704 oldaip = NULL; 1705 else 1706 1707 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1708 if (oldaip->ai_offset == aip->ai_offset) 1709 break; 1710 freefrag = NULL; 1711 if (oldaip != NULL) { 1712 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1713 FREE_LOCK(&lk); 1714 panic("setup_allocindir_phase2: blkno"); 1715 } 1716 aip->ai_oldblkno = oldaip->ai_oldblkno; 1717 freefrag = aip->ai_freefrag; 1718 aip->ai_freefrag = oldaip->ai_freefrag; 1719 oldaip->ai_freefrag = NULL; 1720 free_allocindir(oldaip, NULL); 1721 } 1722 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1723 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1724 [aip->ai_offset] = aip->ai_oldblkno; 1725 FREE_LOCK(&lk); 1726 if (freefrag != NULL) 1727 handle_workitem_freefrag(freefrag); 1728 } 1729 if (newindirdep) { 1730 if (indirdep->ir_savebp != NULL) 1731 brelse(newindirdep->ir_savebp); 1732 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1733 } 1734 if (indirdep) 1735 break; 1736 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1737 M_INDIRDEP, M_SOFTDEP_FLAGS); 1738 newindirdep->ir_list.wk_type = D_INDIRDEP; 1739 newindirdep->ir_state = ATTACHED; 1740 LIST_INIT(&newindirdep->ir_deplisthd); 1741 LIST_INIT(&newindirdep->ir_donehd); 1742 if (bp->b_blkno == bp->b_lblkno) { 1743 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, NULL, NULL); 1744 bp->b_blkno = blkno; 1745 } 1746 newindirdep->ir_savebp = 1747 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0); 1748 BUF_KERNPROC(newindirdep->ir_savebp); 1749 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1750 } 1751 } 1752 1753 /* 1754 * Block de-allocation dependencies. 1755 * 1756 * When blocks are de-allocated, the on-disk pointers must be nullified before 1757 * the blocks are made available for use by other files. 
 * (The true
1758  * requirement is that old pointers must be nullified before new on-disk
1759  * pointers are set. We chose this slightly more stringent requirement to
1760  * reduce complexity.) Our implementation handles this dependency by updating
1761  * the inode (or indirect block) appropriately but delaying the actual block
1762  * de-allocation (i.e., freemap and free space count manipulation) until
1763  * after the updated versions reach stable storage. After the disk is
1764  * updated, the blocks can be safely de-allocated whenever it is convenient.
1765  * This implementation handles only the common case of reducing a file's
1766  * length to zero. Other cases are handled by the conventional synchronous
1767  * write approach.
1768  *
1769  * The ffs implementation with which we worked double-checks
1770  * the state of the block pointers and file size as it reduces
1771  * a file's length. Some of this code is replicated here in our
1772  * soft updates implementation. The freeblks->fb_chkcnt field is
1773  * used to transfer a part of this information to the procedure
1774  * that eventually de-allocates the blocks.
1775  *
1776  * This routine should be called from the routine that shortens
1777  * a file's length, before the inode's size or block pointers
1778  * are modified. It will save the block pointer information for
1779  * later release and zero the inode so that the calling routine
1780  * can release it.
1781  */
1782 void
1783 softdep_setup_freeblocks(ip, length)
1784 	struct inode *ip;	/* The inode whose length is to be reduced */
1785 	off_t length;		/* The new length for the file */
1786 {
1787 	struct freeblks *freeblks;
1788 	struct inodedep *inodedep;
1789 	struct allocdirect *adp;
1790 	struct vnode *vp;
1791 	struct buf *bp;
1792 	struct fs *fs;
1793 	int i, delay, error;
1794 
1795 	fs = ip->i_fs;
1796 	if (length != 0)
1797 		panic("softdep_setup_freeblocks: non-zero length");
1798 	MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
1799 		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
1800 	freeblks->fb_list.wk_type = D_FREEBLKS;
1801 	freeblks->fb_uid = ip->i_uid;
1802 	freeblks->fb_previousinum = ip->i_number;
1803 	freeblks->fb_devvp = ip->i_devvp;
1804 	freeblks->fb_mnt = ITOV(ip)->v_mount;
1805 	freeblks->fb_oldsize = ip->i_size;
1806 	freeblks->fb_newsize = length;
1807 	freeblks->fb_chkcnt = ip->i_blocks;
1808 	for (i = 0; i < NDADDR; i++) {
1809 		freeblks->fb_dblks[i] = ip->i_db[i];
1810 		ip->i_db[i] = 0;
1811 	}
1812 	for (i = 0; i < NIADDR; i++) {
1813 		freeblks->fb_iblks[i] = ip->i_ib[i];
1814 		ip->i_ib[i] = 0;
1815 	}
1816 	ip->i_blocks = 0;
1817 	ip->i_size = 0;
1818 	/*
1819 	 * If the file was removed, then the space being freed was
1820 	 * accounted for then (see softdep_releasefile()). If the
1821 	 * file is merely being truncated, then we account for it now.
1822 	 */
1823 	if ((ip->i_flag & IN_SPACECOUNTED) == 0)
1824 		fs->fs_pendingblocks += freeblks->fb_chkcnt;
1825 	/*
1826 	 * Push the zero'ed inode to its disk buffer so that we are free
1827 	 * to delete its dependencies below. Once the dependencies are gone
1828 	 * the buffer can be safely released.
1829 	 */
1830 	if ((error = bread(ip->i_devvp,
1831 	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
1832 	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
1833 		brelse(bp);
1834 		softdep_error("softdep_setup_freeblocks", error);
1835 	}
1836 	*((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) =
1837 	    ip->i_din;
1838 	/*
1839 	 * Find and eliminate any inode dependencies.
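	 *
	 * (Note on the lookup convention used below and throughout this
	 * file: inodedep_lookup() returns nonzero when an entry already
	 * exists, and with the DEPALLOC flag it allocates one when none
	 * is found, so
	 *
	 *	(void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep);
	 *
	 * always leaves a valid inodedep behind, while a flags value of 0
	 * is a pure probe.)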
1840 */ 1841 ACQUIRE_LOCK(&lk); 1842 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1843 if ((inodedep->id_state & IOSTARTED) != 0) { 1844 FREE_LOCK(&lk); 1845 panic("softdep_setup_freeblocks: inode busy"); 1846 } 1847 /* 1848 * Add the freeblks structure to the list of operations that 1849 * must await the zero'ed inode being written to disk. If we 1850 * still have a bitmap dependency (delay == 0), then the inode 1851 * has never been written to disk, so we can process the 1852 * freeblks below once we have deleted the dependencies. 1853 */ 1854 delay = (inodedep->id_state & DEPCOMPLETE); 1855 if (delay) 1856 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1857 /* 1858 * Because the file length has been truncated to zero, any 1859 * pending block allocation dependency structures associated 1860 * with this inode are obsolete and can simply be de-allocated. 1861 * We must first merge the two dependency lists to get rid of 1862 * any duplicate freefrag structures, then purge the merged list. 1863 * If we still have a bitmap dependency, then the inode has never 1864 * been written to disk, so we can free any fragments without delay. 1865 */ 1866 merge_inode_lists(inodedep); 1867 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 1868 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 1869 FREE_LOCK(&lk); 1870 bdwrite(bp); 1871 /* 1872 * We must wait for any I/O in progress to finish so that 1873 * all potential buffers on the dirty list will be visible. 1874 * Once they are all there, walk the list and get rid of 1875 * any dependencies. 1876 */ 1877 vp = ITOV(ip); 1878 ACQUIRE_LOCK(&lk); 1879 drain_output(vp, 1); 1880 while (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT)) { 1881 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 1882 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep); 1883 deallocate_dependencies(bp, inodedep); 1884 bp->b_flags |= B_INVAL | B_NOCACHE; 1885 FREE_LOCK(&lk); 1886 brelse(bp); 1887 ACQUIRE_LOCK(&lk); 1888 } 1889 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1890 (void) free_inodedep(inodedep); 1891 FREE_LOCK(&lk); 1892 /* 1893 * If the inode has never been written to disk (delay == 0), 1894 * then we can process the freeblks now that we have deleted 1895 * the dependencies. 1896 */ 1897 if (!delay) 1898 handle_workitem_freeblocks(freeblks, 0); 1899 } 1900 1901 /* 1902 * Reclaim any dependency structures from a buffer that is about to 1903 * be reallocated to a new vnode. The buffer must be locked, thus, 1904 * no I/O completion operations can occur while we are manipulating 1905 * its associated dependencies. The mutex is held so that other I/O's 1906 * associated with related dependencies do not occur. 1907 */ 1908 static void 1909 deallocate_dependencies(bp, inodedep) 1910 struct buf *bp; 1911 struct inodedep *inodedep; 1912 { 1913 struct worklist *wk; 1914 struct indirdep *indirdep; 1915 struct allocindir *aip; 1916 struct pagedep *pagedep; 1917 struct dirrem *dirrem; 1918 struct diradd *dap; 1919 int i; 1920 1921 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1922 switch (wk->wk_type) { 1923 1924 case D_INDIRDEP: 1925 indirdep = WK_INDIRDEP(wk); 1926 /* 1927 * None of the indirect pointers will ever be visible, 1928 * so they can simply be tossed. GOINGAWAY ensures 1929 * that allocated pointers will be saved in the buffer 1930 * cache until they are freed. Note that they will 1931 * only be able to be found by their physical address 1932 * since the inode mapping the logical address will 1933 * be gone. 
The save buffer used for the safe copy 1934 * was allocated in setup_allocindir_phase2 using 1935 * the physical address so it could be used for this 1936 * purpose. Hence we swap the safe copy with the real 1937 * copy, allowing the safe copy to be freed and holding 1938 * on to the real copy for later use in indir_trunc. 1939 */ 1940 if (indirdep->ir_state & GOINGAWAY) { 1941 FREE_LOCK(&lk); 1942 panic("deallocate_dependencies: already gone"); 1943 } 1944 indirdep->ir_state |= GOINGAWAY; 1945 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 1946 free_allocindir(aip, inodedep); 1947 if (bp->b_lblkno >= 0 || 1948 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 1949 FREE_LOCK(&lk); 1950 panic("deallocate_dependencies: not indir"); 1951 } 1952 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 1953 bp->b_bcount); 1954 WORKLIST_REMOVE(wk); 1955 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 1956 continue; 1957 1958 case D_PAGEDEP: 1959 pagedep = WK_PAGEDEP(wk); 1960 /* 1961 * None of the directory additions will ever be 1962 * visible, so they can simply be tossed. 1963 */ 1964 for (i = 0; i < DAHASHSZ; i++) 1965 while ((dap = 1966 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 1967 free_diradd(dap); 1968 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 1969 free_diradd(dap); 1970 /* 1971 * Copy any directory remove dependencies to the list 1972 * to be processed after the zero'ed inode is written. 1973 * If the inode has already been written, then they 1974 * can be dumped directly onto the work list. 1975 */ 1976 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 1977 LIST_REMOVE(dirrem, dm_next); 1978 dirrem->dm_dirinum = pagedep->pd_ino; 1979 if (inodedep == NULL || 1980 (inodedep->id_state & ALLCOMPLETE) == 1981 ALLCOMPLETE) 1982 add_to_worklist(&dirrem->dm_list); 1983 else 1984 WORKLIST_INSERT(&inodedep->id_bufwait, 1985 &dirrem->dm_list); 1986 } 1987 if ((pagedep->pd_state & NEWBLOCK) != 0) { 1988 LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list) 1989 if (wk->wk_type == D_NEWDIRBLK && 1990 WK_NEWDIRBLK(wk)->db_pagedep == 1991 pagedep) 1992 break; 1993 if (wk != NULL) { 1994 WORKLIST_REMOVE(wk); 1995 free_newdirblk(WK_NEWDIRBLK(wk)); 1996 } else { 1997 FREE_LOCK(&lk); 1998 panic("deallocate_dependencies: " 1999 "lost pagedep"); 2000 } 2001 } 2002 WORKLIST_REMOVE(&pagedep->pd_list); 2003 LIST_REMOVE(pagedep, pd_hash); 2004 WORKITEM_FREE(pagedep, D_PAGEDEP); 2005 continue; 2006 2007 case D_ALLOCINDIR: 2008 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2009 continue; 2010 2011 case D_ALLOCDIRECT: 2012 case D_INODEDEP: 2013 FREE_LOCK(&lk); 2014 panic("deallocate_dependencies: Unexpected type %s", 2015 TYPENAME(wk->wk_type)); 2016 /* NOTREACHED */ 2017 2018 default: 2019 FREE_LOCK(&lk); 2020 panic("deallocate_dependencies: Unknown type %s", 2021 TYPENAME(wk->wk_type)); 2022 /* NOTREACHED */ 2023 } 2024 } 2025 } 2026 2027 /* 2028 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2029 * This routine must be called with splbio interrupts blocked. 
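 *
 * (Usage sketch, as in softdep_setup_freeblocks() above, where the
 * caller picks "delay" from the inode's bitmap dependency state:
 *
 *	delay = (inodedep->id_state & DEPCOMPLETE);
 *	while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
 *		free_allocdirect(&inodedep->id_inoupdt, adp, delay);
 *
 * With delay != 0 the freefrag is queued on id_bufwait and processed
 * only after the updated inode block is written; with delay == 0 it
 * goes straight onto the work list.)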
2030 */ 2031 static void 2032 free_allocdirect(adphead, adp, delay) 2033 struct allocdirectlst *adphead; 2034 struct allocdirect *adp; 2035 int delay; 2036 { 2037 struct newdirblk *newdirblk; 2038 struct worklist *wk; 2039 2040 #ifdef DEBUG 2041 if (lk.lkt_held == NOHOLDER) 2042 panic("free_allocdirect: lock not held"); 2043 #endif 2044 if ((adp->ad_state & DEPCOMPLETE) == 0) 2045 LIST_REMOVE(adp, ad_deps); 2046 TAILQ_REMOVE(adphead, adp, ad_next); 2047 if ((adp->ad_state & COMPLETE) == 0) 2048 WORKLIST_REMOVE(&adp->ad_list); 2049 if (adp->ad_freefrag != NULL) { 2050 if (delay) 2051 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2052 &adp->ad_freefrag->ff_list); 2053 else 2054 add_to_worklist(&adp->ad_freefrag->ff_list); 2055 } 2056 if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) { 2057 newdirblk = WK_NEWDIRBLK(wk); 2058 WORKLIST_REMOVE(&newdirblk->db_list); 2059 if (LIST_FIRST(&adp->ad_newdirblk) != NULL) 2060 panic("free_allocdirect: extra newdirblk"); 2061 if (delay) 2062 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2063 &newdirblk->db_list); 2064 else 2065 free_newdirblk(newdirblk); 2066 } 2067 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2068 } 2069 2070 /* 2071 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 2072 * This routine must be called with splbio interrupts blocked. 2073 */ 2074 static void 2075 free_newdirblk(newdirblk) 2076 struct newdirblk *newdirblk; 2077 { 2078 struct pagedep *pagedep; 2079 struct diradd *dap; 2080 int i; 2081 2082 #ifdef DEBUG 2083 if (lk.lkt_held == NOHOLDER) 2084 panic("free_newdirblk: lock not held"); 2085 #endif 2086 /* 2087 * If the pagedep is still linked onto the directory buffer 2088 * dependency chain, then some of the entries on the 2089 * pd_pendinghd list may not be committed to disk yet. In 2090 * this case, we will simply clear the NEWBLOCK flag and 2091 * let the pd_pendinghd list be processed when the pagedep 2092 * is next written. If the pagedep is no longer on the buffer 2093 * dependency chain, then all the entries on the pd_pending 2094 * list are committed to disk and we can free them here. 2095 */ 2096 pagedep = newdirblk->db_pagedep; 2097 pagedep->pd_state &= ~NEWBLOCK; 2098 if ((pagedep->pd_state & ONWORKLIST) == 0) 2099 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2100 free_diradd(dap); 2101 /* 2102 * If no dependencies remain, the pagedep will be freed. 2103 */ 2104 for (i = 0; i < DAHASHSZ; i++) 2105 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 2106 break; 2107 if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) { 2108 LIST_REMOVE(pagedep, pd_hash); 2109 WORKITEM_FREE(pagedep, D_PAGEDEP); 2110 } 2111 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2112 } 2113 2114 /* 2115 * Prepare an inode to be freed. The actual free operation is not 2116 * done until the zero'ed inode has been written to disk. 2117 */ 2118 void 2119 softdep_freefile(pvp, ino, mode) 2120 struct vnode *pvp; 2121 ino_t ino; 2122 int mode; 2123 { 2124 struct inode *ip = VTOI(pvp); 2125 struct inodedep *inodedep; 2126 struct freefile *freefile; 2127 2128 /* 2129 * This sets up the inode de-allocation dependency. 
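	 *
	 * (Usage sketch; the caller shown is an assumption about
	 * ffs_vfree(), not code from this file: with soft updates
	 * enabled, inode frees are diverted here, roughly
	 *
	 *	if (DOINGSOFTDEP(pvp)) {
	 *		softdep_freefile(pvp, ino, mode);
	 *		return (0);
	 *	}
	 *
	 * so the bitmap and summary updates wait until the zero'ed
	 * dinode has reached the disk.)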
2130 */ 2131 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2132 M_FREEFILE, M_SOFTDEP_FLAGS); 2133 freefile->fx_list.wk_type = D_FREEFILE; 2134 freefile->fx_list.wk_state = 0; 2135 freefile->fx_mode = mode; 2136 freefile->fx_oldinum = ino; 2137 freefile->fx_devvp = ip->i_devvp; 2138 freefile->fx_mnt = ITOV(ip)->v_mount; 2139 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 2140 ip->i_fs->fs_pendinginodes += 1; 2141 2142 /* 2143 * If the inodedep does not exist, then the zero'ed inode has 2144 * been written to disk. If the allocated inode has never been 2145 * written to disk, then the on-disk inode is zero'ed. In either 2146 * case we can free the file immediately. 2147 */ 2148 ACQUIRE_LOCK(&lk); 2149 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2150 check_inode_unwritten(inodedep)) { 2151 FREE_LOCK(&lk); 2152 handle_workitem_freefile(freefile); 2153 return; 2154 } 2155 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2156 FREE_LOCK(&lk); 2157 } 2158 2159 /* 2160 * Check to see if an inode has never been written to disk. If 2161 * so free the inodedep and return success, otherwise return failure. 2162 * This routine must be called with splbio interrupts blocked. 2163 * 2164 * If we still have a bitmap dependency, then the inode has never 2165 * been written to disk. Drop the dependency as it is no longer 2166 * necessary since the inode is being deallocated. We set the 2167 * ALLCOMPLETE flags since the bitmap now properly shows that the 2168 * inode is not allocated. Even if the inode is actively being 2169 * written, it has been rolled back to its zero'ed state, so we 2170 * are ensured that a zero inode is what is on the disk. For short 2171 * lived files, this change will usually result in removing all the 2172 * dependencies from the inode so that it can be freed immediately. 2173 */ 2174 static int 2175 check_inode_unwritten(inodedep) 2176 struct inodedep *inodedep; 2177 { 2178 2179 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2180 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2181 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2182 LIST_FIRST(&inodedep->id_inowait) != NULL || 2183 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2184 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2185 inodedep->id_nlinkdelta != 0) 2186 return (0); 2187 inodedep->id_state |= ALLCOMPLETE; 2188 LIST_REMOVE(inodedep, id_deps); 2189 inodedep->id_buf = NULL; 2190 if (inodedep->id_state & ONWORKLIST) 2191 WORKLIST_REMOVE(&inodedep->id_list); 2192 if (inodedep->id_savedino != NULL) { 2193 FREE(inodedep->id_savedino, M_INODEDEP); 2194 inodedep->id_savedino = NULL; 2195 } 2196 if (free_inodedep(inodedep) == 0) { 2197 FREE_LOCK(&lk); 2198 panic("check_inode_unwritten: busy inode"); 2199 } 2200 return (1); 2201 } 2202 2203 /* 2204 * Try to free an inodedep structure. Return 1 if it could be freed. 
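 *
 * (Usage sketch, as in softdep_setup_freeblocks() above, where callers
 * treat a zero return as "still busy" and leave the structure for a
 * later pass:
 *
 *	if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0)
 *		(void) free_inodedep(inodedep);
 *
 * The inodedep survives if any of its dependency lists is non-empty.)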
2205 */
2206 static int
2207 free_inodedep(inodedep)
2208 	struct inodedep *inodedep;
2209 {
2210 
2211 	if ((inodedep->id_state & ONWORKLIST) != 0 ||
2212 	    (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2213 	    LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2214 	    LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2215 	    LIST_FIRST(&inodedep->id_inowait) != NULL ||
2216 	    TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2217 	    TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2218 	    inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL)
2219 		return (0);
2220 	LIST_REMOVE(inodedep, id_hash);
2221 	WORKITEM_FREE(inodedep, D_INODEDEP);
2222 	num_inodedep -= 1;
2223 	return (1);
2224 }
2225 
2226 /*
2227  * This workitem routine performs the block de-allocation.
2228  * The workitem is added to the pending list after the updated
2229  * inode block has been written to disk. As mentioned above,
2230  * checks regarding the number of blocks de-allocated (compared
2231  * to the number of blocks allocated for the file) are also
2232  * performed in this function.
2233  */
2234 static void
2235 handle_workitem_freeblocks(freeblks, flags)
2236 	struct freeblks *freeblks;
2237 	int flags;
2238 {
2239 	struct inode *ip;
2240 	struct vnode *vp;
2241 	ufs_daddr_t bn;
2242 	struct fs *fs;
2243 	int i, level, bsize;
2244 	long nblocks, blocksreleased = 0;
2245 	int error, allerror = 0;
2246 	ufs_lbn_t baselbns[NIADDR], tmpval;
2247 
2248 	fs = VFSTOUFS(freeblks->fb_mnt)->um_fs;
2249 	tmpval = 1;
2250 	baselbns[0] = NDADDR;
2251 	for (i = 1; i < NIADDR; i++) {
2252 		tmpval *= NINDIR(fs);
2253 		baselbns[i] = baselbns[i - 1] + tmpval;
2254 	}
2255 	nblocks = btodb(fs->fs_bsize);
2256 	blocksreleased = 0;
2257 	/*
2258 	 * Indirect blocks first.
2259 	 */
2260 	for (level = (NIADDR - 1); level >= 0; level--) {
2261 		if ((bn = freeblks->fb_iblks[level]) == 0)
2262 			continue;
2263 		if ((error = indir_trunc(freeblks, fsbtodb(fs, bn), level,
2264 		    baselbns[level], &blocksreleased)) != 0)
2265 			allerror = error;
2266 		ffs_blkfree(fs, freeblks->fb_devvp, bn, fs->fs_bsize,
2267 		    freeblks->fb_previousinum);
2268 		fs->fs_pendingblocks -= nblocks;
2269 		blocksreleased += nblocks;
2270 	}
2271 	/*
2272 	 * All direct blocks or frags.
2273 	 */
2274 	for (i = (NDADDR - 1); i >= 0; i--) {
2275 		if ((bn = freeblks->fb_dblks[i]) == 0)
2276 			continue;
2277 		bsize = sblksize(fs, freeblks->fb_oldsize, i);
2278 		ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize,
2279 		    freeblks->fb_previousinum);
2280 		fs->fs_pendingblocks -= btodb(bsize);
2281 		blocksreleased += btodb(bsize);
2282 	}
2283 	/*
2284 	 * If we still have not finished background cleanup, then check
2285 	 * to see if the block count needs to be adjusted.
2286 	 */
2287 	if (freeblks->fb_chkcnt != blocksreleased &&
2288 	    (fs->fs_flags & FS_UNCLEAN) != 0 &&
2289 	    VFS_VGET(freeblks->fb_mnt, freeblks->fb_previousinum,
2290 	    (flags & LK_NOWAIT) | LK_EXCLUSIVE, &vp) == 0) {
2291 		ip = VTOI(vp);
2292 		ip->i_blocks += freeblks->fb_chkcnt - blocksreleased;
2293 		ip->i_flag |= IN_CHANGE;
2294 		vput(vp);
2295 	}
2296 
2297 #ifdef DIAGNOSTIC
2298 	if (freeblks->fb_chkcnt != blocksreleased &&
2299 	    ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0))
2300 		printf("handle_workitem_freeblocks: block count\n");
2301 	if (allerror)
2302 		softdep_error("handle_workitem_freeblocks", allerror);
2303 #endif /* DIAGNOSTIC */
2304 
2305 	WORKITEM_FREE(freeblks, D_FREEBLKS);
2306 }
2307 
2308 /*
2309  * Release blocks associated with the inode ip and stored in the indirect
2310  * block dbn.
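 *
 * (Worked example of the lbn bookkeeping, assuming an 8192-byte block
 * and a 4-byte ufs_daddr_t, so NINDIR(fs) == 2048: for NDADDR == 12 the
 * caller above computes
 *
 *	baselbns[0] = 12
 *	baselbns[1] = 12 + 2048
 *	baselbns[2] = 12 + 2048 + 2048 * 2048
 *
 * and the lbnadd computed below is NINDIR(fs) raised to "level".)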
 * If level is greater than SINGLE, the block is an indirect block
2311  * and recursive calls to indir_trunc must be used to cleanse other indirect
2312  * blocks.
2313  */
2314 static int
2315 indir_trunc(freeblks, dbn, level, lbn, countp)
2316 	struct freeblks *freeblks;
2317 	ufs_daddr_t dbn;
2318 	int level;
2319 	ufs_lbn_t lbn;
2320 	long *countp;
2321 {
2322 	struct buf *bp;
2323 	ufs_daddr_t *bap;
2324 	ufs_daddr_t nb;
2325 	struct fs *fs;
2326 	struct worklist *wk;
2327 	struct indirdep *indirdep;
2328 	int i, lbnadd, nblocks;
2329 	int error, allerror = 0;
2330 
2331 	fs = VFSTOUFS(freeblks->fb_mnt)->um_fs;
2332 	lbnadd = 1;
2333 	for (i = level; i > 0; i--)
2334 		lbnadd *= NINDIR(fs);
2335 	/*
2336 	 * Get buffer of block pointers to be freed. This routine is not
2337 	 * called until the zero'ed inode has been written, so it is safe
2338 	 * to free blocks as they are encountered. Because the inode has
2339 	 * been zero'ed, calls to bmap on these blocks will fail. So, we
2340 	 * have to use the on-disk address and the block device for the
2341 	 * filesystem to look them up. If the file was deleted before its
2342 	 * indirect blocks were all written to disk, the routine that set
2343 	 * us up (deallocate_dependencies) will have arranged to leave
2344 	 * a complete copy of the indirect block in memory for our use.
2345 	 * Otherwise we have to read the blocks in from the disk.
2346 	 */
2347 	ACQUIRE_LOCK(&lk);
2348 	if ((bp = incore(freeblks->fb_devvp, dbn)) != NULL &&
2349 	    (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2350 		if (wk->wk_type != D_INDIRDEP ||
2351 		    (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2352 		    (indirdep->ir_state & GOINGAWAY) == 0) {
2353 			FREE_LOCK(&lk);
2354 			panic("indir_trunc: lost indirdep");
2355 		}
2356 		WORKLIST_REMOVE(wk);
2357 		WORKITEM_FREE(indirdep, D_INDIRDEP);
2358 		if (LIST_FIRST(&bp->b_dep) != NULL) {
2359 			FREE_LOCK(&lk);
2360 			panic("indir_trunc: dangling dep");
2361 		}
2362 		FREE_LOCK(&lk);
2363 	} else {
2364 		FREE_LOCK(&lk);
2365 		error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
2366 		    NOCRED, &bp);
2367 		if (error) {
2368 			brelse(bp);
2369 			return (error);
2370 		}
2371 	}
2372 	/*
2373 	 * Recursively free indirect blocks.
2374 	 */
2375 	bap = (ufs_daddr_t *)bp->b_data;
2376 	nblocks = btodb(fs->fs_bsize);
2377 	for (i = NINDIR(fs) - 1; i >= 0; i--) {
2378 		if ((nb = bap[i]) == 0)
2379 			continue;
2380 		if (level != 0) {
2381 			if ((error = indir_trunc(freeblks, fsbtodb(fs, nb),
2382 			    level - 1, lbn + (i * lbnadd), countp)) != 0)
2383 				allerror = error;
2384 		}
2385 		ffs_blkfree(fs, freeblks->fb_devvp, nb, fs->fs_bsize,
2386 		    freeblks->fb_previousinum);
2387 		fs->fs_pendingblocks -= nblocks;
2388 		*countp += nblocks;
2389 	}
2390 	bp->b_flags |= B_INVAL | B_NOCACHE;
2391 	brelse(bp);
2392 	return (allerror);
2393 }
2394 
2395 /*
2396  * Free an allocindir.
2397  * This routine must be called with splbio interrupts blocked.
2398 */ 2399 static void 2400 free_allocindir(aip, inodedep) 2401 struct allocindir *aip; 2402 struct inodedep *inodedep; 2403 { 2404 struct freefrag *freefrag; 2405 2406 #ifdef DEBUG 2407 if (lk.lkt_held == NOHOLDER) 2408 panic("free_allocindir: lock not held"); 2409 #endif 2410 if ((aip->ai_state & DEPCOMPLETE) == 0) 2411 LIST_REMOVE(aip, ai_deps); 2412 if (aip->ai_state & ONWORKLIST) 2413 WORKLIST_REMOVE(&aip->ai_list); 2414 LIST_REMOVE(aip, ai_next); 2415 if ((freefrag = aip->ai_freefrag) != NULL) { 2416 if (inodedep == NULL) 2417 add_to_worklist(&freefrag->ff_list); 2418 else 2419 WORKLIST_INSERT(&inodedep->id_bufwait, 2420 &freefrag->ff_list); 2421 } 2422 WORKITEM_FREE(aip, D_ALLOCINDIR); 2423 } 2424 2425 /* 2426 * Directory entry addition dependencies. 2427 * 2428 * When adding a new directory entry, the inode (with its incremented link 2429 * count) must be written to disk before the directory entry's pointer to it. 2430 * Also, if the inode is newly allocated, the corresponding freemap must be 2431 * updated (on disk) before the directory entry's pointer. These requirements 2432 * are met via undo/redo on the directory entry's pointer, which consists 2433 * simply of the inode number. 2434 * 2435 * As directory entries are added and deleted, the free space within a 2436 * directory block can become fragmented. The ufs file system will compact 2437 * a fragmented directory block to make space for a new entry. When this 2438 * occurs, the offsets of previously added entries change. Any "diradd" 2439 * dependency structures corresponding to these entries must be updated with 2440 * the new offsets. 2441 */ 2442 2443 /* 2444 * This routine is called after the in-memory inode's link 2445 * count has been incremented, but before the directory entry's 2446 * pointer to the inode has been set. 2447 */ 2448 int 2449 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 2450 struct buf *bp; /* buffer containing directory block */ 2451 struct inode *dp; /* inode for directory */ 2452 off_t diroffset; /* offset of new entry in directory */ 2453 long newinum; /* inode referenced by new directory entry */ 2454 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2455 int isnewblk; /* entry is in a newly allocated block */ 2456 { 2457 int offset; /* offset of new entry within directory block */ 2458 ufs_lbn_t lbn; /* block in directory containing new entry */ 2459 struct fs *fs; 2460 struct diradd *dap; 2461 struct allocdirect *adp; 2462 struct pagedep *pagedep; 2463 struct inodedep *inodedep; 2464 struct newdirblk *newdirblk = 0; 2465 struct mkdir *mkdir1, *mkdir2; 2466 2467 /* 2468 * Whiteouts have no dependencies. 
2469 */ 2470 if (newinum == WINO) { 2471 if (newdirbp != NULL) 2472 bdwrite(newdirbp); 2473 return (0); 2474 } 2475 2476 fs = dp->i_fs; 2477 lbn = lblkno(fs, diroffset); 2478 offset = blkoff(fs, diroffset); 2479 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2480 M_SOFTDEP_FLAGS|M_ZERO); 2481 dap->da_list.wk_type = D_DIRADD; 2482 dap->da_offset = offset; 2483 dap->da_newinum = newinum; 2484 dap->da_state = ATTACHED; 2485 if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) { 2486 MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk), 2487 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 2488 newdirblk->db_list.wk_type = D_NEWDIRBLK; 2489 newdirblk->db_state = 0; 2490 } 2491 if (newdirbp == NULL) { 2492 dap->da_state |= DEPCOMPLETE; 2493 ACQUIRE_LOCK(&lk); 2494 } else { 2495 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2496 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2497 M_SOFTDEP_FLAGS); 2498 mkdir1->md_list.wk_type = D_MKDIR; 2499 mkdir1->md_state = MKDIR_BODY; 2500 mkdir1->md_diradd = dap; 2501 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2502 M_SOFTDEP_FLAGS); 2503 mkdir2->md_list.wk_type = D_MKDIR; 2504 mkdir2->md_state = MKDIR_PARENT; 2505 mkdir2->md_diradd = dap; 2506 /* 2507 * Dependency on "." and ".." being written to disk. 2508 */ 2509 mkdir1->md_buf = newdirbp; 2510 ACQUIRE_LOCK(&lk); 2511 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2512 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2513 FREE_LOCK(&lk); 2514 bdwrite(newdirbp); 2515 /* 2516 * Dependency on link count increase for parent directory 2517 */ 2518 ACQUIRE_LOCK(&lk); 2519 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0 2520 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2521 dap->da_state &= ~MKDIR_PARENT; 2522 WORKITEM_FREE(mkdir2, D_MKDIR); 2523 } else { 2524 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2525 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2526 } 2527 } 2528 /* 2529 * Link into parent directory pagedep to await its being written. 2530 */ 2531 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2532 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2533 dap->da_pagedep = pagedep; 2534 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2535 da_pdlist); 2536 /* 2537 * Link into its inodedep. Put it on the id_bufwait list if the inode 2538 * is not yet written. If it is written, do the post-inode write 2539 * processing to put it on the id_pendinghd list. 2540 */ 2541 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2542 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2543 diradd_inode_written(dap, inodedep); 2544 else 2545 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2546 if (isnewblk) { 2547 /* 2548 * Directories growing into indirect blocks are rare 2549 * enough and the frequency of new block allocation 2550 * in those cases even more rare, that we choose not 2551 * to bother tracking them. Rather we simply force the 2552 * new directory entry to disk. 2553 */ 2554 if (lbn >= NDADDR) { 2555 FREE_LOCK(&lk); 2556 /* 2557 * We only have a new allocation when at the 2558 * beginning of a new block, not when we are 2559 * expanding into an existing block. 2560 */ 2561 if (blkoff(fs, diroffset) == 0) 2562 return (1); 2563 return (0); 2564 } 2565 /* 2566 * We only have a new allocation when at the beginning 2567 * of a new fragment, not when we are expanding into an 2568 * existing fragment. Also, there is nothing to do if we 2569 * are already tracking this block. 
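		 *
		 * (Example, assuming a 1024-byte fragment size: directory
		 * offsets 0, 1024, 2048, ... give
		 * fragoff(fs, diroffset) == 0 and mark the start of a new
		 * fragment; any other offset means the entry landed in
		 * space that is already allocated and, if need be,
		 * already tracked.)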
2570 */ 2571 if (fragoff(fs, diroffset) != 0) { 2572 FREE_LOCK(&lk); 2573 return (0); 2574 } 2575 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2576 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2577 FREE_LOCK(&lk); 2578 return (0); 2579 } 2580 /* 2581 * Find our associated allocdirect and have it track us. 2582 */ 2583 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0) 2584 panic("softdep_setup_directory_add: lost inodedep"); 2585 adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst); 2586 if (adp == NULL || adp->ad_lbn != lbn) { 2587 FREE_LOCK(&lk); 2588 panic("softdep_setup_directory_add: lost entry"); 2589 } 2590 pagedep->pd_state |= NEWBLOCK; 2591 newdirblk->db_pagedep = pagedep; 2592 WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list); 2593 } 2594 FREE_LOCK(&lk); 2595 return (0); 2596 } 2597 2598 /* 2599 * This procedure is called to change the offset of a directory 2600 * entry when compacting a directory block which must be owned 2601 * exclusively by the caller. Note that the actual entry movement 2602 * must be done in this procedure to ensure that no I/O completions 2603 * occur while the move is in progress. 2604 */ 2605 void 2606 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2607 struct inode *dp; /* inode for directory */ 2608 caddr_t base; /* address of dp->i_offset */ 2609 caddr_t oldloc; /* address of old directory location */ 2610 caddr_t newloc; /* address of new directory location */ 2611 int entrysize; /* size of directory entry */ 2612 { 2613 int offset, oldoffset, newoffset; 2614 struct pagedep *pagedep; 2615 struct diradd *dap; 2616 ufs_lbn_t lbn; 2617 2618 ACQUIRE_LOCK(&lk); 2619 lbn = lblkno(dp->i_fs, dp->i_offset); 2620 offset = blkoff(dp->i_fs, dp->i_offset); 2621 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2622 goto done; 2623 oldoffset = offset + (oldloc - base); 2624 newoffset = offset + (newloc - base); 2625 2626 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2627 if (dap->da_offset != oldoffset) 2628 continue; 2629 dap->da_offset = newoffset; 2630 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2631 break; 2632 LIST_REMOVE(dap, da_pdlist); 2633 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2634 dap, da_pdlist); 2635 break; 2636 } 2637 if (dap == NULL) { 2638 2639 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2640 if (dap->da_offset == oldoffset) { 2641 dap->da_offset = newoffset; 2642 break; 2643 } 2644 } 2645 } 2646 done: 2647 bcopy(oldloc, newloc, entrysize); 2648 FREE_LOCK(&lk); 2649 } 2650 2651 /* 2652 * Free a diradd dependency structure. This routine must be called 2653 * with splbio interrupts blocked. 
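 *
 * (Usage sketch: one caller is newdirrem() below, which discards a
 * diradd whose directory entry was deleted before it ever reached the
 * disk:
 *
 *	dirrem->dm_state |= COMPLETE;
 *	free_diradd(dap);
 *
 * deallocate_dependencies() above similarly tosses every diradd on a
 * directory page that will never become visible.)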
2654 */ 2655 static void 2656 free_diradd(dap) 2657 struct diradd *dap; 2658 { 2659 struct dirrem *dirrem; 2660 struct pagedep *pagedep; 2661 struct inodedep *inodedep; 2662 struct mkdir *mkdir, *nextmd; 2663 2664 #ifdef DEBUG 2665 if (lk.lkt_held == NOHOLDER) 2666 panic("free_diradd: lock not held"); 2667 #endif 2668 WORKLIST_REMOVE(&dap->da_list); 2669 LIST_REMOVE(dap, da_pdlist); 2670 if ((dap->da_state & DIRCHG) == 0) { 2671 pagedep = dap->da_pagedep; 2672 } else { 2673 dirrem = dap->da_previous; 2674 pagedep = dirrem->dm_pagedep; 2675 dirrem->dm_dirinum = pagedep->pd_ino; 2676 add_to_worklist(&dirrem->dm_list); 2677 } 2678 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2679 0, &inodedep) != 0) 2680 (void) free_inodedep(inodedep); 2681 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2682 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2683 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2684 if (mkdir->md_diradd != dap) 2685 continue; 2686 dap->da_state &= ~mkdir->md_state; 2687 WORKLIST_REMOVE(&mkdir->md_list); 2688 LIST_REMOVE(mkdir, md_mkdirs); 2689 WORKITEM_FREE(mkdir, D_MKDIR); 2690 } 2691 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2692 FREE_LOCK(&lk); 2693 panic("free_diradd: unfound ref"); 2694 } 2695 } 2696 WORKITEM_FREE(dap, D_DIRADD); 2697 } 2698 2699 /* 2700 * Directory entry removal dependencies. 2701 * 2702 * When removing a directory entry, the entry's inode pointer must be 2703 * zero'ed on disk before the corresponding inode's link count is decremented 2704 * (possibly freeing the inode for re-use). This dependency is handled by 2705 * updating the directory entry but delaying the inode count reduction until 2706 * after the directory block has been written to disk. After this point, the 2707 * inode count can be decremented whenever it is convenient. 2708 */ 2709 2710 /* 2711 * This routine should be called immediately after removing 2712 * a directory entry. The inode's link count should not be 2713 * decremented by the calling procedure -- the soft updates 2714 * code will do this task when it is safe. 2715 */ 2716 void 2717 softdep_setup_remove(bp, dp, ip, isrmdir) 2718 struct buf *bp; /* buffer containing directory block */ 2719 struct inode *dp; /* inode for the directory being modified */ 2720 struct inode *ip; /* inode for directory entry being removed */ 2721 int isrmdir; /* indicates if doing RMDIR */ 2722 { 2723 struct dirrem *dirrem, *prevdirrem; 2724 2725 /* 2726 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2727 */ 2728 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2729 2730 /* 2731 * If the COMPLETE flag is clear, then there were no active 2732 * entries and we want to roll back to a zeroed entry until 2733 * the new inode is committed to disk. If the COMPLETE flag is 2734 * set then we have deleted an entry that never made it to 2735 * disk. If the entry we deleted resulted from a name change, 2736 * then the old name still resides on disk. We cannot delete 2737 * its inode (returned to us in prevdirrem) until the zeroed 2738 * directory entry gets to disk. The new inode has never been 2739 * referenced on the disk, so can be deleted immediately. 
2740 */
2741 	if ((dirrem->dm_state & COMPLETE) == 0) {
2742 		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
2743 		    dm_next);
2744 		FREE_LOCK(&lk);
2745 	} else {
2746 		if (prevdirrem != NULL)
2747 			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
2748 			    prevdirrem, dm_next);
2749 		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
2750 		FREE_LOCK(&lk);
2751 		handle_workitem_remove(dirrem, NULL);
2752 	}
2753 }
2754 
2755 /*
2756  * Allocate a new dirrem if appropriate and return it along with
2757  * its associated pagedep. Called without a lock, returns with lock.
2758  */
2759 static long num_dirrem;		/* number of dirrem allocated */
2760 static struct dirrem *
2761 newdirrem(bp, dp, ip, isrmdir, prevdirremp)
2762 	struct buf *bp;		/* buffer containing directory block */
2763 	struct inode *dp;	/* inode for the directory being modified */
2764 	struct inode *ip;	/* inode for directory entry being removed */
2765 	int isrmdir;		/* indicates if doing RMDIR */
2766 	struct dirrem **prevdirremp; /* previously referenced inode, if any */
2767 {
2768 	int offset;
2769 	ufs_lbn_t lbn;
2770 	struct diradd *dap;
2771 	struct dirrem *dirrem;
2772 	struct pagedep *pagedep;
2773 
2774 	/*
2775 	 * Whiteouts have no deletion dependencies.
2776 	 */
2777 	if (ip == NULL)
2778 		panic("newdirrem: whiteout");
2779 	/*
2780 	 * If we are over our limit, try to improve the situation.
2781 	 * Limiting the number of dirrem structures will also limit
2782 	 * the number of freefile and freeblks structures.
2783 	 */
2784 	if (num_dirrem > max_softdeps / 2)
2785 		(void) request_cleanup(FLUSH_REMOVE, 0);
2786 	num_dirrem += 1;
2787 	MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem),
2788 		M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
2789 	dirrem->dm_list.wk_type = D_DIRREM;
2790 	dirrem->dm_state = isrmdir ? RMDIR : 0;
2791 	dirrem->dm_mnt = ITOV(ip)->v_mount;
2792 	dirrem->dm_oldinum = ip->i_number;
2793 	*prevdirremp = NULL;
2794 
2795 	ACQUIRE_LOCK(&lk);
2796 	lbn = lblkno(dp->i_fs, dp->i_offset);
2797 	offset = blkoff(dp->i_fs, dp->i_offset);
2798 	if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
2799 		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2800 	dirrem->dm_pagedep = pagedep;
2801 	/*
2802 	 * Check for a diradd dependency for the same directory entry.
2803 	 * If present, then both dependencies become obsolete and can
2804 	 * be de-allocated. Check for an entry on both the pd_diraddhd
2805 	 * list and the pd_pendinghd list.
2806 	 */
2807 
2808 	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
2809 		if (dap->da_offset == offset)
2810 			break;
2811 	if (dap == NULL) {
2812 
2813 		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
2814 			if (dap->da_offset == offset)
2815 				break;
2816 		if (dap == NULL)
2817 			return (dirrem);
2818 	}
2819 	/*
2820 	 * Must be ATTACHED at this point.
2821 	 */
2822 	if ((dap->da_state & ATTACHED) == 0) {
2823 		FREE_LOCK(&lk);
2824 		panic("newdirrem: not ATTACHED");
2825 	}
2826 	if (dap->da_newinum != ip->i_number) {
2827 		FREE_LOCK(&lk);
2828 		panic("newdirrem: inum %d should be %d",
2829 		    ip->i_number, dap->da_newinum);
2830 	}
2831 	/*
2832 	 * If we are deleting a changed name that never made it to disk,
2833 	 * then return the dirrem describing the previous inode (which
2834 	 * represents the inode currently referenced from this entry on disk).
2835 	 */
2836 	if ((dap->da_state & DIRCHG) != 0) {
2837 		*prevdirremp = dap->da_previous;
2838 		dap->da_state &= ~DIRCHG;
2839 		dap->da_pagedep = pagedep;
2840 	}
2841 	/*
2842 	 * We are deleting an entry that never made it to disk.
2843 	 * Mark it COMPLETE so we can delete its inode immediately.
2844 */ 2845 dirrem->dm_state |= COMPLETE; 2846 free_diradd(dap); 2847 return (dirrem); 2848 } 2849 2850 /* 2851 * Directory entry change dependencies. 2852 * 2853 * Changing an existing directory entry requires that an add operation 2854 * be completed first followed by a deletion. The semantics for the addition 2855 * are identical to the description of adding a new entry above except 2856 * that the rollback is to the old inode number rather than zero. Once 2857 * the addition dependency is completed, the removal is done as described 2858 * in the removal routine above. 2859 */ 2860 2861 /* 2862 * This routine should be called immediately after changing 2863 * a directory entry. The inode's link count should not be 2864 * decremented by the calling procedure -- the soft updates 2865 * code will perform this task when it is safe. 2866 */ 2867 void 2868 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 2869 struct buf *bp; /* buffer containing directory block */ 2870 struct inode *dp; /* inode for the directory being modified */ 2871 struct inode *ip; /* inode for directory entry being removed */ 2872 long newinum; /* new inode number for changed entry */ 2873 int isrmdir; /* indicates if doing RMDIR */ 2874 { 2875 int offset; 2876 struct diradd *dap = NULL; 2877 struct dirrem *dirrem, *prevdirrem; 2878 struct pagedep *pagedep; 2879 struct inodedep *inodedep; 2880 2881 offset = blkoff(dp->i_fs, dp->i_offset); 2882 2883 /* 2884 * Whiteouts do not need diradd dependencies. 2885 */ 2886 if (newinum != WINO) { 2887 MALLOC(dap, struct diradd *, sizeof(struct diradd), 2888 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 2889 dap->da_list.wk_type = D_DIRADD; 2890 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2891 dap->da_offset = offset; 2892 dap->da_newinum = newinum; 2893 } 2894 2895 /* 2896 * Allocate a new dirrem and ACQUIRE_LOCK. 2897 */ 2898 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2899 pagedep = dirrem->dm_pagedep; 2900 /* 2901 * The possible values for isrmdir: 2902 * 0 - non-directory file rename 2903 * 1 - directory rename within same directory 2904 * inum - directory rename to new directory of given inode number 2905 * When renaming to a new directory, we are both deleting and 2906 * creating a new directory entry, so the link count on the new 2907 * directory should not change. Thus we do not need the followup 2908 * dirrem which is usually done in handle_workitem_remove. We set 2909 * the DIRCHG flag to tell handle_workitem_remove to skip the 2910 * followup dirrem. 2911 */ 2912 if (isrmdir > 1) 2913 dirrem->dm_state |= DIRCHG; 2914 2915 /* 2916 * Whiteouts have no additional dependencies, 2917 * so just put the dirrem on the correct list. 2918 */ 2919 if (newinum == WINO) { 2920 if ((dirrem->dm_state & COMPLETE) == 0) { 2921 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2922 dm_next); 2923 } else { 2924 dirrem->dm_dirinum = pagedep->pd_ino; 2925 add_to_worklist(&dirrem->dm_list); 2926 } 2927 FREE_LOCK(&lk); 2928 return; 2929 } 2930 2931 /* 2932 * If the COMPLETE flag is clear, then there were no active 2933 * entries and we want to roll back to the previous inode until 2934 * the new inode is committed to disk. If the COMPLETE flag is 2935 * set, then we have deleted an entry that never made it to disk. 2936 * If the entry we deleted resulted from a name change, then the old 2937 * inode reference still resides on disk. Any rollback that we do 2938 * needs to be to that old inode (returned to us in prevdirrem). 
 * If
2939  * the entry we deleted resulted from a create, then there is
2940  * no entry on the disk, so we want to roll back to zero rather
2941  * than the uncommitted inode. In either of the COMPLETE cases we
2942  * want to immediately free the unwritten and unreferenced inode.
2943  */
2944 	if ((dirrem->dm_state & COMPLETE) == 0) {
2945 		dap->da_previous = dirrem;
2946 	} else {
2947 		if (prevdirrem != NULL) {
2948 			dap->da_previous = prevdirrem;
2949 		} else {
2950 			dap->da_state &= ~DIRCHG;
2951 			dap->da_pagedep = pagedep;
2952 		}
2953 		dirrem->dm_dirinum = pagedep->pd_ino;
2954 		add_to_worklist(&dirrem->dm_list);
2955 	}
2956 	/*
2957 	 * Link into its inodedep. Put it on the id_bufwait list if the inode
2958 	 * is not yet written. If it is written, do the post-inode write
2959 	 * processing to put it on the id_pendinghd list.
2960 	 */
2961 	if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 ||
2962 	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
2963 		dap->da_state |= COMPLETE;
2964 		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
2965 		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
2966 	} else {
2967 		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
2968 		    dap, da_pdlist);
2969 		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
2970 	}
2971 	FREE_LOCK(&lk);
2972 }
2973 
2974 /*
2975  * Called whenever the link count on an inode is changed.
2976  * It creates an inode dependency so that the new reference(s)
2977  * to the inode cannot be committed to disk until the updated
2978  * inode has been written.
2979  */
2980 void
2981 softdep_change_linkcnt(ip)
2982 	struct inode *ip;	/* the inode with the increased link count */
2983 {
2984 	struct inodedep *inodedep;
2985 
2986 	ACQUIRE_LOCK(&lk);
2987 	(void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep);
2988 	if (ip->i_nlink < ip->i_effnlink) {
2989 		FREE_LOCK(&lk);
2990 		panic("softdep_change_linkcnt: bad delta");
2991 	}
2992 	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
2993 	FREE_LOCK(&lk);
2994 }
2995 
2996 /*
2997  * Called when the effective link count and the reference count
2998  * on an inode drop to zero. At this point there are no names
2999  * referencing the file in the filesystem and no active file
3000  * references. The space associated with the file will be freed
3001  * as soon as the necessary soft dependencies are cleared.
3002  */
3003 void
3004 softdep_releasefile(ip)
3005 	struct inode *ip;	/* inode with the zero effective link count */
3006 {
3007 	struct inodedep *inodedep;
3008 
3009 	if (ip->i_effnlink > 0)
3010 		panic("softdep_releasefile: file still referenced");
3011 	/*
3012 	 * We may be called several times as the real reference count
3013 	 * drops to zero. We only want to account for the space once.
3014 	 */
3015 	if (ip->i_flag & IN_SPACECOUNTED)
3016 		return;
3017 	/*
3018 	 * We have to deactivate a snapshot otherwise copy-on-write may
3019 	 * add blocks and the cleanup may remove blocks after we have
3020 	 * tried to account for them.
3021 	 */
3022 	if ((ip->i_flags & SF_SNAPSHOT) != 0)
3023 		ffs_snapremove(ITOV(ip));
3024 	/*
3025 	 * If we are tracking an nlinkdelta, we have to also remember
3026 	 * whether we accounted for the freed space yet.
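	 *
	 * (Example of the nlinkdelta bookkeeping used here and in
	 * softdep_change_linkcnt() above: after removing one of a file's
	 * two names, i_effnlink drops to 1 at once while i_nlink stays 2
	 * until the directory write completes, so
	 *
	 *	id_nlinkdelta = i_nlink - i_effnlink = 1
	 *
	 * and handle_workitem_remove() later brings both back in step.)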
3027 */ 3028 ACQUIRE_LOCK(&lk); 3029 if ((inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep))) 3030 inodedep->id_state |= SPACECOUNTED; 3031 FREE_LOCK(&lk); 3032 ip->i_fs->fs_pendingblocks += ip->i_blocks; 3033 ip->i_fs->fs_pendinginodes += 1; 3034 ip->i_flag |= IN_SPACECOUNTED; 3035 } 3036 3037 /* 3038 * This workitem decrements the inode's link count. 3039 * If the link count reaches zero, the file is removed. 3040 */ 3041 static void 3042 handle_workitem_remove(dirrem, xp) 3043 struct dirrem *dirrem; 3044 struct vnode *xp; 3045 { 3046 struct thread *td = curthread; 3047 struct inodedep *inodedep; 3048 struct vnode *vp; 3049 struct inode *ip; 3050 ino_t oldinum; 3051 int error; 3052 3053 if ((vp = xp) == NULL && 3054 (error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, LK_EXCLUSIVE, 3055 &vp)) != 0) { 3056 softdep_error("handle_workitem_remove: vget", error); 3057 return; 3058 } 3059 ip = VTOI(vp); 3060 ACQUIRE_LOCK(&lk); 3061 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 3062 FREE_LOCK(&lk); 3063 panic("handle_workitem_remove: lost inodedep"); 3064 } 3065 /* 3066 * Normal file deletion. 3067 */ 3068 if ((dirrem->dm_state & RMDIR) == 0) { 3069 ip->i_nlink--; 3070 ip->i_flag |= IN_CHANGE; 3071 if (ip->i_nlink < ip->i_effnlink) { 3072 FREE_LOCK(&lk); 3073 panic("handle_workitem_remove: bad file delta"); 3074 } 3075 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3076 FREE_LOCK(&lk); 3077 vput(vp); 3078 num_dirrem -= 1; 3079 WORKITEM_FREE(dirrem, D_DIRREM); 3080 return; 3081 } 3082 /* 3083 * Directory deletion. Decrement reference count for both the 3084 * just deleted parent directory entry and the reference for ".". 3085 * Next truncate the directory to length zero. When the 3086 * truncation completes, arrange to have the reference count on 3087 * the parent decremented to account for the loss of "..". 3088 */ 3089 ip->i_nlink -= 2; 3090 ip->i_flag |= IN_CHANGE; 3091 if (ip->i_nlink < ip->i_effnlink) { 3092 FREE_LOCK(&lk); 3093 panic("handle_workitem_remove: bad dir delta"); 3094 } 3095 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3096 FREE_LOCK(&lk); 3097 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_ucred, td)) != 0) 3098 softdep_error("handle_workitem_remove: truncate", error); 3099 /* 3100 * Rename a directory to a new parent. Since, we are both deleting 3101 * and creating a new directory entry, the link count on the new 3102 * directory should not change. Thus we skip the followup dirrem. 3103 */ 3104 if (dirrem->dm_state & DIRCHG) { 3105 vput(vp); 3106 num_dirrem -= 1; 3107 WORKITEM_FREE(dirrem, D_DIRREM); 3108 return; 3109 } 3110 /* 3111 * If the inodedep does not exist, then the zero'ed inode has 3112 * been written to disk. If the allocated inode has never been 3113 * written to disk, then the on-disk inode is zero'ed. In either 3114 * case we can remove the file immediately. 3115 */ 3116 ACQUIRE_LOCK(&lk); 3117 dirrem->dm_state = 0; 3118 oldinum = dirrem->dm_oldinum; 3119 dirrem->dm_oldinum = dirrem->dm_dirinum; 3120 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 3121 check_inode_unwritten(inodedep)) { 3122 FREE_LOCK(&lk); 3123 vput(vp); 3124 handle_workitem_remove(dirrem, NULL); 3125 return; 3126 } 3127 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 3128 FREE_LOCK(&lk); 3129 vput(vp); 3130 } 3131 3132 /* 3133 * Inode de-allocation dependencies. 3134 * 3135 * When an inode's link count is reduced to zero, it can be de-allocated. 
We 3136 * found it convenient to postpone de-allocation until after the inode is 3137 * written to disk with its new link count (zero). At this point, all of the 3138 * on-disk inode's block pointers are nullified and, with careful dependency 3139 * list ordering, all dependencies related to the inode will be satisfied and 3140 * the corresponding dependency structures de-allocated. So, if/when the 3141 * inode is reused, there will be no mixing of old dependencies with new 3142 * ones. This artificial dependency is set up by the block de-allocation 3143 * procedure above (softdep_setup_freeblocks) and completed by the 3144 * following procedure. 3145 */ 3146 static void 3147 handle_workitem_freefile(freefile) 3148 struct freefile *freefile; 3149 { 3150 struct fs *fs; 3151 struct inodedep *idp; 3152 int error; 3153 3154 fs = VFSTOUFS(freefile->fx_mnt)->um_fs; 3155 #ifdef DEBUG 3156 ACQUIRE_LOCK(&lk); 3157 error = inodedep_lookup(fs, freefile->fx_oldinum, 0, &idp); 3158 FREE_LOCK(&lk); 3159 if (error) 3160 panic("handle_workitem_freefile: inodedep survived"); 3161 #endif 3162 fs->fs_pendinginodes -= 1; 3163 if ((error = ffs_freefile(fs, freefile->fx_devvp, freefile->fx_oldinum, 3164 freefile->fx_mode)) != 0) 3165 softdep_error("handle_workitem_freefile", error); 3166 WORKITEM_FREE(freefile, D_FREEFILE); 3167 } 3168 3169 /* 3170 * Disk writes. 3171 * 3172 * The dependency structures constructed above are most actively used when file 3173 * system blocks are written to disk. No constraints are placed on when a 3174 * block can be written, but unsatisfied update dependencies are made safe by 3175 * modifying (or replacing) the source memory for the duration of the disk 3176 * write. When the disk write completes, the memory block is again brought 3177 * up-to-date. 3178 * 3179 * In-core inode structure reclamation. 3180 * 3181 * Because there are a finite number of "in-core" inode structures, they are 3182 * reused regularly. By transferring all inode-related dependencies to the 3183 * in-memory inode block and indexing them separately (via "inodedep"s), we 3184 * can allow "in-core" inode structures to be reused at any time and avoid 3185 * any increase in contention. 3186 * 3187 * Called just before entering the device driver to initiate a new disk I/O. 3188 * The buffer must be locked, thus, no I/O completion operations can occur 3189 * while we are manipulating its associated dependencies. 3190 */ 3191 static void 3192 softdep_disk_io_initiation(bp) 3193 struct buf *bp; /* structure describing disk write to occur */ 3194 { 3195 struct worklist *wk, *nextwk; 3196 struct indirdep *indirdep; 3197 3198 /* 3199 * We only care about write operations. There should never 3200 * be dependencies for reads. 3201 */ 3202 if (bp->b_iocmd == BIO_READ) 3203 panic("softdep_disk_io_initiation: read"); 3204 /* 3205 * Do any necessary pre-I/O processing. 3206 */ 3207 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) { 3208 nextwk = LIST_NEXT(wk, wk_list); 3209 switch (wk->wk_type) { 3210 3211 case D_PAGEDEP: 3212 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3213 continue; 3214 3215 case D_INODEDEP: 3216 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3217 continue; 3218 3219 case D_INDIRDEP: 3220 indirdep = WK_INDIRDEP(wk); 3221 if (indirdep->ir_state & GOINGAWAY) 3222 panic("disk_io_initiation: indirdep gone"); 3223 /* 3224 * If there are no remaining dependencies, this 3225 * will be writing the real pointers, so the 3226 * dependency can be freed. 
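			 *
			 * (Sketch of the rollback handshake performed
			 * below when dependencies remain:
			 *
			 *	indirdep->ir_state &= ~ATTACHED;
			 *	indirdep->ir_state |= UNDONE;
			 *
			 * while the safe copy replaces the live data for
			 * the duration of the write; the write-completion
			 * path restores the up-to-date image from
			 * ir_saveddata and sets ATTACHED again.)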
3227 */ 3228 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3229 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3230 brelse(indirdep->ir_savebp); 3231 /* inline expand WORKLIST_REMOVE(wk); */ 3232 wk->wk_state &= ~ONWORKLIST; 3233 LIST_REMOVE(wk, wk_list); 3234 WORKITEM_FREE(indirdep, D_INDIRDEP); 3235 continue; 3236 } 3237 /* 3238 * Replace up-to-date version with safe version. 3239 */ 3240 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3241 M_INDIRDEP, M_SOFTDEP_FLAGS); 3242 ACQUIRE_LOCK(&lk); 3243 indirdep->ir_state &= ~ATTACHED; 3244 indirdep->ir_state |= UNDONE; 3245 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3246 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3247 bp->b_bcount); 3248 FREE_LOCK(&lk); 3249 continue; 3250 3251 case D_MKDIR: 3252 case D_BMSAFEMAP: 3253 case D_ALLOCDIRECT: 3254 case D_ALLOCINDIR: 3255 continue; 3256 3257 default: 3258 panic("handle_disk_io_initiation: Unexpected type %s", 3259 TYPENAME(wk->wk_type)); 3260 /* NOTREACHED */ 3261 } 3262 } 3263 } 3264 3265 /* 3266 * Called from within the procedure above to deal with unsatisfied 3267 * allocation dependencies in a directory. The buffer must be locked, 3268 * thus, no I/O completion operations can occur while we are 3269 * manipulating its associated dependencies. 3270 */ 3271 static void 3272 initiate_write_filepage(pagedep, bp) 3273 struct pagedep *pagedep; 3274 struct buf *bp; 3275 { 3276 struct diradd *dap; 3277 struct direct *ep; 3278 int i; 3279 3280 if (pagedep->pd_state & IOSTARTED) { 3281 /* 3282 * This can only happen if there is a driver that does not 3283 * understand chaining. Here biodone will reissue the call 3284 * to strategy for the incomplete buffers. 3285 */ 3286 printf("initiate_write_filepage: already started\n"); 3287 return; 3288 } 3289 pagedep->pd_state |= IOSTARTED; 3290 ACQUIRE_LOCK(&lk); 3291 for (i = 0; i < DAHASHSZ; i++) { 3292 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3293 ep = (struct direct *) 3294 ((char *)bp->b_data + dap->da_offset); 3295 if (ep->d_ino != dap->da_newinum) { 3296 FREE_LOCK(&lk); 3297 panic("%s: dir inum %d != new %d", 3298 "initiate_write_filepage", 3299 ep->d_ino, dap->da_newinum); 3300 } 3301 if (dap->da_state & DIRCHG) 3302 ep->d_ino = dap->da_previous->dm_oldinum; 3303 else 3304 ep->d_ino = 0; 3305 dap->da_state &= ~ATTACHED; 3306 dap->da_state |= UNDONE; 3307 } 3308 } 3309 FREE_LOCK(&lk); 3310 } 3311 3312 /* 3313 * Called from within the procedure above to deal with unsatisfied 3314 * allocation dependencies in an inodeblock. The buffer must be 3315 * locked, thus, no I/O completion operations can occur while we 3316 * are manipulating its associated dependencies. 3317 */ 3318 static void 3319 initiate_write_inodeblock(inodedep, bp) 3320 struct inodedep *inodedep; 3321 struct buf *bp; /* The inode block */ 3322 { 3323 struct allocdirect *adp, *lastadp; 3324 struct dinode *dp; 3325 struct fs *fs; 3326 ufs_lbn_t prevlbn = 0; 3327 int i, deplist; 3328 3329 if (inodedep->id_state & IOSTARTED) 3330 panic("initiate_write_inodeblock: already started"); 3331 inodedep->id_state |= IOSTARTED; 3332 fs = inodedep->id_fs; 3333 dp = (struct dinode *)bp->b_data + 3334 ino_to_fsbo(fs, inodedep->id_ino); 3335 /* 3336 * If the bitmap is not yet written, then the allocated 3337 * inode cannot be written to disk. 
3338  */
3339 	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
3340 		if (inodedep->id_savedino != NULL)
3341 			panic("initiate_write_inodeblock: already doing I/O");
3342 		MALLOC(inodedep->id_savedino, struct dinode *,
3343 		    sizeof(struct dinode), M_INODEDEP, M_SOFTDEP_FLAGS);
3344 		*inodedep->id_savedino = *dp;
3345 		bzero((caddr_t)dp, sizeof(struct dinode));
3346 		return;
3347 	}
3348 	/*
3349 	 * If no dependencies, then there is nothing to roll back.
3350 	 */
3351 	inodedep->id_savedsize = dp->di_size;
3352 	if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL)
3353 		return;
3354 	/*
3355 	 * Set the dependencies to busy.
3356 	 */
3357 	ACQUIRE_LOCK(&lk);
3358 	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3359 	     adp = TAILQ_NEXT(adp, ad_next)) {
3360 #ifdef DIAGNOSTIC
3361 		if (deplist != 0 && prevlbn >= adp->ad_lbn) {
3362 			FREE_LOCK(&lk);
3363 			panic("softdep_write_inodeblock: lbn order");
3364 		}
3365 		prevlbn = adp->ad_lbn;
3366 		if (adp->ad_lbn < NDADDR &&
3367 		    dp->di_db[adp->ad_lbn] != adp->ad_newblkno) {
3368 			FREE_LOCK(&lk);
3369 			panic("%s: direct pointer #%ld mismatch %d != %d",
3370 			    "softdep_write_inodeblock", adp->ad_lbn,
3371 			    dp->di_db[adp->ad_lbn], adp->ad_newblkno);
3372 		}
3373 		if (adp->ad_lbn >= NDADDR &&
3374 		    dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) {
3375 			FREE_LOCK(&lk);
3376 			panic("%s: indirect pointer #%ld mismatch %d != %d",
3377 			    "softdep_write_inodeblock", adp->ad_lbn - NDADDR,
3378 			    dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno);
3379 		}
3380 		deplist |= 1 << adp->ad_lbn;
3381 		if ((adp->ad_state & ATTACHED) == 0) {
3382 			FREE_LOCK(&lk);
3383 			panic("softdep_write_inodeblock: Unknown state 0x%x",
3384 			    adp->ad_state);
3385 		}
3386 #endif /* DIAGNOSTIC */
3387 		adp->ad_state &= ~ATTACHED;
3388 		adp->ad_state |= UNDONE;
3389 	}
3390 	/*
3391 	 * The on-disk inode cannot claim to be any larger than the last
3392 	 * fragment that has been written. Otherwise, the on-disk inode
3393 	 * might have fragments that were not the last block in the file,
3394 	 * which would corrupt the filesystem.
3395 	 */
3396 	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3397 	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3398 		if (adp->ad_lbn >= NDADDR)
3399 			break;
3400 		dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3401 		/* keep going until hitting a rollback to a frag */
3402 		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3403 			continue;
3404 		dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3405 		for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3406 #ifdef DIAGNOSTIC
3407 			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) {
3408 				FREE_LOCK(&lk);
3409 				panic("softdep_write_inodeblock: lost dep1");
3410 			}
3411 #endif /* DIAGNOSTIC */
3412 			dp->di_db[i] = 0;
3413 		}
3414 		for (i = 0; i < NIADDR; i++) {
3415 #ifdef DIAGNOSTIC
3416 			if (dp->di_ib[i] != 0 &&
3417 			    (deplist & ((1 << NDADDR) << i)) == 0) {
3418 				FREE_LOCK(&lk);
3419 				panic("softdep_write_inodeblock: lost dep2");
3420 			}
3421 #endif /* DIAGNOSTIC */
3422 			dp->di_ib[i] = 0;
3423 		}
3424 		FREE_LOCK(&lk);
3425 		return;
3426 	}
3427 	/*
3428 	 * If we have zero'ed out the last allocated block of the file,
3429 	 * roll back the size to the last currently allocated block.
3430 	 * We know that this last allocated block is full-sized, as
3431 	 * we already checked for fragments in the loop above.
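 * For example, with fs_bsize of 8192, if di_db[3] is the last
 * nonzero pointer remaining after the rollback, the loop below
 * computes
 *
 *	dp->di_size = (3 + 1) * 8192;
 *
 * so the on-disk inode never claims storage past its last safe block.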
3432 */ 3433 if (lastadp != NULL && 3434 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3435 for (i = lastadp->ad_lbn; i >= 0; i--) 3436 if (dp->di_db[i] != 0) 3437 break; 3438 dp->di_size = (i + 1) * fs->fs_bsize; 3439 } 3440 /* 3441 * The only dependencies are for indirect blocks. 3442 * 3443 * The file size for indirect block additions is not guaranteed. 3444 * Such a guarantee would be non-trivial to achieve. The conventional 3445 * synchronous write implementation also does not make this guarantee. 3446 * Fsck should catch and fix discrepancies. Arguably, the file size 3447 * can be over-estimated without destroying integrity when the file 3448 * moves into the indirect blocks (i.e., is large). If we want to 3449 * postpone fsck, we are stuck with this argument. 3450 */ 3451 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3452 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3453 FREE_LOCK(&lk); 3454 } 3455 3456 /* 3457 * This routine is called during the completion interrupt 3458 * service routine for a disk write (from the procedure called 3459 * by the device driver to inform the file system caches of 3460 * a request completion). It should be called early in this 3461 * procedure, before the block is made available to other 3462 * processes or other routines are called. 3463 */ 3464 static void 3465 softdep_disk_write_complete(bp) 3466 struct buf *bp; /* describes the completed disk write */ 3467 { 3468 struct worklist *wk; 3469 struct workhead reattach; 3470 struct newblk *newblk; 3471 struct allocindir *aip; 3472 struct allocdirect *adp; 3473 struct indirdep *indirdep; 3474 struct inodedep *inodedep; 3475 struct bmsafemap *bmsafemap; 3476 3477 #ifdef DEBUG 3478 if (lk.lkt_held != NOHOLDER) 3479 panic("softdep_disk_write_complete: lock is held"); 3480 lk.lkt_held = SPECIAL_FLAG; 3481 #endif 3482 LIST_INIT(&reattach); 3483 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3484 WORKLIST_REMOVE(wk); 3485 switch (wk->wk_type) { 3486 3487 case D_PAGEDEP: 3488 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3489 WORKLIST_INSERT(&reattach, wk); 3490 continue; 3491 3492 case D_INODEDEP: 3493 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3494 WORKLIST_INSERT(&reattach, wk); 3495 continue; 3496 3497 case D_BMSAFEMAP: 3498 bmsafemap = WK_BMSAFEMAP(wk); 3499 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3500 newblk->nb_state |= DEPCOMPLETE; 3501 newblk->nb_bmsafemap = NULL; 3502 LIST_REMOVE(newblk, nb_deps); 3503 } 3504 while ((adp = 3505 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3506 adp->ad_state |= DEPCOMPLETE; 3507 adp->ad_buf = NULL; 3508 LIST_REMOVE(adp, ad_deps); 3509 handle_allocdirect_partdone(adp); 3510 } 3511 while ((aip = 3512 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3513 aip->ai_state |= DEPCOMPLETE; 3514 aip->ai_buf = NULL; 3515 LIST_REMOVE(aip, ai_deps); 3516 handle_allocindir_partdone(aip); 3517 } 3518 while ((inodedep = 3519 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3520 inodedep->id_state |= DEPCOMPLETE; 3521 LIST_REMOVE(inodedep, id_deps); 3522 inodedep->id_buf = NULL; 3523 } 3524 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3525 continue; 3526 3527 case D_MKDIR: 3528 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3529 continue; 3530 3531 case D_ALLOCDIRECT: 3532 adp = WK_ALLOCDIRECT(wk); 3533 adp->ad_state |= COMPLETE; 3534 handle_allocdirect_partdone(adp); 3535 continue; 3536 3537 case D_ALLOCINDIR: 3538 aip = WK_ALLOCINDIR(wk); 3539 aip->ai_state |= COMPLETE; 3540 handle_allocindir_partdone(aip); 3541 continue; 3542 3543 case D_INDIRDEP: 3544 indirdep = 
WK_INDIRDEP(wk);
3545 			if (indirdep->ir_state & GOINGAWAY) {
3546 				lk.lkt_held = NOHOLDER;
3547 				panic("disk_write_complete: indirdep gone");
3548 			}
3549 			bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
3550 			FREE(indirdep->ir_saveddata, M_INDIRDEP);
3551 			indirdep->ir_saveddata = 0;
3552 			indirdep->ir_state &= ~UNDONE;
3553 			indirdep->ir_state |= ATTACHED;
3554 			while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
3555 				handle_allocindir_partdone(aip);
3556 				if (aip == LIST_FIRST(&indirdep->ir_donehd)) {
3557 					lk.lkt_held = NOHOLDER;
3558 					panic("disk_write_complete: not gone");
3559 				}
3560 			}
3561 			WORKLIST_INSERT(&reattach, wk);
3562 			if ((bp->b_flags & B_DELWRI) == 0)
3563 				stat_indir_blk_ptrs++;
3564 			bdirty(bp);
3565 			continue;
3566 
3567 		default:
3568 			lk.lkt_held = NOHOLDER;
3569 			panic("handle_disk_write_complete: Unknown type %s",
3570 			    TYPENAME(wk->wk_type));
3571 			/* NOTREACHED */
3572 		}
3573 	}
3574 	/*
3575 	 * Reattach any requests that must be redone.
3576 	 */
3577 	while ((wk = LIST_FIRST(&reattach)) != NULL) {
3578 		WORKLIST_REMOVE(wk);
3579 		WORKLIST_INSERT(&bp->b_dep, wk);
3580 	}
3581 #ifdef DEBUG
3582 	if (lk.lkt_held != SPECIAL_FLAG)
3583 		panic("softdep_disk_write_complete: lock lost");
3584 	lk.lkt_held = NOHOLDER;
3585 #endif
3586 }
3587 
3588 /*
3589  * Called from within softdep_disk_write_complete above. Note that
3590  * this routine is always called from interrupt level with further
3591  * splbio interrupts blocked.
3592  */
3593 static void
3594 handle_allocdirect_partdone(adp)
3595 	struct allocdirect *adp;	/* the completed allocdirect */
3596 {
3597 	struct allocdirect *listadp;
3598 	struct inodedep *inodedep;
3599 	long bsize, delay;
3600 
3601 	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
3602 		return;
3603 	if (adp->ad_buf != NULL) {
3604 		lk.lkt_held = NOHOLDER;
3605 		panic("handle_allocdirect_partdone: dangling dep");
3606 	}
3607 	/*
3608 	 * The on-disk inode cannot claim to be any larger than the last
3609 	 * fragment that has been written. Otherwise, the on-disk inode
3610 	 * might have fragments that were not the last block in the file,
3611 	 * which would corrupt the filesystem. Thus, we cannot free any
3612 	 * allocdirects after one whose ad_oldblkno claims a fragment as
3613 	 * these blocks must be rolled back to zero before writing the inode.
3614 	 * We check the currently active set of allocdirects in id_inoupdt.
3615 	 */
3616 	inodedep = adp->ad_inodedep;
3617 	bsize = inodedep->id_fs->fs_bsize;
3618 	TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) {
3619 		/* found our block */
3620 		if (listadp == adp)
3621 			break;
3622 		/* continue if ad_oldsize is not a fragment */
3623 		if (listadp->ad_oldsize == 0 ||
3624 		    listadp->ad_oldsize == bsize)
3625 			continue;
3626 		/* hit a fragment */
3627 		return;
3628 	}
3629 	/*
3630 	 * If we have reached the end of the current list without
3631 	 * finding the just finished dependency, then it must be
3632 	 * on the future dependency list. Future dependencies cannot
3633 	 * be freed until they are moved to the current list.
3634 	 */
3635 	if (listadp == NULL) {
3636 #ifdef DEBUG
3637 		TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next)
3638 			/* found our block */
3639 			if (listadp == adp)
3640 				break;
3641 		if (listadp == NULL) {
3642 			lk.lkt_held = NOHOLDER;
3643 			panic("handle_allocdirect_partdone: lost dep");
3644 		}
3645 #endif /* DEBUG */
3646 		return;
3647 	}
3648 	/*
3649 	 * If we have found the just finished dependency, then free
3650 	 * it along with anything that follows it that is complete.
3651 * If the inode still has a bitmap dependency, then it has 3652 * never been written to disk, hence the on-disk inode cannot 3653 * reference the old fragment so we can free it without delay. 3654 */ 3655 delay = (inodedep->id_state & DEPCOMPLETE); 3656 for (; adp; adp = listadp) { 3657 listadp = TAILQ_NEXT(adp, ad_next); 3658 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3659 return; 3660 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 3661 } 3662 } 3663 3664 /* 3665 * Called from within softdep_disk_write_complete above. Note that 3666 * this routine is always called from interrupt level with further 3667 * splbio interrupts blocked. 3668 */ 3669 static void 3670 handle_allocindir_partdone(aip) 3671 struct allocindir *aip; /* the completed allocindir */ 3672 { 3673 struct indirdep *indirdep; 3674 3675 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3676 return; 3677 if (aip->ai_buf != NULL) { 3678 lk.lkt_held = NOHOLDER; 3679 panic("handle_allocindir_partdone: dangling dependency"); 3680 } 3681 indirdep = aip->ai_indirdep; 3682 if (indirdep->ir_state & UNDONE) { 3683 LIST_REMOVE(aip, ai_next); 3684 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3685 return; 3686 } 3687 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3688 aip->ai_newblkno; 3689 LIST_REMOVE(aip, ai_next); 3690 if (aip->ai_freefrag != NULL) 3691 add_to_worklist(&aip->ai_freefrag->ff_list); 3692 WORKITEM_FREE(aip, D_ALLOCINDIR); 3693 } 3694 3695 /* 3696 * Called from within softdep_disk_write_complete above to restore 3697 * in-memory inode block contents to their most up-to-date state. Note 3698 * that this routine is always called from interrupt level with further 3699 * splbio interrupts blocked. 3700 */ 3701 static int 3702 handle_written_inodeblock(inodedep, bp) 3703 struct inodedep *inodedep; 3704 struct buf *bp; /* buffer containing the inode block */ 3705 { 3706 struct worklist *wk, *filefree; 3707 struct allocdirect *adp, *nextadp; 3708 struct dinode *dp; 3709 int hadchanges; 3710 3711 if ((inodedep->id_state & IOSTARTED) == 0) { 3712 lk.lkt_held = NOHOLDER; 3713 panic("handle_written_inodeblock: not started"); 3714 } 3715 inodedep->id_state &= ~IOSTARTED; 3716 inodedep->id_state |= COMPLETE; 3717 dp = (struct dinode *)bp->b_data + 3718 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3719 /* 3720 * If we had to rollback the inode allocation because of 3721 * bitmaps being incomplete, then simply restore it. 3722 * Keep the block dirty so that it will not be reclaimed until 3723 * all associated dependencies have been cleared and the 3724 * corresponding updates written to disk. 3725 */ 3726 if (inodedep->id_savedino != NULL) { 3727 *dp = *inodedep->id_savedino; 3728 FREE(inodedep->id_savedino, M_INODEDEP); 3729 inodedep->id_savedino = NULL; 3730 if ((bp->b_flags & B_DELWRI) == 0) 3731 stat_inode_bitmap++; 3732 bdirty(bp); 3733 return (1); 3734 } 3735 /* 3736 * Roll forward anything that had to be rolled back before 3737 * the inode could be updated. 
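 * That is, each allocdirect still on id_inoupdt had its pointer
 * replaced with ad_oldblkno when the I/O started; now that the safe
 * copy is on disk, ad_newblkno can be put back and the buffer
 * redirtied so that the real pointers go out with a later write.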
3738  */
3739 	hadchanges = 0;
3740 	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
3741 		nextadp = TAILQ_NEXT(adp, ad_next);
3742 		if (adp->ad_state & ATTACHED) {
3743 			lk.lkt_held = NOHOLDER;
3744 			panic("handle_written_inodeblock: new entry");
3745 		}
3746 		if (adp->ad_lbn < NDADDR) {
3747 			if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) {
3748 				lk.lkt_held = NOHOLDER;
3749 				panic("%s: %s #%ld mismatch %d != %d",
3750 				    "handle_written_inodeblock",
3751 				    "direct pointer", adp->ad_lbn,
3752 				    dp->di_db[adp->ad_lbn], adp->ad_oldblkno);
3753 			}
3754 			dp->di_db[adp->ad_lbn] = adp->ad_newblkno;
3755 		} else {
3756 			if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) {
3757 				lk.lkt_held = NOHOLDER;
3758 				panic("%s: %s #%ld allocated as %d",
3759 				    "handle_written_inodeblock",
3760 				    "indirect pointer", adp->ad_lbn - NDADDR,
3761 				    dp->di_ib[adp->ad_lbn - NDADDR]);
3762 			}
3763 			dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno;
3764 		}
3765 		adp->ad_state &= ~UNDONE;
3766 		adp->ad_state |= ATTACHED;
3767 		hadchanges = 1;
3768 	}
3769 	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
3770 		stat_direct_blk_ptrs++;
3771 	/*
3772 	 * Reset the file size to its most up-to-date value.
3773 	 */
3774 	if (inodedep->id_savedsize == -1) {
3775 		lk.lkt_held = NOHOLDER;
3776 		panic("handle_written_inodeblock: bad size");
3777 	}
3778 	if (dp->di_size != inodedep->id_savedsize) {
3779 		dp->di_size = inodedep->id_savedsize;
3780 		hadchanges = 1;
3781 	}
3782 	inodedep->id_savedsize = -1;
3783 	/*
3784 	 * If there were any rollbacks in the inode block, then it must be
3785 	 * marked dirty so that it will eventually get written back in
3786 	 * its correct form.
3787 	 */
3788 	if (hadchanges)
3789 		bdirty(bp);
3790 	/*
3791 	 * Process any allocdirects that completed during the update.
3792 	 */
3793 	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
3794 		handle_allocdirect_partdone(adp);
3795 	/*
3796 	 * Process deallocations that were held pending until the
3797 	 * inode had been written to disk. Freeing of the inode
3798 	 * is delayed until after all blocks have been freed to
3799 	 * avoid creation of new <vfsid, inum, lbn> triples
3800 	 * before the old ones have been deleted.
3801 	 */
3802 	filefree = NULL;
3803 	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
3804 		WORKLIST_REMOVE(wk);
3805 		switch (wk->wk_type) {
3806 
3807 		case D_FREEFILE:
3808 			/*
3809 			 * We defer adding filefree to the worklist until
3810 			 * all other additions have been made to ensure
3811 			 * that it will be done after all the old blocks
3812 			 * have been freed.
3813 			 */
3814 			if (filefree != NULL) {
3815 				lk.lkt_held = NOHOLDER;
3816 				panic("handle_written_inodeblock: filefree");
3817 			}
3818 			filefree = wk;
3819 			continue;
3820 
3821 		case D_MKDIR:
3822 			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
3823 			continue;
3824 
3825 		case D_DIRADD:
3826 			diradd_inode_written(WK_DIRADD(wk), inodedep);
3827 			continue;
3828 
3829 		case D_FREEBLKS:
3830 		case D_FREEFRAG:
3831 		case D_DIRREM:
3832 			add_to_worklist(wk);
3833 			continue;
3834 
3835 		case D_NEWDIRBLK:
3836 			free_newdirblk(WK_NEWDIRBLK(wk));
3837 			continue;
3838 
3839 		default:
3840 			lk.lkt_held = NOHOLDER;
3841 			panic("handle_written_inodeblock: Unknown type %s",
3842 			    TYPENAME(wk->wk_type));
3843 			/* NOTREACHED */
3844 		}
3845 	}
3846 	if (filefree != NULL) {
3847 		if (free_inodedep(inodedep) == 0) {
3848 			lk.lkt_held = NOHOLDER;
3849 			panic("handle_written_inodeblock: live inodedep");
3850 		}
3851 		add_to_worklist(filefree);
3852 		return (0);
3853 	}
3854 
3855 	/*
3856 	 * If no outstanding dependencies, free it.
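 * A nonzero return value tells softdep_disk_write_complete to
 * reattach this inodedep to the buffer's dependency list, since the
 * rollbacks above mean that the on-disk copy is still not the
 * up-to-date one.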
3857 */ 3858 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0) 3859 return (0); 3860 return (hadchanges); 3861 } 3862 3863 /* 3864 * Process a diradd entry after its dependent inode has been written. 3865 * This routine must be called with splbio interrupts blocked. 3866 */ 3867 static void 3868 diradd_inode_written(dap, inodedep) 3869 struct diradd *dap; 3870 struct inodedep *inodedep; 3871 { 3872 struct pagedep *pagedep; 3873 3874 dap->da_state |= COMPLETE; 3875 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3876 if (dap->da_state & DIRCHG) 3877 pagedep = dap->da_previous->dm_pagedep; 3878 else 3879 pagedep = dap->da_pagedep; 3880 LIST_REMOVE(dap, da_pdlist); 3881 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3882 } 3883 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3884 } 3885 3886 /* 3887 * Handle the completion of a mkdir dependency. 3888 */ 3889 static void 3890 handle_written_mkdir(mkdir, type) 3891 struct mkdir *mkdir; 3892 int type; 3893 { 3894 struct diradd *dap; 3895 struct pagedep *pagedep; 3896 3897 if (mkdir->md_state != type) { 3898 lk.lkt_held = NOHOLDER; 3899 panic("handle_written_mkdir: bad type"); 3900 } 3901 dap = mkdir->md_diradd; 3902 dap->da_state &= ~type; 3903 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 3904 dap->da_state |= DEPCOMPLETE; 3905 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3906 if (dap->da_state & DIRCHG) 3907 pagedep = dap->da_previous->dm_pagedep; 3908 else 3909 pagedep = dap->da_pagedep; 3910 LIST_REMOVE(dap, da_pdlist); 3911 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3912 } 3913 LIST_REMOVE(mkdir, md_mkdirs); 3914 WORKITEM_FREE(mkdir, D_MKDIR); 3915 } 3916 3917 /* 3918 * Called from within softdep_disk_write_complete above. 3919 * A write operation was just completed. Removed inodes can 3920 * now be freed and associated block pointers may be committed. 3921 * Note that this routine is always called from interrupt level 3922 * with further splbio interrupts blocked. 3923 */ 3924 static int 3925 handle_written_filepage(pagedep, bp) 3926 struct pagedep *pagedep; 3927 struct buf *bp; /* buffer containing the written page */ 3928 { 3929 struct dirrem *dirrem; 3930 struct diradd *dap, *nextdap; 3931 struct direct *ep; 3932 int i, chgs; 3933 3934 if ((pagedep->pd_state & IOSTARTED) == 0) { 3935 lk.lkt_held = NOHOLDER; 3936 panic("handle_written_filepage: not started"); 3937 } 3938 pagedep->pd_state &= ~IOSTARTED; 3939 /* 3940 * Process any directory removals that have been committed. 3941 */ 3942 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 3943 LIST_REMOVE(dirrem, dm_next); 3944 dirrem->dm_dirinum = pagedep->pd_ino; 3945 add_to_worklist(&dirrem->dm_list); 3946 } 3947 /* 3948 * Free any directory additions that have been committed. 3949 * If it is a newly allocated block, we have to wait until 3950 * the on-disk directory inode claims the new block. 3951 */ 3952 if ((pagedep->pd_state & NEWBLOCK) == 0) 3953 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 3954 free_diradd(dap); 3955 /* 3956 * Uncommitted directory entries must be restored. 
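 * Each entry whose d_ino was rolled back to zero (or, for a DIRCHG
 * entry, to the prior inode number) when the I/O started now gets
 * da_newinum put back in the in-core page, and the page is
 * redirtied below.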
3957  */
3958 	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
3959 		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
3960 		     dap = nextdap) {
3961 			nextdap = LIST_NEXT(dap, da_pdlist);
3962 			if (dap->da_state & ATTACHED) {
3963 				lk.lkt_held = NOHOLDER;
3964 				panic("handle_written_filepage: attached");
3965 			}
3966 			ep = (struct direct *)
3967 			    ((char *)bp->b_data + dap->da_offset);
3968 			ep->d_ino = dap->da_newinum;
3969 			dap->da_state &= ~UNDONE;
3970 			dap->da_state |= ATTACHED;
3971 			chgs = 1;
3972 			/*
3973 			 * If the inode referenced by the directory has
3974 			 * been written out, then the dependency can be
3975 			 * moved to the pending list.
3976 			 */
3977 			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
3978 				LIST_REMOVE(dap, da_pdlist);
3979 				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
3980 				    da_pdlist);
3981 			}
3982 		}
3983 	}
3984 	/*
3985 	 * If there were any rollbacks in the directory, then it must be
3986 	 * marked dirty so that it will eventually get written back in
3987 	 * its correct form.
3988 	 */
3989 	if (chgs) {
3990 		if ((bp->b_flags & B_DELWRI) == 0)
3991 			stat_dir_entry++;
3992 		bdirty(bp);
3993 		return (1);
3994 	}
3995 	/*
3996 	 * If we are not waiting for a new directory block to be
3997 	 * claimed by its inode, then the pagedep will be freed.
3998 	 * Otherwise it will remain to track any new entries on
3999 	 * the page in case they are fsync'ed.
4000 	 */
4001 	if ((pagedep->pd_state & NEWBLOCK) == 0) {
4002 		LIST_REMOVE(pagedep, pd_hash);
4003 		WORKITEM_FREE(pagedep, D_PAGEDEP);
4004 	}
4005 	return (0);
4006 }
4007 
4008 /*
4009  * Writing back in-core inode structures.
4010  *
4011  * The file system only accesses an inode's contents when it occupies an
4012  * "in-core" inode structure. These "in-core" structures are separate from
4013  * the page frames used to cache inode blocks. Only the latter are
4014  * transferred to/from the disk. So, when the updated contents of the
4015  * "in-core" inode structure are copied to the corresponding in-memory inode
4016  * block, the dependencies are also transferred. The following procedure is
4017  * called when copying a dirty "in-core" inode to a cached inode block.
4018  */
4019 
4020 /*
4021  * Called when an inode is loaded from disk. If the effective link count
4022  * differed from the actual link count when it was last flushed, then we
4023  * need to ensure that the correct effective link count is put back.
4024  */
4025 void
4026 softdep_load_inodeblock(ip)
4027 	struct inode *ip;	/* the "in_core" copy of the inode */
4028 {
4029 	struct inodedep *inodedep;
4030 
4031 	/*
4032 	 * Check for alternate nlink count.
4033 	 */
4034 	ip->i_effnlink = ip->i_nlink;
4035 	ACQUIRE_LOCK(&lk);
4036 	if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
4037 		FREE_LOCK(&lk);
4038 		return;
4039 	}
4040 	ip->i_effnlink -= inodedep->id_nlinkdelta;
4041 	if (inodedep->id_state & SPACECOUNTED)
4042 		ip->i_flag |= IN_SPACECOUNTED;
4043 	FREE_LOCK(&lk);
4044 }
4045 
4046 /*
4047  * This routine is called just before the "in-core" inode
4048  * information is to be copied to the in-memory inode block.
4049  * Recall that an inode block contains several inodes. If
4050  * the force flag is set, then the dependencies will be
4051  * cleared so that the update can always be made. Note that
4052  * the buffer is locked when this routine is called, so we
4053  * will never be in the middle of writing the inode block
4054  * to disk.
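 * This routine is typically reached via UFS_UPDATE, just before the
 * dinode is copied into the buffer, so that the dependencies travel
 * with the data they describe.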
4055  */
4056 void
4057 softdep_update_inodeblock(ip, bp, waitfor)
4058 	struct inode *ip;	/* the "in_core" copy of the inode */
4059 	struct buf *bp;		/* the buffer containing the inode block */
4060 	int waitfor;		/* nonzero => update must be allowed */
4061 {
4062 	struct inodedep *inodedep;
4063 	struct worklist *wk;
4064 	int error, gotit;
4065 
4066 	/*
4067 	 * If the effective link count is not equal to the actual link
4068 	 * count, then we must track the difference in an inodedep while
4069 	 * the inode is (potentially) tossed out of the cache. Otherwise,
4070 	 * if there is no existing inodedep, then there are no dependencies
4071 	 * to track.
4072 	 */
4073 	ACQUIRE_LOCK(&lk);
4074 	if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) {
4075 		FREE_LOCK(&lk);
4076 		if (ip->i_effnlink != ip->i_nlink)
4077 			panic("softdep_update_inodeblock: bad link count");
4078 		return;
4079 	}
4080 	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) {
4081 		FREE_LOCK(&lk);
4082 		panic("softdep_update_inodeblock: bad delta");
4083 	}
4084 	/*
4085 	 * Changes have been initiated. Anything depending on these
4086 	 * changes cannot occur until this inode has been written.
4087 	 */
4088 	inodedep->id_state &= ~COMPLETE;
4089 	if ((inodedep->id_state & ONWORKLIST) == 0)
4090 		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
4091 	/*
4092 	 * Any new dependencies associated with the incore inode must
4093 	 * now be moved to the list associated with the buffer holding
4094 	 * the in-memory copy of the inode. Once merged, process any
4095 	 * allocdirects that are completed by the merger.
4096 	 */
4097 	merge_inode_lists(inodedep);
4098 	if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
4099 		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
4100 	/*
4101 	 * Now that the inode has been pushed into the buffer, the
4102 	 * operations dependent on the inode being written to disk
4103 	 * can be moved to the id_bufwait so that they will be
4104 	 * processed when the buffer I/O completes.
4105 	 */
4106 	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
4107 		WORKLIST_REMOVE(wk);
4108 		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
4109 	}
4110 	/*
4111 	 * Newly allocated inodes cannot be written until the bitmap
4112 	 * that allocates them has been written (indicated by
4113 	 * DEPCOMPLETE being set in id_state). If we are doing a
4114 	 * forced sync (e.g., an fsync on a file), we force the bitmap
4115 	 * to be written so that the update can be done.
4116 	 */
4117 	if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) {
4118 		FREE_LOCK(&lk);
4119 		return;
4120 	}
4121 	gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT);
4122 	FREE_LOCK(&lk);
4123 	if (gotit &&
4124 	    (error = BUF_WRITE(inodedep->id_buf)) != 0)
4125 		softdep_error("softdep_update_inodeblock: bwrite", error);
4126 	if ((inodedep->id_state & DEPCOMPLETE) == 0)
4127 		panic("softdep_update_inodeblock: update failed");
4128 }
4129 
4130 /*
4131  * Merge the new inode dependency list (id_newinoupdt) into the old
4132  * inode dependency list (id_inoupdt). This routine must be called
4133  * with splbio interrupts blocked.
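 * Both lists are kept sorted by ad_lbn, so this is a standard sorted
 * list merge; when the same logical block appears on both lists,
 * allocdirect_merge folds the older entry into the newer one.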
4134 */ 4135 static void 4136 merge_inode_lists(inodedep) 4137 struct inodedep *inodedep; 4138 { 4139 struct allocdirect *listadp, *newadp; 4140 4141 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4142 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) { 4143 if (listadp->ad_lbn < newadp->ad_lbn) { 4144 listadp = TAILQ_NEXT(listadp, ad_next); 4145 continue; 4146 } 4147 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4148 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4149 if (listadp->ad_lbn == newadp->ad_lbn) { 4150 allocdirect_merge(&inodedep->id_inoupdt, newadp, 4151 listadp); 4152 listadp = newadp; 4153 } 4154 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4155 } 4156 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) { 4157 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4158 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next); 4159 } 4160 } 4161 4162 /* 4163 * If we are doing an fsync, then we must ensure that any directory 4164 * entries for the inode have been written after the inode gets to disk. 4165 */ 4166 int 4167 softdep_fsync(vp) 4168 struct vnode *vp; /* the "in_core" copy of the inode */ 4169 { 4170 struct inodedep *inodedep; 4171 struct pagedep *pagedep; 4172 struct worklist *wk; 4173 struct diradd *dap; 4174 struct mount *mnt; 4175 struct vnode *pvp; 4176 struct inode *ip; 4177 struct buf *bp; 4178 struct fs *fs; 4179 struct thread *td = curthread; 4180 int error, flushparent; 4181 ino_t parentino; 4182 ufs_lbn_t lbn; 4183 4184 ip = VTOI(vp); 4185 fs = ip->i_fs; 4186 ACQUIRE_LOCK(&lk); 4187 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4188 FREE_LOCK(&lk); 4189 return (0); 4190 } 4191 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4192 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4193 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4194 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4195 FREE_LOCK(&lk); 4196 panic("softdep_fsync: pending ops"); 4197 } 4198 for (error = 0, flushparent = 0; ; ) { 4199 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4200 break; 4201 if (wk->wk_type != D_DIRADD) { 4202 FREE_LOCK(&lk); 4203 panic("softdep_fsync: Unexpected type %s", 4204 TYPENAME(wk->wk_type)); 4205 } 4206 dap = WK_DIRADD(wk); 4207 /* 4208 * Flush our parent if this directory entry has a MKDIR_PARENT 4209 * dependency or is contained in a newly allocated block. 4210 */ 4211 if (dap->da_state & DIRCHG) 4212 pagedep = dap->da_previous->dm_pagedep; 4213 else 4214 pagedep = dap->da_pagedep; 4215 mnt = pagedep->pd_mnt; 4216 parentino = pagedep->pd_ino; 4217 lbn = pagedep->pd_lbn; 4218 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4219 FREE_LOCK(&lk); 4220 panic("softdep_fsync: dirty"); 4221 } 4222 if ((dap->da_state & MKDIR_PARENT) || 4223 (pagedep->pd_state & NEWBLOCK)) 4224 flushparent = 1; 4225 else 4226 flushparent = 0; 4227 /* 4228 * If we are being fsync'ed as part of vgone'ing this vnode, 4229 * then we will not be able to release and recover the 4230 * vnode below, so we just have to give up on writing its 4231 * directory entry out. It will eventually be written, just 4232 * not now, but then the user was not asking to have it 4233 * written, so we are not breaking any promises. 4234 */ 4235 if (vp->v_flag & VXLOCK) 4236 break; 4237 /* 4238 * We prevent deadlock by always fetching inodes from the 4239 * root, moving down the directory tree. Thus, when fetching 4240 * our parent directory, we first try to get the lock. 
If 4241 * that fails, we must unlock ourselves before requesting 4242 * the lock on our parent. See the comment in ufs_lookup 4243 * for details on possible races. 4244 */ 4245 FREE_LOCK(&lk); 4246 if (VFS_VGET(mnt, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp)) { 4247 VOP_UNLOCK(vp, 0, td); 4248 error = VFS_VGET(mnt, parentino, LK_EXCLUSIVE, &pvp); 4249 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4250 if (error != 0) 4251 return (error); 4252 } 4253 /* 4254 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 4255 * that are contained in direct blocks will be resolved by 4256 * doing a UFS_UPDATE. Pagedeps contained in indirect blocks 4257 * may require a complete sync'ing of the directory. So, we 4258 * try the cheap and fast UFS_UPDATE first, and if that fails, 4259 * then we do the slower VOP_FSYNC of the directory. 4260 */ 4261 if (flushparent) { 4262 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4263 vput(pvp); 4264 return (error); 4265 } 4266 if ((pagedep->pd_state & NEWBLOCK) && 4267 (error = VOP_FSYNC(pvp, td->td_ucred, MNT_WAIT, td))) { 4268 vput(pvp); 4269 return (error); 4270 } 4271 } 4272 /* 4273 * Flush directory page containing the inode's name. 4274 */ 4275 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 4276 &bp); 4277 if (error == 0) 4278 error = BUF_WRITE(bp); 4279 else 4280 brelse(bp); 4281 vput(pvp); 4282 if (error != 0) 4283 return (error); 4284 ACQUIRE_LOCK(&lk); 4285 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4286 break; 4287 } 4288 FREE_LOCK(&lk); 4289 return (0); 4290 } 4291 4292 /* 4293 * Flush all the dirty bitmaps associated with the block device 4294 * before flushing the rest of the dirty blocks so as to reduce 4295 * the number of dependencies that will have to be rolled back. 4296 */ 4297 void 4298 softdep_fsync_mountdev(vp) 4299 struct vnode *vp; 4300 { 4301 struct buf *bp, *nbp; 4302 struct worklist *wk; 4303 4304 if (!vn_isdisk(vp, NULL)) 4305 panic("softdep_fsync_mountdev: vnode not a disk"); 4306 ACQUIRE_LOCK(&lk); 4307 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 4308 nbp = TAILQ_NEXT(bp, b_vnbufs); 4309 /* 4310 * If it is already scheduled, skip to the next buffer. 4311 */ 4312 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4313 continue; 4314 if ((bp->b_flags & B_DELWRI) == 0) { 4315 FREE_LOCK(&lk); 4316 panic("softdep_fsync_mountdev: not dirty"); 4317 } 4318 /* 4319 * We are only interested in bitmaps with outstanding 4320 * dependencies. 4321 */ 4322 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4323 wk->wk_type != D_BMSAFEMAP || 4324 (bp->b_xflags & BX_BKGRDINPROG)) { 4325 BUF_UNLOCK(bp); 4326 continue; 4327 } 4328 bremfree(bp); 4329 FREE_LOCK(&lk); 4330 (void) bawrite(bp); 4331 ACQUIRE_LOCK(&lk); 4332 /* 4333 * Since we may have slept during the I/O, we need 4334 * to start from a known point. 4335 */ 4336 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4337 } 4338 drain_output(vp, 1); 4339 FREE_LOCK(&lk); 4340 } 4341 4342 /* 4343 * This routine is called when we are trying to synchronously flush a 4344 * file. This routine must eliminate any filesystem metadata dependencies 4345 * so that the syncing routine can succeed by pushing the dirty blocks 4346 * associated with the file. If any I/O errors occur, they are returned. 
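 * This routine is intended to be called from the VOP_FSYNC path
 * (hence the vop_fsync_args argument), so any error returned here
 * surfaces as the caller's fsync error.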
4347 */ 4348 int 4349 softdep_sync_metadata(ap) 4350 struct vop_fsync_args /* { 4351 struct vnode *a_vp; 4352 struct ucred *a_cred; 4353 int a_waitfor; 4354 struct thread *a_td; 4355 } */ *ap; 4356 { 4357 struct vnode *vp = ap->a_vp; 4358 struct pagedep *pagedep; 4359 struct allocdirect *adp; 4360 struct allocindir *aip; 4361 struct buf *bp, *nbp; 4362 struct worklist *wk; 4363 int i, error, waitfor; 4364 4365 /* 4366 * Check whether this vnode is involved in a filesystem 4367 * that is doing soft dependency processing. 4368 */ 4369 if (!vn_isdisk(vp, NULL)) { 4370 if (!DOINGSOFTDEP(vp)) 4371 return (0); 4372 } else 4373 if (vp->v_rdev->si_mountpoint == NULL || 4374 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4375 return (0); 4376 /* 4377 * Ensure that any direct block dependencies have been cleared. 4378 */ 4379 ACQUIRE_LOCK(&lk); 4380 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4381 FREE_LOCK(&lk); 4382 return (error); 4383 } 4384 /* 4385 * For most files, the only metadata dependencies are the 4386 * cylinder group maps that allocate their inode or blocks. 4387 * The block allocation dependencies can be found by traversing 4388 * the dependency lists for any buffers that remain on their 4389 * dirty buffer list. The inode allocation dependency will 4390 * be resolved when the inode is updated with MNT_WAIT. 4391 * This work is done in two passes. The first pass grabs most 4392 * of the buffers and begins asynchronously writing them. The 4393 * only way to wait for these asynchronous writes is to sleep 4394 * on the filesystem vnode which may stay busy for a long time 4395 * if the filesystem is active. So, instead, we make a second 4396 * pass over the dependencies blocking on each write. In the 4397 * usual case we will be blocking against a write that we 4398 * initiated, so when it is done the dependency will have been 4399 * resolved. Thus the second pass is expected to end quickly. 4400 */ 4401 waitfor = MNT_NOWAIT; 4402 top: 4403 /* 4404 * We must wait for any I/O in progress to finish so that 4405 * all potential buffers on the dirty list will be visible. 4406 */ 4407 drain_output(vp, 1); 4408 if (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT) == 0) { 4409 FREE_LOCK(&lk); 4410 return (0); 4411 } 4412 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4413 /* While syncing snapshots, we must allow recursive lookups */ 4414 bp->b_lock.lk_flags |= LK_CANRECURSE; 4415 loop: 4416 /* 4417 * As we hold the buffer locked, none of its dependencies 4418 * will disappear. 
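 * Each case below follows the same pattern: find the buffer whose
 * write would resolve the dependency, push it (asynchronously on the
 * first pass, synchronously on the second), and move on.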
4419 */ 4420 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4421 switch (wk->wk_type) { 4422 4423 case D_ALLOCDIRECT: 4424 adp = WK_ALLOCDIRECT(wk); 4425 if (adp->ad_state & DEPCOMPLETE) 4426 continue; 4427 nbp = adp->ad_buf; 4428 if (getdirtybuf(&nbp, waitfor) == 0) 4429 continue; 4430 FREE_LOCK(&lk); 4431 if (waitfor == MNT_NOWAIT) { 4432 bawrite(nbp); 4433 } else if ((error = BUF_WRITE(nbp)) != 0) { 4434 break; 4435 } 4436 ACQUIRE_LOCK(&lk); 4437 continue; 4438 4439 case D_ALLOCINDIR: 4440 aip = WK_ALLOCINDIR(wk); 4441 if (aip->ai_state & DEPCOMPLETE) 4442 continue; 4443 nbp = aip->ai_buf; 4444 if (getdirtybuf(&nbp, waitfor) == 0) 4445 continue; 4446 FREE_LOCK(&lk); 4447 if (waitfor == MNT_NOWAIT) { 4448 bawrite(nbp); 4449 } else if ((error = BUF_WRITE(nbp)) != 0) { 4450 break; 4451 } 4452 ACQUIRE_LOCK(&lk); 4453 continue; 4454 4455 case D_INDIRDEP: 4456 restart: 4457 4458 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4459 if (aip->ai_state & DEPCOMPLETE) 4460 continue; 4461 nbp = aip->ai_buf; 4462 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4463 goto restart; 4464 FREE_LOCK(&lk); 4465 if ((error = BUF_WRITE(nbp)) != 0) { 4466 break; 4467 } 4468 ACQUIRE_LOCK(&lk); 4469 goto restart; 4470 } 4471 continue; 4472 4473 case D_INODEDEP: 4474 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4475 WK_INODEDEP(wk)->id_ino)) != 0) { 4476 FREE_LOCK(&lk); 4477 break; 4478 } 4479 continue; 4480 4481 case D_PAGEDEP: 4482 /* 4483 * We are trying to sync a directory that may 4484 * have dependencies on both its own metadata 4485 * and/or dependencies on the inodes of any 4486 * recently allocated files. We walk its diradd 4487 * lists pushing out the associated inode. 4488 */ 4489 pagedep = WK_PAGEDEP(wk); 4490 for (i = 0; i < DAHASHSZ; i++) { 4491 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4492 continue; 4493 if ((error = 4494 flush_pagedep_deps(vp, pagedep->pd_mnt, 4495 &pagedep->pd_diraddhd[i]))) { 4496 FREE_LOCK(&lk); 4497 break; 4498 } 4499 } 4500 continue; 4501 4502 case D_MKDIR: 4503 /* 4504 * This case should never happen if the vnode has 4505 * been properly sync'ed. However, if this function 4506 * is used at a place where the vnode has not yet 4507 * been sync'ed, this dependency can show up. So, 4508 * rather than panic, just flush it. 4509 */ 4510 nbp = WK_MKDIR(wk)->md_buf; 4511 if (getdirtybuf(&nbp, waitfor) == 0) 4512 continue; 4513 FREE_LOCK(&lk); 4514 if (waitfor == MNT_NOWAIT) { 4515 bawrite(nbp); 4516 } else if ((error = BUF_WRITE(nbp)) != 0) { 4517 break; 4518 } 4519 ACQUIRE_LOCK(&lk); 4520 continue; 4521 4522 case D_BMSAFEMAP: 4523 /* 4524 * This case should never happen if the vnode has 4525 * been properly sync'ed. However, if this function 4526 * is used at a place where the vnode has not yet 4527 * been sync'ed, this dependency can show up. So, 4528 * rather than panic, just flush it. 
4529 */ 4530 nbp = WK_BMSAFEMAP(wk)->sm_buf; 4531 if (getdirtybuf(&nbp, waitfor) == 0) 4532 continue; 4533 FREE_LOCK(&lk); 4534 if (waitfor == MNT_NOWAIT) { 4535 bawrite(nbp); 4536 } else if ((error = BUF_WRITE(nbp)) != 0) { 4537 break; 4538 } 4539 ACQUIRE_LOCK(&lk); 4540 continue; 4541 4542 default: 4543 FREE_LOCK(&lk); 4544 panic("softdep_sync_metadata: Unknown type %s", 4545 TYPENAME(wk->wk_type)); 4546 /* NOTREACHED */ 4547 } 4548 /* We reach here only in error and unlocked */ 4549 if (error == 0) 4550 panic("softdep_sync_metadata: zero error"); 4551 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 4552 bawrite(bp); 4553 return (error); 4554 } 4555 (void) getdirtybuf(&TAILQ_NEXT(bp, b_vnbufs), MNT_WAIT); 4556 nbp = TAILQ_NEXT(bp, b_vnbufs); 4557 FREE_LOCK(&lk); 4558 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 4559 bawrite(bp); 4560 ACQUIRE_LOCK(&lk); 4561 if (nbp != NULL) { 4562 bp = nbp; 4563 goto loop; 4564 } 4565 /* 4566 * The brief unlock is to allow any pent up dependency 4567 * processing to be done. Then proceed with the second pass. 4568 */ 4569 if (waitfor == MNT_NOWAIT) { 4570 waitfor = MNT_WAIT; 4571 FREE_LOCK(&lk); 4572 ACQUIRE_LOCK(&lk); 4573 goto top; 4574 } 4575 4576 /* 4577 * If we have managed to get rid of all the dirty buffers, 4578 * then we are done. For certain directories and block 4579 * devices, we may need to do further work. 4580 * 4581 * We must wait for any I/O in progress to finish so that 4582 * all potential buffers on the dirty list will be visible. 4583 */ 4584 drain_output(vp, 1); 4585 if (TAILQ_FIRST(&vp->v_dirtyblkhd) == NULL) { 4586 FREE_LOCK(&lk); 4587 return (0); 4588 } 4589 4590 FREE_LOCK(&lk); 4591 /* 4592 * If we are trying to sync a block device, some of its buffers may 4593 * contain metadata that cannot be written until the contents of some 4594 * partially written files have been written to disk. The only easy 4595 * way to accomplish this is to sync the entire filesystem (luckily 4596 * this happens rarely). 4597 */ 4598 if (vn_isdisk(vp, NULL) && 4599 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 4600 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred, 4601 ap->a_td)) != 0) 4602 return (error); 4603 return (0); 4604 } 4605 4606 /* 4607 * Flush the dependencies associated with an inodedep. 4608 * Called with splbio blocked. 4609 */ 4610 static int 4611 flush_inodedep_deps(fs, ino) 4612 struct fs *fs; 4613 ino_t ino; 4614 { 4615 struct inodedep *inodedep; 4616 struct allocdirect *adp; 4617 int error, waitfor; 4618 struct buf *bp; 4619 4620 /* 4621 * This work is done in two passes. The first pass grabs most 4622 * of the buffers and begins asynchronously writing them. The 4623 * only way to wait for these asynchronous writes is to sleep 4624 * on the filesystem vnode which may stay busy for a long time 4625 * if the filesystem is active. So, instead, we make a second 4626 * pass over the dependencies blocking on each write. In the 4627 * usual case we will be blocking against a write that we 4628 * initiated, so when it is done the dependency will have been 4629 * resolved. Thus the second pass is expected to end quickly. 4630 * We give a brief window at the top of the loop to allow 4631 * any pending I/O to complete. 
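 * The FREE_LOCK/ACQUIRE_LOCK pair at the top of the loop below is
 * that window: momentarily dropping the lock lets queued I/O
 * completions deliver their dependency processing.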
4632  */
4633 	for (waitfor = MNT_NOWAIT; ; ) {
4634 		FREE_LOCK(&lk);
4635 		ACQUIRE_LOCK(&lk);
4636 		if (inodedep_lookup(fs, ino, 0, &inodedep) == 0)
4637 			return (0);
4638 		TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) {
4639 			if (adp->ad_state & DEPCOMPLETE)
4640 				continue;
4641 			bp = adp->ad_buf;
4642 			if (getdirtybuf(&bp, waitfor) == 0) {
4643 				if (waitfor == MNT_NOWAIT)
4644 					continue;
4645 				break;
4646 			}
4647 			FREE_LOCK(&lk);
4648 			if (waitfor == MNT_NOWAIT) {
4649 				bawrite(bp);
4650 			} else if ((error = BUF_WRITE(bp)) != 0) {
4651 				ACQUIRE_LOCK(&lk);
4652 				return (error);
4653 			}
4654 			ACQUIRE_LOCK(&lk);
4655 			break;
4656 		}
4657 		if (adp != NULL)
4658 			continue;
4659 		TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) {
4660 			if (adp->ad_state & DEPCOMPLETE)
4661 				continue;
4662 			bp = adp->ad_buf;
4663 			if (getdirtybuf(&bp, waitfor) == 0) {
4664 				if (waitfor == MNT_NOWAIT)
4665 					continue;
4666 				break;
4667 			}
4668 			FREE_LOCK(&lk);
4669 			if (waitfor == MNT_NOWAIT) {
4670 				bawrite(bp);
4671 			} else if ((error = BUF_WRITE(bp)) != 0) {
4672 				ACQUIRE_LOCK(&lk);
4673 				return (error);
4674 			}
4675 			ACQUIRE_LOCK(&lk);
4676 			break;
4677 		}
4678 		if (adp != NULL)
4679 			continue;
4680 		/*
4681 		 * If this was pass 2, we are done; otherwise, do pass 2.
4682 		 */
4683 		if (waitfor == MNT_WAIT)
4684 			break;
4685 		waitfor = MNT_WAIT;
4686 	}
4687 	/*
4688 	 * Try freeing inodedep in case all dependencies have been removed.
4689 	 */
4690 	if (inodedep_lookup(fs, ino, 0, &inodedep) != 0)
4691 		(void) free_inodedep(inodedep);
4692 	return (0);
4693 }
4694 
4695 /*
4696  * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
4697  * Called with splbio blocked.
4698  */
4699 static int
4700 flush_pagedep_deps(pvp, mp, diraddhdp)
4701 	struct vnode *pvp;
4702 	struct mount *mp;
4703 	struct diraddhd *diraddhdp;
4704 {
4705 	struct thread *td = curthread;
4706 	struct inodedep *inodedep;
4707 	struct ufsmount *ump;
4708 	struct diradd *dap;
4709 	struct vnode *vp;
4710 	int gotit, error = 0;
4711 	struct buf *bp;
4712 	ino_t inum;
4713 
4714 	ump = VFSTOUFS(mp);
4715 	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
4716 		/*
4717 		 * Flush ourselves if this directory entry
4718 		 * has a MKDIR_PARENT dependency.
4719 		 */
4720 		if (dap->da_state & MKDIR_PARENT) {
4721 			FREE_LOCK(&lk);
4722 			if ((error = UFS_UPDATE(pvp, 1)) != 0)
4723 				break;
4724 			ACQUIRE_LOCK(&lk);
4725 			/*
4726 			 * If that cleared dependencies, go on to next.
4727 			 */
4728 			if (dap != LIST_FIRST(diraddhdp))
4729 				continue;
4730 			if (dap->da_state & MKDIR_PARENT) {
4731 				FREE_LOCK(&lk);
4732 				panic("flush_pagedep_deps: MKDIR_PARENT");
4733 			}
4734 		}
4735 		/*
4736 		 * A newly allocated directory must have its "." and
4737 		 * ".." entries written out before its name can be
4738 		 * committed in its parent. We do not want or need
4739 		 * the full semantics of a synchronous VOP_FSYNC as
4740 		 * that may end up here again, once for each directory
4741 		 * level in the filesystem. Instead, we push the blocks
4742 		 * and wait for them to clear. We have to fsync twice
4743 		 * because the first call may choose to defer blocks
4744 		 * that still have dependencies, but deferral will
4745 		 * happen at most once.
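 * The sequence below is effectively
 *
 *	VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td);
 *	VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td);
 *	drain_output(vp, 0);
 *
 * with the second fsync picking up any blocks the first deferred.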
4746 */ 4747 inum = dap->da_newinum; 4748 if (dap->da_state & MKDIR_BODY) { 4749 FREE_LOCK(&lk); 4750 if ((error = VFS_VGET(mp, inum, LK_EXCLUSIVE, &vp))) 4751 break; 4752 if ((error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)) || 4753 (error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) { 4754 vput(vp); 4755 break; 4756 } 4757 drain_output(vp, 0); 4758 vput(vp); 4759 ACQUIRE_LOCK(&lk); 4760 /* 4761 * If that cleared dependencies, go on to next. 4762 */ 4763 if (dap != LIST_FIRST(diraddhdp)) 4764 continue; 4765 if (dap->da_state & MKDIR_BODY) { 4766 FREE_LOCK(&lk); 4767 panic("flush_pagedep_deps: MKDIR_BODY"); 4768 } 4769 } 4770 /* 4771 * Flush the inode on which the directory entry depends. 4772 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4773 * the only remaining dependency is that the updated inode 4774 * count must get pushed to disk. The inode has already 4775 * been pushed into its inode buffer (via VOP_UPDATE) at 4776 * the time of the reference count change. So we need only 4777 * locate that buffer, ensure that there will be no rollback 4778 * caused by a bitmap dependency, then write the inode buffer. 4779 */ 4780 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4781 FREE_LOCK(&lk); 4782 panic("flush_pagedep_deps: lost inode"); 4783 } 4784 /* 4785 * If the inode still has bitmap dependencies, 4786 * push them to disk. 4787 */ 4788 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4789 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4790 FREE_LOCK(&lk); 4791 if (gotit && 4792 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4793 break; 4794 ACQUIRE_LOCK(&lk); 4795 if (dap != LIST_FIRST(diraddhdp)) 4796 continue; 4797 } 4798 /* 4799 * If the inode is still sitting in a buffer waiting 4800 * to be written, push it to disk. 4801 */ 4802 FREE_LOCK(&lk); 4803 if ((error = bread(ump->um_devvp, 4804 fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4805 (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) { 4806 brelse(bp); 4807 break; 4808 } 4809 if ((error = BUF_WRITE(bp)) != 0) 4810 break; 4811 ACQUIRE_LOCK(&lk); 4812 /* 4813 * If we have failed to get rid of all the dependencies 4814 * then something is seriously wrong. 4815 */ 4816 if (dap == LIST_FIRST(diraddhdp)) { 4817 FREE_LOCK(&lk); 4818 panic("flush_pagedep_deps: flush failed"); 4819 } 4820 } 4821 if (error) 4822 ACQUIRE_LOCK(&lk); 4823 return (error); 4824 } 4825 4826 /* 4827 * A large burst of file addition or deletion activity can drive the 4828 * memory load excessively high. First attempt to slow things down 4829 * using the techniques below. If that fails, this routine requests 4830 * the offending operations to fall back to running synchronously 4831 * until the memory load returns to a reasonable level. 4832 */ 4833 int 4834 softdep_slowdown(vp) 4835 struct vnode *vp; 4836 { 4837 int max_softdeps_hard; 4838 4839 max_softdeps_hard = max_softdeps * 11 / 10; 4840 if (num_dirrem < max_softdeps_hard / 2 && 4841 num_inodedep < max_softdeps_hard) 4842 return (0); 4843 stat_sync_limit_hit += 1; 4844 return (1); 4845 } 4846 4847 /* 4848 * Called by the allocation routines when they are about to fail 4849 * in the hope that we can free up some disk space. 4850 * 4851 * First check to see if the work list has anything on it. If it has, 4852 * clean up entries until we successfully free some space. Because this 4853 * process holds inodes locked, we cannot handle any remove requests 4854 * that might block on a locked inode as that could lead to deadlock. 
4855 * If the worklist yields no free space, encourage the syncer daemon 4856 * to help us. In no event will we try for longer than tickdelay seconds. 4857 */ 4858 int 4859 softdep_request_cleanup(fs, vp) 4860 struct fs *fs; 4861 struct vnode *vp; 4862 { 4863 long starttime, needed; 4864 4865 needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize; 4866 starttime = time_second + tickdelay; 4867 if (UFS_UPDATE(vp, 1) != 0) 4868 return (0); 4869 while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) { 4870 if (time_second > starttime) 4871 return (0); 4872 if (num_on_worklist > 0 && 4873 process_worklist_item(NULL, LK_NOWAIT) != -1) { 4874 stat_worklist_push += 1; 4875 continue; 4876 } 4877 request_cleanup(FLUSH_REMOVE_WAIT, 0); 4878 } 4879 return (1); 4880 } 4881 4882 /* 4883 * If memory utilization has gotten too high, deliberately slow things 4884 * down and speed up the I/O processing. 4885 */ 4886 static int 4887 request_cleanup(resource, islocked) 4888 int resource; 4889 int islocked; 4890 { 4891 struct thread *td = curthread; 4892 4893 /* 4894 * We never hold up the filesystem syncer process. 4895 */ 4896 if (td == filesys_syncer) 4897 return (0); 4898 /* 4899 * First check to see if the work list has gotten backlogged. 4900 * If it has, co-opt this process to help clean up two entries. 4901 * Because this process may hold inodes locked, we cannot 4902 * handle any remove requests that might block on a locked 4903 * inode as that could lead to deadlock. 4904 */ 4905 if (num_on_worklist > max_softdeps / 10) { 4906 if (islocked) 4907 FREE_LOCK(&lk); 4908 process_worklist_item(NULL, LK_NOWAIT); 4909 process_worklist_item(NULL, LK_NOWAIT); 4910 stat_worklist_push += 2; 4911 if (islocked) 4912 ACQUIRE_LOCK(&lk); 4913 return(1); 4914 } 4915 /* 4916 * Next, we attempt to speed up the syncer process. If that 4917 * is successful, then we allow the process to continue. 4918 */ 4919 if (speedup_syncer() && resource != FLUSH_REMOVE_WAIT) 4920 return(0); 4921 /* 4922 * If we are resource constrained on inode dependencies, try 4923 * flushing some dirty inodes. Otherwise, we are constrained 4924 * by file deletions, so try accelerating flushes of directories 4925 * with removal dependencies. We would like to do the cleanup 4926 * here, but we probably hold an inode locked at this point and 4927 * that might deadlock against one that we try to clean. So, 4928 * the best that we can do is request the syncer daemon to do 4929 * the cleanup for us. 4930 */ 4931 switch (resource) { 4932 4933 case FLUSH_INODES: 4934 stat_ino_limit_push += 1; 4935 req_clear_inodedeps += 1; 4936 stat_countp = &stat_ino_limit_hit; 4937 break; 4938 4939 case FLUSH_REMOVE: 4940 case FLUSH_REMOVE_WAIT: 4941 stat_blk_limit_push += 1; 4942 req_clear_remove += 1; 4943 stat_countp = &stat_blk_limit_hit; 4944 break; 4945 4946 default: 4947 if (islocked) 4948 FREE_LOCK(&lk); 4949 panic("request_cleanup: unknown type"); 4950 } 4951 /* 4952 * Hopefully the syncer daemon will catch up and awaken us. 4953 * We wait at most tickdelay before proceeding in any case. 4954 */ 4955 if (islocked == 0) 4956 ACQUIRE_LOCK(&lk); 4957 proc_waiting += 1; 4958 if (handle.callout == NULL) 4959 handle = timeout(pause_timer, 0, tickdelay > 2 ? 
tickdelay : 2); 4960 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, PPAUSE, 4961 "softupdate", 0); 4962 proc_waiting -= 1; 4963 if (islocked == 0) 4964 FREE_LOCK(&lk); 4965 return (1); 4966 } 4967 4968 /* 4969 * Awaken processes pausing in request_cleanup and clear proc_waiting 4970 * to indicate that there is no longer a timer running. 4971 */ 4972 void 4973 pause_timer(arg) 4974 void *arg; 4975 { 4976 4977 *stat_countp += 1; 4978 wakeup_one(&proc_waiting); 4979 if (proc_waiting > 0) 4980 handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2); 4981 else 4982 handle.callout = NULL; 4983 } 4984 4985 /* 4986 * Flush out a directory with at least one removal dependency in an effort to 4987 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4988 */ 4989 static void 4990 clear_remove(td) 4991 struct thread *td; 4992 { 4993 struct pagedep_hashhead *pagedephd; 4994 struct pagedep *pagedep; 4995 static int next = 0; 4996 struct mount *mp; 4997 struct vnode *vp; 4998 int error, cnt; 4999 ino_t ino; 5000 5001 ACQUIRE_LOCK(&lk); 5002 for (cnt = 0; cnt < pagedep_hash; cnt++) { 5003 pagedephd = &pagedep_hashtbl[next++]; 5004 if (next >= pagedep_hash) 5005 next = 0; 5006 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 5007 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 5008 continue; 5009 mp = pagedep->pd_mnt; 5010 ino = pagedep->pd_ino; 5011 FREE_LOCK(&lk); 5012 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5013 continue; 5014 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp))) { 5015 softdep_error("clear_remove: vget", error); 5016 vn_finished_write(mp); 5017 return; 5018 } 5019 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5020 softdep_error("clear_remove: fsync", error); 5021 drain_output(vp, 0); 5022 vput(vp); 5023 vn_finished_write(mp); 5024 return; 5025 } 5026 } 5027 FREE_LOCK(&lk); 5028 } 5029 5030 /* 5031 * Clear out a block of dirty inodes in an effort to reduce 5032 * the number of inodedep dependency structures. 5033 */ 5034 static void 5035 clear_inodedeps(td) 5036 struct thread *td; 5037 { 5038 struct inodedep_hashhead *inodedephd; 5039 struct inodedep *inodedep; 5040 static int next = 0; 5041 struct mount *mp; 5042 struct vnode *vp; 5043 struct fs *fs; 5044 int error, cnt; 5045 ino_t firstino, lastino, ino; 5046 5047 ACQUIRE_LOCK(&lk); 5048 /* 5049 * Pick a random inode dependency to be cleared. 5050 * We will then gather up all the inodes in its block 5051 * that have dependencies and flush them out. 5052 */ 5053 for (cnt = 0; cnt < inodedep_hash; cnt++) { 5054 inodedephd = &inodedep_hashtbl[next++]; 5055 if (next >= inodedep_hash) 5056 next = 0; 5057 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 5058 break; 5059 } 5060 if (inodedep == NULL) 5061 return; 5062 /* 5063 * Ugly code to find mount point given pointer to superblock. 5064 */ 5065 fs = inodedep->id_fs; 5066 TAILQ_FOREACH(mp, &mountlist, mnt_list) 5067 if ((mp->mnt_flag & MNT_SOFTDEP) && fs == VFSTOUFS(mp)->um_fs) 5068 break; 5069 /* 5070 * Find the last inode in the block with dependencies. 5071 */ 5072 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5073 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5074 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5075 break; 5076 /* 5077 * Asynchronously push all but the last inode with dependencies. 5078 * Synchronously push the last inode with dependencies to ensure 5079 * that the inode block gets written to free up the inodedeps. 
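 * For example, with 8192-byte blocks and 128-byte dinodes, INOPB(fs)
 * is 64, so at most one synchronous wait is paid for every 64 inodes
 * cleared from the block.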
5080 */ 5081 for (ino = firstino; ino <= lastino; ino++) { 5082 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5083 continue; 5084 FREE_LOCK(&lk); 5085 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5086 continue; 5087 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp)) != 0) { 5088 softdep_error("clear_inodedeps: vget", error); 5089 vn_finished_write(mp); 5090 return; 5091 } 5092 if (ino == lastino) { 5093 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_WAIT, td))) 5094 softdep_error("clear_inodedeps: fsync1", error); 5095 } else { 5096 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5097 softdep_error("clear_inodedeps: fsync2", error); 5098 drain_output(vp, 0); 5099 } 5100 vput(vp); 5101 vn_finished_write(mp); 5102 ACQUIRE_LOCK(&lk); 5103 } 5104 FREE_LOCK(&lk); 5105 } 5106 5107 /* 5108 * Function to determine if the buffer has outstanding dependencies 5109 * that will cause a roll-back if the buffer is written. If wantcount 5110 * is set, return number of dependencies, otherwise just yes or no. 5111 */ 5112 static int 5113 softdep_count_dependencies(bp, wantcount) 5114 struct buf *bp; 5115 int wantcount; 5116 { 5117 struct worklist *wk; 5118 struct inodedep *inodedep; 5119 struct indirdep *indirdep; 5120 struct allocindir *aip; 5121 struct pagedep *pagedep; 5122 struct diradd *dap; 5123 int i, retval; 5124 5125 retval = 0; 5126 ACQUIRE_LOCK(&lk); 5127 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5128 switch (wk->wk_type) { 5129 5130 case D_INODEDEP: 5131 inodedep = WK_INODEDEP(wk); 5132 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5133 /* bitmap allocation dependency */ 5134 retval += 1; 5135 if (!wantcount) 5136 goto out; 5137 } 5138 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5139 /* direct block pointer dependency */ 5140 retval += 1; 5141 if (!wantcount) 5142 goto out; 5143 } 5144 continue; 5145 5146 case D_INDIRDEP: 5147 indirdep = WK_INDIRDEP(wk); 5148 5149 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5150 /* indirect block pointer dependency */ 5151 retval += 1; 5152 if (!wantcount) 5153 goto out; 5154 } 5155 continue; 5156 5157 case D_PAGEDEP: 5158 pagedep = WK_PAGEDEP(wk); 5159 for (i = 0; i < DAHASHSZ; i++) { 5160 5161 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5162 /* directory entry dependency */ 5163 retval += 1; 5164 if (!wantcount) 5165 goto out; 5166 } 5167 } 5168 continue; 5169 5170 case D_BMSAFEMAP: 5171 case D_ALLOCDIRECT: 5172 case D_ALLOCINDIR: 5173 case D_MKDIR: 5174 /* never a dependency on these blocks */ 5175 continue; 5176 5177 default: 5178 FREE_LOCK(&lk); 5179 panic("softdep_check_for_rollback: Unexpected type %s", 5180 TYPENAME(wk->wk_type)); 5181 /* NOTREACHED */ 5182 } 5183 } 5184 out: 5185 FREE_LOCK(&lk); 5186 return retval; 5187 } 5188 5189 /* 5190 * Acquire exclusive access to a buffer. 5191 * Must be called with splbio blocked. 5192 * Return 1 if buffer was acquired. 
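 * Callers pass the address of the buffer pointer because the buffer
 * may be torn down while we sleep; rereading *bpp on each retry
 * catches a pointer that a completion handler has since cleared.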
5193 */ 5194 static int 5195 getdirtybuf(bpp, waitfor) 5196 struct buf **bpp; 5197 int waitfor; 5198 { 5199 struct buf *bp; 5200 int error; 5201 5202 for (;;) { 5203 if ((bp = *bpp) == NULL) 5204 return (0); 5205 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 5206 if ((bp->b_xflags & BX_BKGRDINPROG) == 0) 5207 break; 5208 BUF_UNLOCK(bp); 5209 if (waitfor != MNT_WAIT) 5210 return (0); 5211 bp->b_xflags |= BX_BKGRDWAIT; 5212 interlocked_sleep(&lk, SLEEP, &bp->b_xflags, PRIBIO, 5213 "getbuf", 0); 5214 continue; 5215 } 5216 if (waitfor != MNT_WAIT) 5217 return (0); 5218 error = interlocked_sleep(&lk, LOCKBUF, bp, 5219 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5220 if (error != ENOLCK) { 5221 FREE_LOCK(&lk); 5222 panic("getdirtybuf: inconsistent lock"); 5223 } 5224 } 5225 if ((bp->b_flags & B_DELWRI) == 0) { 5226 BUF_UNLOCK(bp); 5227 return (0); 5228 } 5229 bremfree(bp); 5230 return (1); 5231 } 5232 5233 /* 5234 * Wait for pending output on a vnode to complete. 5235 * Must be called with vnode locked. 5236 */ 5237 static void 5238 drain_output(vp, islocked) 5239 struct vnode *vp; 5240 int islocked; 5241 { 5242 5243 if (!islocked) 5244 ACQUIRE_LOCK(&lk); 5245 while (vp->v_numoutput) { 5246 vp->v_flag |= VBWAIT; 5247 interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput, 5248 PRIBIO + 1, "drainvp", 0); 5249 } 5250 if (!islocked) 5251 FREE_LOCK(&lk); 5252 } 5253 5254 /* 5255 * Called whenever a buffer that is being invalidated or reallocated 5256 * contains dependencies. This should only happen if an I/O error has 5257 * occurred. The routine is called with the buffer locked. 5258 */ 5259 static void 5260 softdep_deallocate_dependencies(bp) 5261 struct buf *bp; 5262 { 5263 5264 if ((bp->b_ioflags & BIO_ERROR) == 0) 5265 panic("softdep_deallocate_dependencies: dangling deps"); 5266 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 5267 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5268 } 5269 5270 /* 5271 * Function to handle asynchronous write errors in the filesystem. 5272 */ 5273 void 5274 softdep_error(func, error) 5275 char *func; 5276 int error; 5277 { 5278 5279 /* XXX should do something better! */ 5280 printf("%s: got error %d while accessing filesystem\n", func, error); 5281 } 5282