/*
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 * $FreeBSD$
 */

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
static MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies");
static MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies");
static MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation");
static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map");
static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode");
static MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies");
static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block");
static MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode");
static MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode");
static MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated");
static MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry");
static MALLOC_DEFINE(M_MKDIR, "mkdir","New directory");
static MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted");
static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk","Unclaimed new directory block");

#define M_SOFTDEP_FLAGS	(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_NEWDIRBLK	13
#define	D_LAST		D_NEWDIRBLK

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK
};

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 * Note that D_LAST is itself a valid workitem type, so the bounds
 * check must be inclusive.
 */
#define TYPENAME(type)	\
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */
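/*
 * For example (illustrative only): freeing a completed diradd work item
 * with WORKITEM_FREE(item, D_DIRADD) returns its memory to the M_DIRADD
 * malloc type via DtoM(D_DIRADD), and TYPENAME(D_DIRADD) evaluates to
 * the short description string "diradd" for use in panic messages.
 */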
/*
 * Internal function prototypes.
 */
static void softdep_error __P((char *, int));
static void drain_output __P((struct vnode *, int));
static int getdirtybuf __P((struct buf **, int));
static void clear_remove __P((struct thread *));
static void clear_inodedeps __P((struct thread *));
static int flush_pagedep_deps __P((struct vnode *, struct mount *,
	    struct diraddhd *));
static int flush_inodedep_deps __P((struct fs *, ino_t));
static int handle_written_filepage __P((struct pagedep *, struct buf *));
static void diradd_inode_written __P((struct diradd *, struct inodedep *));
static int handle_written_inodeblock __P((struct inodedep *, struct buf *));
static void handle_allocdirect_partdone __P((struct allocdirect *));
static void handle_allocindir_partdone __P((struct allocindir *));
static void initiate_write_filepage __P((struct pagedep *, struct buf *));
static void handle_written_mkdir __P((struct mkdir *, int));
static void initiate_write_inodeblock __P((struct inodedep *, struct buf *));
static void handle_workitem_freefile __P((struct freefile *));
static void handle_workitem_remove __P((struct dirrem *));
static struct dirrem *newdirrem __P((struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **));
static void free_diradd __P((struct diradd *));
static void free_allocindir __P((struct allocindir *, struct inodedep *));
static void free_newdirblk __P((struct newdirblk *));
static int indir_trunc __P((struct freeblks *, ufs_daddr_t, int, ufs_lbn_t,
	    long *));
static void deallocate_dependencies __P((struct buf *, struct inodedep *));
static void free_allocdirect __P((struct allocdirectlst *,
	    struct allocdirect *, int));
static int check_inode_unwritten __P((struct inodedep *));
static int free_inodedep __P((struct inodedep *));
static void handle_workitem_freeblocks __P((struct freeblks *, int));
static void merge_inode_lists __P((struct inodedep *));
static void setup_allocindir_phase2 __P((struct buf *, struct inode *,
	    struct allocindir *));
static struct allocindir *newallocindir __P((struct inode *, int, ufs_daddr_t,
	    ufs_daddr_t));
static void handle_workitem_freefrag __P((struct freefrag *));
static struct freefrag *newfreefrag __P((struct inode *, ufs_daddr_t, long));
static void allocdirect_merge __P((struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *));
static struct bmsafemap *bmsafemap_lookup __P((struct buf *));
static int newblk_lookup __P((struct fs *, ufs_daddr_t, int,
	    struct newblk **));
static int inodedep_lookup __P((struct fs *, ino_t, int, struct inodedep **));
static int pagedep_lookup __P((struct inode *, ufs_lbn_t, int,
	    struct pagedep **));
static void pause_timer __P((void *));
static int request_cleanup __P((int, int));
static int process_worklist_item __P((struct mount *, int));
static void add_to_worklist __P((struct worklist *));

/*
 * Exported softdep operations.
 */
static void softdep_disk_io_initiation __P((struct buf *));
static void softdep_disk_write_complete __P((struct buf *));
static void softdep_deallocate_dependencies __P((struct buf *));
static void softdep_move_dependencies __P((struct buf *, struct buf *));
static int softdep_count_dependencies __P((struct buf *bp, int));
/*
 * Locking primitives.
 *
 * For a uniprocessor, all we need to do is protect against disk
 * interrupts.  For a multiprocessor, this lock would have to be
 * a mutex.  A single mutex is used throughout this file, though
 * finer grain locking could be used if contention warranted it.
 *
 * For a multiprocessor, the sleep call would accept a lock and
 * release it after the sleep processing was complete. In a uniprocessor
 * implementation there is no such interlock, so we simply mark
 * the places where it needs to be done with the `interlocked' form
 * of the lock calls. Since the uniprocessor sleep already interlocks
 * the spl, there is nothing that really needs to be done.
 */
#ifndef /* NOT */ DEBUG
static struct lockit {
	int	lkt_spl;
} lk = { 0 };
#define ACQUIRE_LOCK(lk)		(lk)->lkt_spl = splbio()
#define FREE_LOCK(lk)			splx((lk)->lkt_spl)

#else /* DEBUG */
#define NOHOLDER	((struct thread *)-1)
#define SPECIAL_FLAG	((struct thread *)-2)
static struct lockit {
	int	lkt_spl;
	struct	thread *lkt_held;
} lk = { 0, NOHOLDER };
static int lockcnt;

static void acquire_lock __P((struct lockit *));
static void free_lock __P((struct lockit *));
void softdep_panic __P((char *));

#define ACQUIRE_LOCK(lk)		acquire_lock(lk)
#define FREE_LOCK(lk)			free_lock(lk)

static void
acquire_lock(lk)
	struct lockit *lk;
{
	struct thread *holder;

	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("softdep_lock: locking against myself");
		else
			panic("softdep_lock: lock held by %p", holder);
	}
	lk->lkt_spl = splbio();
	lk->lkt_held = curthread;
	lockcnt++;
}

static void
free_lock(lk)
	struct lockit *lk;
{

	if (lk->lkt_held == NOHOLDER)
		panic("softdep_unlock: lock not held");
	lk->lkt_held = NOHOLDER;
	splx(lk->lkt_spl);
}

/*
 * Function to release soft updates lock and panic.
 */
void
softdep_panic(msg)
	char *msg;
{

	if (lk.lkt_held != NOHOLDER)
		FREE_LOCK(&lk);
	panic(msg);
}
#endif /* DEBUG */
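/*
 * The convention used throughout this file (illustrative sketch, not a
 * new interface): take the lock before touching any dependency
 * structure, and drop it before calling panic() so that the DEBUG
 * holder checks do not fire recursively:
 *
 *	ACQUIRE_LOCK(&lk);
 *	... examine or modify dependency lists ...
 *	FREE_LOCK(&lk);
 */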
static int interlocked_sleep __P((struct lockit *, int, void *, int,
	    const char *, int));

/*
 * When going to sleep, we must save our SPL so that it does
 * not get lost if some other process uses the lock while we
 * are sleeping. We restore it after we have slept. This routine
 * wraps the interlocking with functions that sleep. The list
 * below enumerates the available set of operations.
 */
#define UNKNOWN		0
#define SLEEP		1
#define LOCKBUF		2

static int
interlocked_sleep(lk, op, ident, flags, wmesg, timo)
	struct lockit *lk;
	int op;
	void *ident;
	int flags;
	const char *wmesg;
	int timo;
{
	struct thread *holder;
	int s, retval;

	s = lk->lkt_spl;
#	ifdef DEBUG
	if (lk->lkt_held == NOHOLDER)
		panic("interlocked_sleep: lock not held");
	lk->lkt_held = NOHOLDER;
#	endif /* DEBUG */
	switch (op) {
	case SLEEP:
		retval = tsleep(ident, flags, wmesg, timo);
		break;
	case LOCKBUF:
		retval = BUF_LOCK((struct buf *)ident, flags);
		break;
	default:
		panic("interlocked_sleep: unknown operation");
	}
#	ifdef DEBUG
	if (lk->lkt_held != NOHOLDER) {
		holder = lk->lkt_held;
		FREE_LOCK(lk);
		if (holder == curthread)
			panic("interlocked_sleep: locking against self");
		else
			panic("interlocked_sleep: lock held by %p", holder);
	}
	lk->lkt_held = curthread;
	lockcnt++;
#	endif /* DEBUG */
	lk->lkt_spl = s;
	return (retval);
}

/*
 * Place holder for real semaphores.
 */
struct sema {
	int	value;
	struct	thread *holder;
	char	*name;
	int	prio;
	int	timo;
};
static void sema_init __P((struct sema *, char *, int, int));
static int sema_get __P((struct sema *, struct lockit *));
static void sema_release __P((struct sema *));

static void
sema_init(semap, name, prio, timo)
	struct sema *semap;
	char *name;
	int prio, timo;
{

	semap->holder = NOHOLDER;
	semap->value = 0;
	semap->name = name;
	semap->prio = prio;
	semap->timo = timo;
}

static int
sema_get(semap, interlock)
	struct sema *semap;
	struct lockit *interlock;
{

	if (semap->value++ > 0) {
		if (interlock != NULL) {
			interlocked_sleep(interlock, SLEEP, (caddr_t)semap,
			    semap->prio, semap->name, semap->timo);
			FREE_LOCK(interlock);
		} else {
			tsleep((caddr_t)semap, semap->prio, semap->name,
			    semap->timo);
		}
		return (0);
	}
	semap->holder = curthread;
	if (interlock != NULL)
		FREE_LOCK(interlock);
	return (1);
}

static void
sema_release(semap)
	struct sema *semap;
{

	if (semap->value <= 0 || semap->holder != curthread) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("sema_release: not held");
	}
	if (--semap->value > 0) {
		semap->value = 0;
		wakeup(semap);
	}
	semap->holder = NOHOLDER;
}
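/*
 * Usage sketch (taken from the hash lookup routines below): sema_get()
 * returns 1 when the caller acquired the semaphore and may proceed to
 * allocate, or 0 when it slept and must retry its lookup from the top:
 *
 *	if (sema_get(&pagedep_in_progress, &lk) == 0) {
 *		ACQUIRE_LOCK(&lk);
 *		goto top;
 *	}
 *	... allocate and hash the new structure ...
 *	sema_release(&pagedep_in_progress);
 */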
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKITEM_FREE(item, type) FREE(item, DtoM(type))

#else /* DEBUG */
static void worklist_insert __P((struct workhead *, struct worklist *));
static void worklist_remove __P((struct worklist *));
static void workitem_free __P((struct worklist *, int));

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)
#define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_insert: lock not held");
	if (item->wk_state & ONWORKLIST) {
		FREE_LOCK(&lk);
		panic("worklist_insert: already on list");
	}
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	if (lk.lkt_held == NOHOLDER)
		panic("worklist_remove: lock not held");
	if ((item->wk_state & ONWORKLIST) == 0) {
		FREE_LOCK(&lk);
		panic("worklist_remove: not on list");
	}
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{

	if (item->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: still on list");
	}
	if (item->wk_type != type) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("workitem_free: type mismatch");
	}
	FREE(item, DtoM(type));
}
#endif /* DEBUG */

/*
 * Workitem queue management
 */
static struct workhead softdep_workitem_pending;
static int num_on_worklist;	/* number of worklist items to be processed */
static int softdep_worklist_busy; /* 1 => trying to do unmount */
static int softdep_worklist_req; /* serialized waiters */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout_handle handle; /* handle on posted proc_waiting timeout */
static struct thread *filesys_syncer; /* proc of filesystem syncer process */
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES	1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE	2
#define FLUSH_REMOVE_WAIT	3
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
#endif /* DEBUG */

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	static struct worklist *worklist_tail;

	if (wk->wk_state & ONWORKLIST) {
		if (lk.lkt_held != NOHOLDER)
			FREE_LOCK(&lk);
		panic("add_to_worklist: already on list");
	}
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(worklist_tail, wk, wk_list);
	worklist_tail = wk;
	num_on_worklist += 1;
}
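/*
 * Note on the queue discipline (descriptive): the worklist is kept in
 * FIFO order with a cached tail pointer, since the LIST macros provide
 * no O(1) tail insertion.  A freeblks item queued by a truncation is
 * therefore processed before a freefile item queued later for the same
 * inode, preserving the ordering relied on below.
 */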
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
int
softdep_process_worklist(matchmnt)
	struct mount *matchmnt;
{
	struct thread *td = curthread;
	int matchcnt, loopcount;
	long starttime;

	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	filesys_syncer = td;
	matchcnt = 0;

	/*
	 * There is no danger of having multiple processes run this
	 * code, but we have to single-thread it when softdep_flushfiles()
	 * is in operation to get an accurate count of the number of items
	 * related to its mount point that are in the list.
	 */
	if (matchmnt == NULL) {
		if (softdep_worklist_busy < 0)
			return(-1);
		softdep_worklist_busy += 1;
	}

	/*
	 * If requested, try removing inode or removal dependencies.
	 */
	if (req_clear_inodedeps) {
		clear_inodedeps(td);
		req_clear_inodedeps -= 1;
		wakeup_one(&proc_waiting);
	}
	if (req_clear_remove) {
		clear_remove(td);
		req_clear_remove -= 1;
		wakeup_one(&proc_waiting);
	}
	loopcount = 1;
	starttime = time_second;
	while (num_on_worklist > 0) {
		matchcnt += process_worklist_item(matchmnt, 0);

		/*
		 * If a umount operation wants to run the worklist
		 * accurately, abort.
		 */
		if (softdep_worklist_req && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}

		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0)
			bwillwrite();
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other syncer tasks may get
		 * excessively backlogged.
		 */
		if (starttime != time_second && matchmnt == NULL) {
			matchcnt = -1;
			break;
		}
	}
	if (matchmnt == NULL) {
		softdep_worklist_busy -= 1;
		if (softdep_worklist_req && softdep_worklist_busy == 0)
			wakeup(&softdep_worklist_req);
	}
	return (matchcnt);
}
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(matchmnt, flags)
	struct mount *matchmnt;
	int flags;
{
	struct worklist *wk;
	struct dirrem *dirrem;
	struct mount *mp;
	struct vnode *vp;
	int matchcnt = 0;

	ACQUIRE_LOCK(&lk);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) {
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		dirrem = WK_DIRREM(wk);
		vp = ufs_ihashlookup(VFSTOUFS(dirrem->dm_mnt)->um_dev,
		    dirrem->dm_oldinum);
		if (vp == NULL || !VOP_ISLOCKED(vp, curthread))
			break;
	}
	if (wk == 0) {
		FREE_LOCK(&lk);
		return (-1);
	}
	WORKLIST_REMOVE(wk);
	num_on_worklist -= 1;
	FREE_LOCK(&lk);
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		mp = WK_DIRREM(wk)->dm_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: dirrem on suspended filesystem",
				"process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_remove(WK_DIRREM(wk));
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		mp = WK_FREEBLKS(wk)->fb_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freeblks on suspended filesystem",
				"process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT);
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		mp = WK_FREEFRAG(wk)->ff_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefrag on suspended filesystem",
				"process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		mp = WK_FREEFILE(wk)->fx_mnt;
		if (vn_write_suspend_wait(NULL, mp, V_NOWAIT))
			panic("%s: freefile on suspended filesystem",
				"process_worklist_item");
		if (mp == matchmnt)
			matchcnt += 1;
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
static void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = 0;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}
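/*
 * Aside (assumes the definitions in <ufs/ffs/softdep.h>): the WK_DIRREM(),
 * WK_FREEBLKS(), WK_FREEFRAG() and WK_FREEFILE() macros used above are
 * conversion macros that map the generic worklist item back to its
 * containing dependency structure; this works because each dependency
 * structure begins with an embedded struct worklist.
 */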
/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;

	/*
	 * Await our turn to clear out the queue, then serialize access.
	 */
	while (softdep_worklist_busy) {
		softdep_worklist_req += 1;
		tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0);
		softdep_worklist_req -= 1;
	}
	softdep_worklist_busy = -1;
	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	devvp = VFSTOUFS(oldmnt)->um_devvp;
	while ((count = softdep_process_worklist(oldmnt)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, td->td_proc->p_ucred, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	softdep_worklist_busy = 0;
	if (softdep_worklist_req)
		wakeup(&softdep_worklist_req);
	return (error);
}

/*
 * Flush all vnodes and worklist items associated with a specified mount point.
 */
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{
	int error, count, loopcnt;

	/*
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can iterate at most twice,
	 * but we give it a few extra iterations just to be sure.
	 */
	for (loopcnt = 10; loopcnt > 0; loopcnt--) {
		/*
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		 */
		if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
			break;
		if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 ||
		    count == 0)
			break;
	}
	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])
static struct sema pagedep_in_progress;
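/*
 * Illustrative note on the hash: hashinit() sizes the tables to a power
 * of two, so masking with pagedep_hash (the table size minus one) picks
 * a bucket in constant time; the mount point pointer is shifted right 13
 * bits before mixing so that pointer alignment does not cluster entries
 * into a few buckets.
 */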
/*
 * Look up a pagedep. Return 1 if found, 0 if not found or found
 * when asked to allocate but not associated with any buffer.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int i;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("pagedep_lookup: lock not held");
#endif
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);
top:
	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ip->i_number == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_mnt)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		if ((flags & DEPALLOC) != 0 &&
		    (pagedep->pd_state & ONWORKLIST) == 0)
			return (0);
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*pagedeppp = NULL;
		return (0);
	}
	if (sema_get(&pagedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP,
	    M_SOFTDEP_FLAGS|M_ZERO);
	pagedep->pd_list.wk_type = D_PAGEDEP;
	pagedep->pd_mnt = mp;
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	sema_release(&pagedep_in_progress);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long inodedep_hash;	/* size of hash table - 1 */
static long num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
	(&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
static struct sema inodedep_in_progress;

/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(fs, inum, flags, inodedeppp)
	struct fs *fs;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	int firsttry;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("inodedep_lookup: lock not held");
#endif
	firsttry = 1;
	inodedephd = INODEDEP_HASH(fs, inum);
top:
	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*inodedeppp = NULL;
		return (0);
	}
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && firsttry && (flags & NODELAY) == 0 &&
	    request_cleanup(FLUSH_INODES, 1)) {
		firsttry = 0;
		goto top;
	}
	if (sema_get(&inodedep_in_progress, &lk) == 0) {
		ACQUIRE_LOCK(&lk);
		goto top;
	}
	num_inodedep += 1;
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
	    M_INODEDEP, M_SOFTDEP_FLAGS);
	inodedep->id_list.wk_type = D_INODEDEP;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	ACQUIRE_LOCK(&lk);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	sema_release(&inodedep_in_progress);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
static struct sema newblk_in_progress;

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
top:
	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	if ((flags & DEPALLOC) == 0) {
		*newblkpp = NULL;
		return (0);
	}
	if (sema_get(&newblk_in_progress, 0) == 0)
		goto top;
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
	    M_NEWBLK, M_SOFTDEP_FLAGS);
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	sema_release(&newblk_in_progress);
	*newblkpp = newblk;
	return (0);
}
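/*
 * Note (descriptive): unlike pagedep_lookup() and inodedep_lookup(),
 * newblk_lookup() is called before the soft updates lock is taken (see
 * softdep_setup_blkmapdep() and softdep_setup_allocdirect() below), so
 * sema_get() is passed a NULL interlock above and no splbio protection
 * is required.
 */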
/*
 * Executed during filesystem initialization before
 * mounting any file systems.
 */
void
softdep_initialize()
{

	LIST_INIT(&mkdirlisthd);
	LIST_INIT(&softdep_workitem_pending);
	max_softdeps = min(desiredvnodes * 8,
	    M_INODEDEP->ks_limit / (2 * sizeof(struct inodedep)));
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);
	sema_init(&newblk_in_progress, "newblk", PRIBIO, 0);

	/* initialize bioops hack */
	bioops.io_start = softdep_disk_io_initiation;
	bioops.io_complete = softdep_disk_write_complete;
	bioops.io_deallocate = softdep_deallocate_dependencies;
	bioops.io_movedeps = softdep_move_dependencies;
	bioops.io_countdeps = softdep_count_dependencies;
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{
	struct csum cstotal;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_flag |= MNT_SOFTDEP;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync, so we have
	 * to scan the cylinder groups and recalculate them.
	 */
	if (fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, cred, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a file system
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicate that a live inode or block is
 * free. So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers. When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset. The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation. The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs file system maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector. If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not. (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */
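/*
 * Worked example of the rule above (illustrative only): when a new block
 * is allocated to a file, the write of the cylinder group bitmap showing
 * the block as in-use must reach the disk before the write of the inode
 * (or indirect block) that points to it.  If the machine crashes between
 * the two writes, the worst case is a block marked allocated but not
 * referenced -- lost space that can be reclaimed -- never a reachable
 * pointer to a block the bitmap still shows as free.
 */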
/*
 * Called just after updating the cylinder group block to allocate an inode.
 */
void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;		/* buffer for cylgroup block with inode map */
	struct inode *ip;	/* inode related to allocation */
	ino_t newinum;		/* new inode number being allocated */
{
	struct inodedep *inodedep;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated inode.
	 * Panic if it already exists as something is seriously wrong.
	 * Otherwise add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	ACQUIRE_LOCK(&lk);
	if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) {
		FREE_LOCK(&lk);
		panic("softdep_setup_inomapdep: found inode");
	}
	inodedep->id_buf = bp;
	inodedep->id_state &= ~DEPCOMPLETE;
	bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
	FREE_LOCK(&lk);
}

/*
 * Called just after updating the cylinder group block to
 * allocate block or fragment.
 */
void
softdep_setup_blkmapdep(bp, fs, newblkno)
	struct buf *bp;		/* buffer for cylgroup block with block map */
	struct fs *fs;		/* filesystem doing allocation */
	ufs_daddr_t newblkno;	/* number of newly allocated block */
{
	struct newblk *newblk;
	struct bmsafemap *bmsafemap;

	/*
	 * Create a dependency for the newly allocated block.
	 * Add it to the dependency list for the buffer holding
	 * the cylinder group map from which it was allocated.
	 */
	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
		panic("softdep_setup_blkmapdep: found block");
	ACQUIRE_LOCK(&lk);
	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp);
	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
	FREE_LOCK(&lk);
}

/*
 * Find the bmsafemap associated with a cylinder group buffer.
 * If none exists, create one. The buffer must be locked when
 * this routine is called and this routine must be called with
 * splbio interrupts blocked.
 */
static struct bmsafemap *
bmsafemap_lookup(bp)
	struct buf *bp;
{
	struct bmsafemap *bmsafemap;
	struct worklist *wk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("bmsafemap_lookup: lock not held");
#endif
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_BMSAFEMAP)
			return (WK_BMSAFEMAP(wk));
	FREE_LOCK(&lk);
	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
	bmsafemap->sm_list.wk_type = D_BMSAFEMAP;
	bmsafemap->sm_list.wk_state = 0;
	bmsafemap->sm_buf = bp;
	LIST_INIT(&bmsafemap->sm_allocdirecthd);
	LIST_INIT(&bmsafemap->sm_allocindirhd);
	LIST_INIT(&bmsafemap->sm_inodedephd);
	LIST_INIT(&bmsafemap->sm_newblkhd);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
	return (bmsafemap);
}

/*
 * Direct block allocation dependencies.
 *
 * When a new block is allocated, the corresponding disk locations must be
 * initialized (with zeros or new data) before the on-disk inode points to
 * them.  Also, the freemap from which the block was allocated must be
 * updated (on disk) before the inode's pointer. These two dependencies are
 * independent of each other and are needed for all file blocks and indirect
 * blocks that are pointed to directly by the inode.  Just before the
 * "in-core" version of the inode is updated with a newly allocated block
 * number, a procedure (below) is called to setup allocation dependency
 * structures.  These structures are removed when the corresponding
 * dependencies are satisfied or when the block allocation becomes obsolete
 * (i.e., the file is deleted, the block is de-allocated, or the block is a
 * fragment that gets upgraded).  All of these cases are handled in
 * procedures described later.
 *
 * When a file extension causes a fragment to be upgraded, either to a larger
 * fragment or to a full block, the on-disk location may change (if the
 * previous fragment could not simply be extended). In this case, the old
 * fragment must be de-allocated, but not until after the inode's pointer has
 * been updated. In most cases, this is handled by later procedures, which
 * will construct a "freefrag" structure to be added to the workitem queue
 * when the inode update is complete (or obsolete).  The main exception to
 * this is when an allocation occurs while a pending allocation dependency
 * (for the same block pointer) remains.  This case is handled in the main
 * allocation dependency setup procedure by immediately freeing the
 * unreferenced fragments.
 */
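/*
 * Call-site sketch (assumes the usual FFS block allocator flow; the
 * exact caller is outside this excerpt): after the allocator picks a
 * block for logical block lbn, and before the new number is stored in
 * the in-core inode, it is expected to call
 *
 *	softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno,
 *	    newsize, oldsize, bp);
 *
 * so that the inode write can be rolled back until the bitmap and the
 * block contents are safely on disk.
 */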
void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;	/* inode to which block is being added */
	ufs_lbn_t lbn;		/* block pointer within inode */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 unless frag */
	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
	struct buf *bp;		/* bp for allocated block */
{
	struct allocdirect *adp, *oldadp;
	struct allocdirectlst *adphead;
	struct bmsafemap *bmsafemap;
	struct inodedep *inodedep;
	struct pagedep *pagedep;
	struct newblk *newblk;

	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
	    M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
	adp->ad_list.wk_type = D_ALLOCDIRECT;
	adp->ad_lbn = lbn;
	adp->ad_newblkno = newblkno;
	adp->ad_oldblkno = oldblkno;
	adp->ad_newsize = newsize;
	adp->ad_oldsize = oldsize;
	adp->ad_state = ATTACHED;
	LIST_INIT(&adp->ad_newdirblk);
	if (newblkno == oldblkno)
		adp->ad_freefrag = NULL;
	else
		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);

	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
		panic("softdep_setup_allocdirect: lost block");

	ACQUIRE_LOCK(&lk);
	inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep);
	adp->ad_inodedep = inodedep;

	if (newblk->nb_state == DEPCOMPLETE) {
		adp->ad_state |= DEPCOMPLETE;
		adp->ad_buf = NULL;
	} else {
		bmsafemap = newblk->nb_bmsafemap;
		adp->ad_buf = bmsafemap->sm_buf;
		LIST_REMOVE(newblk, nb_deps);
		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
	}
	LIST_REMOVE(newblk, nb_hash);
	FREE(newblk, M_NEWBLK);

	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
	if (lbn >= NDADDR) {
		/* allocating an indirect block */
		if (oldblkno != 0) {
			FREE_LOCK(&lk);
			panic("softdep_setup_allocdirect: non-zero indir");
		}
	} else {
		/*
		 * Allocating a direct block.
		 *
		 * If we are allocating a directory block, then we must
		 * allocate an associated pagedep to track additions and
		 * deletions.
		 */
		if ((ip->i_mode & IFMT) == IFDIR &&
		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
	}
	/*
	 * The list of allocdirects must be kept in sorted and ascending
	 * order so that the rollback routines can quickly determine the
	 * first uncommitted block (the size of the file stored on disk
	 * ends at the end of the lowest committed fragment, or if there
	 * are no fragments, at the end of the highest committed block).
	 * Since files generally grow, the typical case is that the new
	 * block is to be added at the end of the list. We speed this
	 * special case by checking against the last allocdirect in the
	 * list before laboriously traversing the list looking for the
	 * insertion point.
	 */
	adphead = &inodedep->id_newinoupdt;
	oldadp = TAILQ_LAST(adphead, allocdirectlst);
	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
		/* insert at end of list */
		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
		if (oldadp != NULL && oldadp->ad_lbn == lbn)
			allocdirect_merge(adphead, adp, oldadp);
		FREE_LOCK(&lk);
		return;
	}
	TAILQ_FOREACH(oldadp, adphead, ad_next) {
		if (oldadp->ad_lbn >= lbn)
			break;
	}
	if (oldadp == NULL) {
		FREE_LOCK(&lk);
		panic("softdep_setup_allocdirect: lost entry");
	}
	/* insert in middle of list */
	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
	if (oldadp->ad_lbn == lbn)
		allocdirect_merge(adphead, adp, oldadp);
	FREE_LOCK(&lk);
}

/*
 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with splbio interrupts blocked.
 */
static void
allocdirect_merge(adphead, newadp, oldadp)
	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
	struct allocdirect *newadp;	/* allocdirect being added */
	struct allocdirect *oldadp;	/* existing allocdirect being checked */
{
	struct worklist *wk;
	struct freefrag *freefrag;
	struct newdirblk *newdirblk;

#ifdef DEBUG
	if (lk.lkt_held == NOHOLDER)
		panic("allocdirect_merge: lock not held");
#endif
	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
	    newadp->ad_oldsize != oldadp->ad_newsize ||
	    newadp->ad_lbn >= NDADDR) {
		FREE_LOCK(&lk);
		panic("allocdirect_merge: old %d != new %d || lbn %ld >= %d",
		    newadp->ad_oldblkno, oldadp->ad_newblkno, newadp->ad_lbn,
		    NDADDR);
	}
	newadp->ad_oldblkno = oldadp->ad_oldblkno;
	newadp->ad_oldsize = oldadp->ad_oldsize;
	/*
	 * If the old dependency had a fragment to free or had never
	 * previously had a block allocated, then the new dependency
	 * can immediately post its freefrag and adopt the old freefrag.
	 * This action is done by swapping the freefrag dependencies.
	 * The new dependency gains the old one's freefrag, and the
	 * old one gets the new one and then immediately puts it on
	 * the worklist when it is freed by free_allocdirect. It is
	 * not possible to do this swap when the old dependency had a
	 * non-zero size but no previous fragment to free. This condition
	 * arises when the new block is an extension of the old block.
	 * Here, the first part of the fragment allocated to the new
	 * dependency is part of the block currently claimed on disk by
	 * the old dependency, so cannot legitimately be freed until the
	 * conditions for the new dependency are fulfilled.
	 */
	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
		freefrag = newadp->ad_freefrag;
		newadp->ad_freefrag = oldadp->ad_freefrag;
		oldadp->ad_freefrag = freefrag;
	}
	/*
	 * If we are tracking a new directory-block allocation,
	 * move it from the old allocdirect to the new allocdirect.
	 */
	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
		newdirblk = WK_NEWDIRBLK(wk);
		WORKLIST_REMOVE(&newdirblk->db_list);
		if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL)
			panic("allocdirect_merge: extra newdirblk");
		WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list);
	}
	free_allocdirect(adphead, oldadp, 0);
}
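/*
 * Concrete case for the swap above (illustrative only): the fragment the
 * new dependency would free is the very block the old, never-committed
 * dependency allocated; handing it to the old allocdirect lets
 * free_allocdirect() (called at the end of allocdirect_merge) queue it
 * for release immediately, while the truly old on-disk fragment migrates
 * to the new dependency and waits for the inode update to commit.
 */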
/*
 * Allocate a new freefrag structure if needed.
 */
static struct freefrag *
newfreefrag(ip, blkno, size)
	struct inode *ip;
	ufs_daddr_t blkno;
	long size;
{
	struct freefrag *freefrag;
	struct fs *fs;

	if (blkno == 0)
		return (NULL);
	fs = ip->i_fs;
	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
		panic("newfreefrag: frag size");
	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
	    M_FREEFRAG, M_SOFTDEP_FLAGS);
	freefrag->ff_list.wk_type = D_FREEFRAG;
	freefrag->ff_state = 0;
	freefrag->ff_inum = ip->i_number;
	freefrag->ff_mnt = ITOV(ip)->v_mount;
	freefrag->ff_devvp = ip->i_devvp;
	freefrag->ff_blkno = blkno;
	freefrag->ff_fragsize = size;
	return (freefrag);
}

/*
 * This workitem de-allocates fragments that were replaced during
 * file block allocation.
 */
static void
handle_workitem_freefrag(freefrag)
	struct freefrag *freefrag;
{

	ffs_blkfree(VFSTOUFS(freefrag->ff_mnt)->um_fs, freefrag->ff_devvp,
	    freefrag->ff_blkno, freefrag->ff_fragsize, freefrag->ff_inum);
	FREE(freefrag, M_FREEFRAG);
}

/*
 * Indirect block allocation dependencies.
 *
 * The same dependencies that exist for a direct block also exist when
 * a new block is allocated and pointed to by an entry in a block of
 * indirect pointers. The undo/redo states described above are also
 * used here. Because an indirect block contains many pointers that
 * may have dependencies, a second copy of the entire in-memory indirect
 * block is kept. The buffer cache copy is always completely up-to-date.
 * The second copy, which is used only as a source for disk writes,
 * contains only the safe pointers (i.e., those that have no remaining
 * update dependencies). The second copy is freed when all pointers
 * are safe. The cache is not allowed to replace indirect blocks with
 * pending update dependencies. If a buffer containing an indirect
 * block with dependencies is written, these routines will mark it
 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine in conjunction with
 * softdep_sync_metadata work together to get all the dependencies
 * removed so that a file can be successfully written to disk. Three
 * procedures are used when setting up indirect block pointer
 * dependencies. The division is necessary because of the organization
 * of the "balloc" routine and because of the distinction between file
 * pages and file metadata blocks.
 */
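/*
 * Sketch of the two-copy scheme above (illustrative): if entry 5 of an
 * indirect block is updated from old block O to new block N while N's
 * bitmap write is still pending, the buffer cache copy holds N but the
 * save copy (ir_savebp, the source for disk writes) is rolled back to
 * hold O.  Only when the allocindir for entry 5 completes may N appear
 * in the on-disk copy.
 */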
/*
 * Allocate a new allocindir structure.
 */
static struct allocindir *
newallocindir(ip, ptrno, newblkno, oldblkno)
	struct inode *ip;	/* inode for file being extended */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
{
	struct allocindir *aip;

	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
	    M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO);
	aip->ai_list.wk_type = D_ALLOCINDIR;
	aip->ai_state = ATTACHED;
	aip->ai_offset = ptrno;
	aip->ai_newblkno = newblkno;
	aip->ai_oldblkno = oldblkno;
	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
	return (aip);
}

/*
 * Called just before setting an indirect block pointer
 * to a newly allocated file page.
 */
void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;	/* inode for file being extended */
	ufs_lbn_t lbn;		/* allocated block number within file */
	struct buf *bp;		/* buffer with indirect blk referencing page */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
	ufs_daddr_t oldblkno;	/* previous block number, 0 if none */
	struct buf *nbp;	/* buffer holding allocated page */
{
	struct allocindir *aip;
	struct pagedep *pagedep;

	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
	ACQUIRE_LOCK(&lk);
	/*
	 * If we are allocating a directory page, then we must
	 * allocate an associated pagedep to track additions and
	 * deletions.
	 */
	if ((ip->i_mode & IFMT) == IFDIR &&
	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}

/*
 * Called just before setting an indirect block pointer to a
 * newly allocated indirect block.
 */
void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;	/* newly allocated indirect block */
	struct inode *ip;	/* inode for file being extended */
	struct buf *bp;		/* indirect block referencing allocated block */
	int ptrno;		/* offset of pointer in indirect block */
	ufs_daddr_t newblkno;	/* disk block number being added */
{
	struct allocindir *aip;

	aip = newallocindir(ip, ptrno, newblkno, 0);
	ACQUIRE_LOCK(&lk);
	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
	FREE_LOCK(&lk);
	setup_allocindir_phase2(bp, ip, aip);
}
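/*
 * Usage note (descriptive): the "balloc" code path uses
 * softdep_setup_allocindir_page() when the indirect entry will point at
 * a file data page, and softdep_setup_allocindir_meta() when it will
 * point at another indirect block.  Only the former may need a pagedep,
 * since only directory data pages carry entry add/remove dependencies.
 */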
*/ 1641 static void 1642 setup_allocindir_phase2(bp, ip, aip) 1643 struct buf *bp; /* in-memory copy of the indirect block */ 1644 struct inode *ip; /* inode for file being extended */ 1645 struct allocindir *aip; /* allocindir allocated by the above routines */ 1646 { 1647 struct worklist *wk; 1648 struct indirdep *indirdep, *newindirdep; 1649 struct bmsafemap *bmsafemap; 1650 struct allocindir *oldaip; 1651 struct freefrag *freefrag; 1652 struct newblk *newblk; 1653 1654 if (bp->b_lblkno >= 0) 1655 panic("setup_allocindir_phase2: not indir blk"); 1656 for (indirdep = NULL, newindirdep = NULL; ; ) { 1657 ACQUIRE_LOCK(&lk); 1658 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1659 if (wk->wk_type != D_INDIRDEP) 1660 continue; 1661 indirdep = WK_INDIRDEP(wk); 1662 break; 1663 } 1664 if (indirdep == NULL && newindirdep) { 1665 indirdep = newindirdep; 1666 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1667 newindirdep = NULL; 1668 } 1669 FREE_LOCK(&lk); 1670 if (indirdep) { 1671 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1672 &newblk) == 0) 1673 panic("setup_allocindir: lost block"); 1674 ACQUIRE_LOCK(&lk); 1675 if (newblk->nb_state == DEPCOMPLETE) { 1676 aip->ai_state |= DEPCOMPLETE; 1677 aip->ai_buf = NULL; 1678 } else { 1679 bmsafemap = newblk->nb_bmsafemap; 1680 aip->ai_buf = bmsafemap->sm_buf; 1681 LIST_REMOVE(newblk, nb_deps); 1682 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1683 aip, ai_deps); 1684 } 1685 LIST_REMOVE(newblk, nb_hash); 1686 FREE(newblk, M_NEWBLK); 1687 aip->ai_indirdep = indirdep; 1688 /* 1689 * Check to see if there is an existing dependency 1690 * for this block. If there is, merge the old 1691 * dependency into the new one. 1692 */ 1693 if (aip->ai_oldblkno == 0) 1694 oldaip = NULL; 1695 else 1696 1697 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1698 if (oldaip->ai_offset == aip->ai_offset) 1699 break; 1700 freefrag = NULL; 1701 if (oldaip != NULL) { 1702 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1703 FREE_LOCK(&lk); 1704 panic("setup_allocindir_phase2: blkno"); 1705 } 1706 aip->ai_oldblkno = oldaip->ai_oldblkno; 1707 freefrag = aip->ai_freefrag; 1708 aip->ai_freefrag = oldaip->ai_freefrag; 1709 oldaip->ai_freefrag = NULL; 1710 free_allocindir(oldaip, NULL); 1711 } 1712 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1713 ((ufs_daddr_t *)indirdep->ir_savebp->b_data) 1714 [aip->ai_offset] = aip->ai_oldblkno; 1715 FREE_LOCK(&lk); 1716 if (freefrag != NULL) 1717 handle_workitem_freefrag(freefrag); 1718 } 1719 if (newindirdep) { 1720 if (newindirdep->ir_savebp != NULL) 1721 brelse(newindirdep->ir_savebp); 1722 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1723 } 1724 if (indirdep) 1725 break; 1726 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1727 M_INDIRDEP, M_SOFTDEP_FLAGS); 1728 newindirdep->ir_list.wk_type = D_INDIRDEP; 1729 newindirdep->ir_state = ATTACHED; 1730 LIST_INIT(&newindirdep->ir_deplisthd); 1731 LIST_INIT(&newindirdep->ir_donehd); 1732 if (bp->b_blkno == bp->b_lblkno) 1733 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &bp->b_blkno, NULL, NULL); 1734 newindirdep->ir_savebp = 1735 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0); 1736 BUF_KERNPROC(newindirdep->ir_savebp); 1737 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1738 } 1739 } 1740 1741 /* 1742 * Block de-allocation dependencies. 1743 * 1744 * When blocks are de-allocated, the on-disk pointers must be nullified before 1745 * the blocks are made available for use by other files.
(The true 1746 * requirement is that old pointers must be nullified before new on-disk 1747 * pointers are set. We chose this slightly more stringent requirement to 1748 * reduce complexity.) Our implementation handles this dependency by updating 1749 * the inode (or indirect block) appropriately but delaying the actual block 1750 * de-allocation (i.e., freemap and free space count manipulation) until 1751 * after the updated versions reach stable storage. After the disk is 1752 * updated, the blocks can be safely de-allocated whenever it is convenient. 1753 * This implementation handles only the common case of reducing a file's 1754 * length to zero. Other cases are handled by the conventional synchronous 1755 * write approach. 1756 * 1757 * The ffs implementation with which we worked double-checks 1758 * the state of the block pointers and file size as it reduces 1759 * a file's length. Some of this code is replicated here in our 1760 * soft updates implementation. The freeblks->fb_chkcnt field is 1761 * used to transfer a part of this information to the procedure 1762 * that eventually de-allocates the blocks. 1763 * 1764 * This routine should be called from the routine that shortens 1765 * a file's length, before the inode's size or block pointers 1766 * are modified. It will save the block pointer information for 1767 * later release and zero the inode so that the calling routine 1768 * can release it. 1769 */ 1770 void 1771 softdep_setup_freeblocks(ip, length) 1772 struct inode *ip; /* The inode whose length is to be reduced */ 1773 off_t length; /* The new length for the file */ 1774 { 1775 struct freeblks *freeblks; 1776 struct inodedep *inodedep; 1777 struct allocdirect *adp; 1778 struct vnode *vp; 1779 struct buf *bp; 1780 struct fs *fs; 1781 int i, delay, error; 1782 1783 fs = ip->i_fs; 1784 if (length != 0) 1785 panic("softdep_setup_freeblocks: non-zero length"); 1786 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks), 1787 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 1788 freeblks->fb_list.wk_type = D_FREEBLKS; 1789 freeblks->fb_uid = ip->i_uid; 1790 freeblks->fb_previousinum = ip->i_number; 1791 freeblks->fb_devvp = ip->i_devvp; 1792 freeblks->fb_mnt = ITOV(ip)->v_mount; 1793 freeblks->fb_oldsize = ip->i_size; 1794 freeblks->fb_newsize = length; 1795 freeblks->fb_chkcnt = ip->i_blocks; 1796 for (i = 0; i < NDADDR; i++) { 1797 freeblks->fb_dblks[i] = ip->i_db[i]; 1798 ip->i_db[i] = 0; 1799 } 1800 for (i = 0; i < NIADDR; i++) { 1801 freeblks->fb_iblks[i] = ip->i_ib[i]; 1802 ip->i_ib[i] = 0; 1803 } 1804 ip->i_blocks = 0; 1805 ip->i_size = 0; 1806 /* 1807 * If the file was removed, then the space being freed was 1808 * accounted for then (see softdep_releasefile()). If the 1809 * file is merely being truncated, then we account for it now. 1810 */ 1811 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 1812 fs->fs_pendingblocks += freeblks->fb_chkcnt; 1813 /* 1814 * Push the zero'ed inode to its disk buffer so that we are free 1815 * to delete its dependencies below. Once the dependencies are gone 1816 * the buffer can be safely released. 1817 */ 1818 if ((error = bread(ip->i_devvp, 1819 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 1820 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 1821 brelse(bp); 1822 softdep_error("softdep_setup_freeblocks", error); 1823 } 1824 *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number)) = 1825 ip->i_din; 1826 /* 1827 * Find and eliminate any inode dependencies.
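 * Because the zero'ed inode has just been pushed to its buffer, the
 * allocdirect dependencies recorded against this inode are now
 * redundant: whatever order the remaining buffers reach the disk,
 * the on-disk inode will show no block pointers.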
1828 */ 1829 ACQUIRE_LOCK(&lk); 1830 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 1831 if ((inodedep->id_state & IOSTARTED) != 0) { 1832 FREE_LOCK(&lk); 1833 panic("softdep_setup_freeblocks: inode busy"); 1834 } 1835 /* 1836 * Add the freeblks structure to the list of operations that 1837 * must await the zero'ed inode being written to disk. If we 1838 * still have a bitmap dependency (delay == 0), then the inode 1839 * has never been written to disk, so we can process the 1840 * freeblks below once we have deleted the dependencies. 1841 */ 1842 delay = (inodedep->id_state & DEPCOMPLETE); 1843 if (delay) 1844 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 1845 /* 1846 * Because the file length has been truncated to zero, any 1847 * pending block allocation dependency structures associated 1848 * with this inode are obsolete and can simply be de-allocated. 1849 * We must first merge the two dependency lists to get rid of 1850 * any duplicate freefrag structures, then purge the merged list. 1851 * If we still have a bitmap dependency, then the inode has never 1852 * been written to disk, so we can free any fragments without delay. 1853 */ 1854 merge_inode_lists(inodedep); 1855 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 1856 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 1857 FREE_LOCK(&lk); 1858 bdwrite(bp); 1859 /* 1860 * We must wait for any I/O in progress to finish so that 1861 * all potential buffers on the dirty list will be visible. 1862 * Once they are all there, walk the list and get rid of 1863 * any dependencies. 1864 */ 1865 vp = ITOV(ip); 1866 ACQUIRE_LOCK(&lk); 1867 drain_output(vp, 1); 1868 while (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT)) { 1869 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 1870 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep); 1871 deallocate_dependencies(bp, inodedep); 1872 bp->b_flags |= B_INVAL | B_NOCACHE; 1873 FREE_LOCK(&lk); 1874 brelse(bp); 1875 ACQUIRE_LOCK(&lk); 1876 } 1877 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 1878 (void) free_inodedep(inodedep); 1879 FREE_LOCK(&lk); 1880 /* 1881 * If the inode has never been written to disk (delay == 0), 1882 * then we can process the freeblks now that we have deleted 1883 * the dependencies. 1884 */ 1885 if (!delay) 1886 handle_workitem_freeblocks(freeblks, 0); 1887 } 1888 1889 /* 1890 * Reclaim any dependency structures from a buffer that is about to 1891 * be reallocated to a new vnode. The buffer must be locked, thus, 1892 * no I/O completion operations can occur while we are manipulating 1893 * its associated dependencies. The mutex is held so that other I/O's 1894 * associated with related dependencies do not occur. 1895 */ 1896 static void 1897 deallocate_dependencies(bp, inodedep) 1898 struct buf *bp; 1899 struct inodedep *inodedep; 1900 { 1901 struct worklist *wk; 1902 struct indirdep *indirdep; 1903 struct allocindir *aip; 1904 struct pagedep *pagedep; 1905 struct dirrem *dirrem; 1906 struct diradd *dap; 1907 int i; 1908 1909 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 1910 switch (wk->wk_type) { 1911 1912 case D_INDIRDEP: 1913 indirdep = WK_INDIRDEP(wk); 1914 /* 1915 * None of the indirect pointers will ever be visible, 1916 * so they can simply be tossed. GOINGAWAY ensures 1917 * that allocated pointers will be saved in the buffer 1918 * cache until they are freed. Note that they will 1919 * only be able to be found by their physical address 1920 * since the inode mapping the logical address will 1921 * be gone. 
The save buffer used for the safe copy 1922 * was allocated in setup_allocindir_phase2 using 1923 * the physical address so it could be used for this 1924 * purpose. Hence we swap the safe copy with the real 1925 * copy, allowing the safe copy to be freed and holding 1926 * on to the real copy for later use in indir_trunc. 1927 */ 1928 if (indirdep->ir_state & GOINGAWAY) { 1929 FREE_LOCK(&lk); 1930 panic("deallocate_dependencies: already gone"); 1931 } 1932 indirdep->ir_state |= GOINGAWAY; 1933 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 1934 free_allocindir(aip, inodedep); 1935 if (bp->b_lblkno >= 0 || 1936 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 1937 FREE_LOCK(&lk); 1938 panic("deallocate_dependencies: not indir"); 1939 } 1940 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 1941 bp->b_bcount); 1942 WORKLIST_REMOVE(wk); 1943 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 1944 continue; 1945 1946 case D_PAGEDEP: 1947 pagedep = WK_PAGEDEP(wk); 1948 /* 1949 * None of the directory additions will ever be 1950 * visible, so they can simply be tossed. 1951 */ 1952 for (i = 0; i < DAHASHSZ; i++) 1953 while ((dap = 1954 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 1955 free_diradd(dap); 1956 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 1957 free_diradd(dap); 1958 /* 1959 * Copy any directory remove dependencies to the list 1960 * to be processed after the zero'ed inode is written. 1961 * If the inode has already been written, then they 1962 * can be dumped directly onto the work list. 1963 */ 1964 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 1965 LIST_REMOVE(dirrem, dm_next); 1966 dirrem->dm_dirinum = pagedep->pd_ino; 1967 if (inodedep == NULL || 1968 (inodedep->id_state & ALLCOMPLETE) == 1969 ALLCOMPLETE) 1970 add_to_worklist(&dirrem->dm_list); 1971 else 1972 WORKLIST_INSERT(&inodedep->id_bufwait, 1973 &dirrem->dm_list); 1974 } 1975 if ((pagedep->pd_state & NEWBLOCK) != 0) { 1976 LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list) 1977 if (wk->wk_type == D_NEWDIRBLK && 1978 WK_NEWDIRBLK(wk)->db_pagedep == 1979 pagedep) 1980 break; 1981 if (wk != NULL) { 1982 WORKLIST_REMOVE(wk); 1983 free_newdirblk(WK_NEWDIRBLK(wk)); 1984 } else { 1985 FREE_LOCK(&lk); 1986 panic("deallocate_dependencies: " 1987 "lost pagedep"); 1988 } 1989 } 1990 WORKLIST_REMOVE(&pagedep->pd_list); 1991 LIST_REMOVE(pagedep, pd_hash); 1992 WORKITEM_FREE(pagedep, D_PAGEDEP); 1993 continue; 1994 1995 case D_ALLOCINDIR: 1996 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 1997 continue; 1998 1999 case D_ALLOCDIRECT: 2000 case D_INODEDEP: 2001 FREE_LOCK(&lk); 2002 panic("deallocate_dependencies: Unexpected type %s", 2003 TYPENAME(wk->wk_type)); 2004 /* NOTREACHED */ 2005 2006 default: 2007 FREE_LOCK(&lk); 2008 panic("deallocate_dependencies: Unknown type %s", 2009 TYPENAME(wk->wk_type)); 2010 /* NOTREACHED */ 2011 } 2012 } 2013 } 2014 2015 /* 2016 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2017 * This routine must be called with splbio interrupts blocked.
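 *
 * A typical call, as in softdep_setup_freeblocks() above (shown only
 * to illustrate the locking protocol):
 *
 *	ACQUIRE_LOCK(&lk);
 *	while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
 *		free_allocdirect(&inodedep->id_inoupdt, adp, delay);
 *	FREE_LOCK(&lk);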
2018 */ 2019 static void 2020 free_allocdirect(adphead, adp, delay) 2021 struct allocdirectlst *adphead; 2022 struct allocdirect *adp; 2023 int delay; 2024 { 2025 struct newdirblk *newdirblk; 2026 struct worklist *wk; 2027 2028 #ifdef DEBUG 2029 if (lk.lkt_held == NOHOLDER) 2030 panic("free_allocdirect: lock not held"); 2031 #endif 2032 if ((adp->ad_state & DEPCOMPLETE) == 0) 2033 LIST_REMOVE(adp, ad_deps); 2034 TAILQ_REMOVE(adphead, adp, ad_next); 2035 if ((adp->ad_state & COMPLETE) == 0) 2036 WORKLIST_REMOVE(&adp->ad_list); 2037 if (adp->ad_freefrag != NULL) { 2038 if (delay) 2039 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2040 &adp->ad_freefrag->ff_list); 2041 else 2042 add_to_worklist(&adp->ad_freefrag->ff_list); 2043 } 2044 if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) { 2045 newdirblk = WK_NEWDIRBLK(wk); 2046 WORKLIST_REMOVE(&newdirblk->db_list); 2047 if (LIST_FIRST(&adp->ad_newdirblk) != NULL) 2048 panic("free_allocdirect: extra newdirblk"); 2049 if (delay) 2050 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2051 &newdirblk->db_list); 2052 else 2053 free_newdirblk(newdirblk); 2054 } 2055 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2056 } 2057 2058 /* 2059 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 2060 * This routine must be called with splbio interrupts blocked. 2061 */ 2062 static void 2063 free_newdirblk(newdirblk) 2064 struct newdirblk *newdirblk; 2065 { 2066 struct pagedep *pagedep; 2067 struct diradd *dap; 2068 int i; 2069 2070 #ifdef DEBUG 2071 if (lk.lkt_held == NOHOLDER) 2072 panic("free_newdirblk: lock not held"); 2073 #endif 2074 /* 2075 * If the pagedep is still linked onto the directory buffer 2076 * dependency chain, then some of the entries on the 2077 * pd_pendinghd list may not be committed to disk yet. In 2078 * this case, we will simply clear the NEWBLOCK flag and 2079 * let the pd_pendinghd list be processed when the pagedep 2080 * is next written. If the pagedep is no longer on the buffer 2081 * dependency chain, then all the entries on the pd_pending 2082 * list are committed to disk and we can free them here. 2083 */ 2084 pagedep = newdirblk->db_pagedep; 2085 pagedep->pd_state &= ~NEWBLOCK; 2086 if ((pagedep->pd_state & ONWORKLIST) == 0) 2087 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2088 free_diradd(dap); 2089 /* 2090 * If no dependencies remain, the pagedep will be freed. 2091 */ 2092 for (i = 0; i < DAHASHSZ; i++) 2093 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 2094 break; 2095 if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) { 2096 LIST_REMOVE(pagedep, pd_hash); 2097 WORKITEM_FREE(pagedep, D_PAGEDEP); 2098 } 2099 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2100 } 2101 2102 /* 2103 * Prepare an inode to be freed. The actual free operation is not 2104 * done until the zero'ed inode has been written to disk. 2105 */ 2106 void 2107 softdep_freefile(pvp, ino, mode) 2108 struct vnode *pvp; 2109 ino_t ino; 2110 int mode; 2111 { 2112 struct inode *ip = VTOI(pvp); 2113 struct inodedep *inodedep; 2114 struct freefile *freefile; 2115 2116 /* 2117 * This sets up the inode de-allocation dependency. 
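 * The freefile work item created here cannot be processed until the
 * zero'ed inode reaches the disk; it is queued on id_inowait below
 * unless check_inode_unwritten() shows that the inode never made it
 * to disk at all, in which case it is handled immediately.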
2118 */ 2119 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2120 M_FREEFILE, M_SOFTDEP_FLAGS); 2121 freefile->fx_list.wk_type = D_FREEFILE; 2122 freefile->fx_list.wk_state = 0; 2123 freefile->fx_mode = mode; 2124 freefile->fx_oldinum = ino; 2125 freefile->fx_devvp = ip->i_devvp; 2126 freefile->fx_mnt = ITOV(ip)->v_mount; 2127 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 2128 ip->i_fs->fs_pendinginodes += 1; 2129 2130 /* 2131 * If the inodedep does not exist, then the zero'ed inode has 2132 * been written to disk. If the allocated inode has never been 2133 * written to disk, then the on-disk inode is zero'ed. In either 2134 * case we can free the file immediately. 2135 */ 2136 ACQUIRE_LOCK(&lk); 2137 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2138 check_inode_unwritten(inodedep)) { 2139 FREE_LOCK(&lk); 2140 handle_workitem_freefile(freefile); 2141 return; 2142 } 2143 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2144 FREE_LOCK(&lk); 2145 } 2146 2147 /* 2148 * Check to see if an inode has never been written to disk. If 2149 * so free the inodedep and return success, otherwise return failure. 2150 * This routine must be called with splbio interrupts blocked. 2151 * 2152 * If we still have a bitmap dependency, then the inode has never 2153 * been written to disk. Drop the dependency as it is no longer 2154 * necessary since the inode is being deallocated. We set the 2155 * ALLCOMPLETE flags since the bitmap now properly shows that the 2156 * inode is not allocated. Even if the inode is actively being 2157 * written, it has been rolled back to its zero'ed state, so we 2158 * are ensured that a zero inode is what is on the disk. For short 2159 * lived files, this change will usually result in removing all the 2160 * dependencies from the inode so that it can be freed immediately. 2161 */ 2162 static int 2163 check_inode_unwritten(inodedep) 2164 struct inodedep *inodedep; 2165 { 2166 2167 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2168 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2169 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2170 LIST_FIRST(&inodedep->id_inowait) != NULL || 2171 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2172 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2173 inodedep->id_nlinkdelta != 0) 2174 return (0); 2175 inodedep->id_state |= ALLCOMPLETE; 2176 LIST_REMOVE(inodedep, id_deps); 2177 inodedep->id_buf = NULL; 2178 if (inodedep->id_state & ONWORKLIST) 2179 WORKLIST_REMOVE(&inodedep->id_list); 2180 if (inodedep->id_savedino != NULL) { 2181 FREE(inodedep->id_savedino, M_INODEDEP); 2182 inodedep->id_savedino = NULL; 2183 } 2184 if (free_inodedep(inodedep) == 0) { 2185 FREE_LOCK(&lk); 2186 panic("check_inode_unwritten: busy inode"); 2187 } 2188 return (1); 2189 } 2190 2191 /* 2192 * Try to free an inodedep structure. Return 1 if it could be freed. 
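 *
 * Callers typically pair this with a lookup and simply ignore a zero
 * return, leaving the inodedep in place until its remaining
 * dependencies clear, e.g. (illustration only):
 *
 *	if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0)
 *		(void) free_inodedep(inodedep);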
*/ 2194 static int 2195 free_inodedep(inodedep) 2196 struct inodedep *inodedep; 2197 { 2198 2199 if ((inodedep->id_state & ONWORKLIST) != 0 || 2200 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2201 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2202 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2203 LIST_FIRST(&inodedep->id_inowait) != NULL || 2204 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2205 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2206 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino != NULL) 2207 return (0); 2208 LIST_REMOVE(inodedep, id_hash); 2209 WORKITEM_FREE(inodedep, D_INODEDEP); 2210 num_inodedep -= 1; 2211 return (1); 2212 } 2213 2214 /* 2215 * This workitem routine performs the block de-allocation. 2216 * The workitem is added to the pending list after the updated 2217 * inode block has been written to disk. As mentioned above, 2218 * checks regarding the number of blocks de-allocated (compared 2219 * to the number of blocks allocated for the file) are also 2220 * performed in this function. 2221 */ 2222 static void 2223 handle_workitem_freeblocks(freeblks, flags) 2224 struct freeblks *freeblks; 2225 int flags; 2226 { 2227 struct inode *ip; 2228 struct vnode *vp; 2229 ufs_daddr_t bn; 2230 struct fs *fs; 2231 int i, level, bsize; 2232 long nblocks, blocksreleased = 0; 2233 int error, allerror = 0; 2234 ufs_lbn_t baselbns[NIADDR], tmpval; 2235 2236 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2237 tmpval = 1; 2238 baselbns[0] = NDADDR; 2239 for (i = 1; i < NIADDR; i++) { 2240 tmpval *= NINDIR(fs); 2241 baselbns[i] = baselbns[i - 1] + tmpval; 2242 } 2243 nblocks = btodb(fs->fs_bsize); 2244 blocksreleased = 0; 2245 /* 2246 * Indirect blocks first. 2247 */ 2248 for (level = (NIADDR - 1); level >= 0; level--) { 2249 if ((bn = freeblks->fb_iblks[level]) == 0) 2250 continue; 2251 if ((error = indir_trunc(freeblks, fsbtodb(fs, bn), level, 2252 baselbns[level], &blocksreleased)) != 0) 2253 allerror = error; 2254 ffs_blkfree(fs, freeblks->fb_devvp, bn, fs->fs_bsize, 2255 freeblks->fb_previousinum); 2256 fs->fs_pendingblocks -= nblocks; 2257 blocksreleased += nblocks; 2258 } 2259 /* 2260 * All direct blocks or frags. 2261 */ 2262 for (i = (NDADDR - 1); i >= 0; i--) { 2263 if ((bn = freeblks->fb_dblks[i]) == 0) 2264 continue; 2265 bsize = sblksize(fs, freeblks->fb_oldsize, i); 2266 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize, 2267 freeblks->fb_previousinum); 2268 fs->fs_pendingblocks -= btodb(bsize); 2269 blocksreleased += btodb(bsize); 2270 } 2271 /* 2272 * If we still have not finished background cleanup, then check 2273 * to see if the block count needs to be adjusted. 2274 */ 2275 if (freeblks->fb_chkcnt != blocksreleased && 2276 (fs->fs_flags & FS_UNCLEAN) != 0 && (flags & LK_NOWAIT) == 0 && 2277 VFS_VGET(freeblks->fb_mnt, freeblks->fb_previousinum, &vp) == 0) { 2278 ip = VTOI(vp); 2279 ip->i_blocks += freeblks->fb_chkcnt - blocksreleased; 2280 ip->i_flag |= IN_CHANGE; 2281 vput(vp); 2282 } 2283 2284 #ifdef DIAGNOSTIC 2285 if (freeblks->fb_chkcnt != blocksreleased && 2286 ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0)) 2287 printf("handle_workitem_freeblocks: block count\n"); 2288 if (allerror) 2289 softdep_error("handle_workitem_freeblks", allerror); 2290 #endif /* DIAGNOSTIC */ 2291 2292 WORKITEM_FREE(freeblks, D_FREEBLKS); 2293 } 2294 2295 /* 2296 * Release blocks associated with the inode ip and stored in the indirect 2297 * block dbn.
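 * For example, on a filesystem with 8K blocks and 4-byte block
 * pointers, NINDIR(fs) == 2048, so handle_workitem_freeblocks()
 * above passes lbn == NDADDR for the single indirect block,
 * NDADDR + 2048 for the double, and NDADDR + 2048 + 2048*2048 for
 * the triple (its baselbns[] values); lbnadd below is NINDIR(fs)
 * raised to the power of level.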
If level is greater than SINGLE, the block is an indirect block 2298 * and recursive calls to indirtrunc must be used to cleanse other indirect 2299 * blocks. 2300 */ 2301 static int 2302 indir_trunc(freeblks, dbn, level, lbn, countp) 2303 struct freeblks *freeblks; 2304 ufs_daddr_t dbn; 2305 int level; 2306 ufs_lbn_t lbn; 2307 long *countp; 2308 { 2309 struct buf *bp; 2310 ufs_daddr_t *bap; 2311 ufs_daddr_t nb; 2312 struct fs *fs; 2313 struct worklist *wk; 2314 struct indirdep *indirdep; 2315 int i, lbnadd, nblocks; 2316 int error, allerror = 0; 2317 2318 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2319 lbnadd = 1; 2320 for (i = level; i > 0; i--) 2321 lbnadd *= NINDIR(fs); 2322 /* 2323 * Get buffer of block pointers to be freed. This routine is not 2324 * called until the zero'ed inode has been written, so it is safe 2325 * to free blocks as they are encountered. Because the inode has 2326 * been zero'ed, calls to bmap on these blocks will fail. So, we 2327 * have to use the on-disk address and the block device for the 2328 * filesystem to look them up. If the file was deleted before its 2329 * indirect blocks were all written to disk, the routine that set 2330 * us up (deallocate_dependencies) will have arranged to leave 2331 * a complete copy of the indirect block in memory for our use. 2332 * Otherwise we have to read the blocks in from the disk. 2333 */ 2334 ACQUIRE_LOCK(&lk); 2335 if ((bp = incore(freeblks->fb_devvp, dbn)) != NULL && 2336 (wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2337 if (wk->wk_type != D_INDIRDEP || 2338 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2339 (indirdep->ir_state & GOINGAWAY) == 0) { 2340 FREE_LOCK(&lk); 2341 panic("indir_trunc: lost indirdep"); 2342 } 2343 WORKLIST_REMOVE(wk); 2344 WORKITEM_FREE(indirdep, D_INDIRDEP); 2345 if (LIST_FIRST(&bp->b_dep) != NULL) { 2346 FREE_LOCK(&lk); 2347 panic("indir_trunc: dangling dep"); 2348 } 2349 FREE_LOCK(&lk); 2350 } else { 2351 FREE_LOCK(&lk); 2352 error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 2353 NOCRED, &bp); 2354 if (error) { 2355 brelse(bp); 2356 return (error); 2357 } 2358 } 2359 /* 2360 * Recursively free indirect blocks. 2361 */ 2362 bap = (ufs_daddr_t *)bp->b_data; 2363 nblocks = btodb(fs->fs_bsize); 2364 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2365 if ((nb = bap[i]) == 0) 2366 continue; 2367 if (level != 0) { 2368 if ((error = indir_trunc(freeblks, fsbtodb(fs, nb), 2369 level - 1, lbn + (i * lbnadd), countp)) != 0) 2370 allerror = error; 2371 } 2372 ffs_blkfree(fs, freeblks->fb_devvp, nb, fs->fs_bsize, 2373 freeblks->fb_previousinum); 2374 fs->fs_pendingblocks -= nblocks; 2375 *countp += nblocks; 2376 } 2377 bp->b_flags |= B_INVAL | B_NOCACHE; 2378 brelse(bp); 2379 return (allerror); 2380 } 2381 2382 /* 2383 * Free an allocindir. 2384 * This routine must be called with splbio interrupts blocked. 
2385 */ 2386 static void 2387 free_allocindir(aip, inodedep) 2388 struct allocindir *aip; 2389 struct inodedep *inodedep; 2390 { 2391 struct freefrag *freefrag; 2392 2393 #ifdef DEBUG 2394 if (lk.lkt_held == NOHOLDER) 2395 panic("free_allocindir: lock not held"); 2396 #endif 2397 if ((aip->ai_state & DEPCOMPLETE) == 0) 2398 LIST_REMOVE(aip, ai_deps); 2399 if (aip->ai_state & ONWORKLIST) 2400 WORKLIST_REMOVE(&aip->ai_list); 2401 LIST_REMOVE(aip, ai_next); 2402 if ((freefrag = aip->ai_freefrag) != NULL) { 2403 if (inodedep == NULL) 2404 add_to_worklist(&freefrag->ff_list); 2405 else 2406 WORKLIST_INSERT(&inodedep->id_bufwait, 2407 &freefrag->ff_list); 2408 } 2409 WORKITEM_FREE(aip, D_ALLOCINDIR); 2410 } 2411 2412 /* 2413 * Directory entry addition dependencies. 2414 * 2415 * When adding a new directory entry, the inode (with its incremented link 2416 * count) must be written to disk before the directory entry's pointer to it. 2417 * Also, if the inode is newly allocated, the corresponding freemap must be 2418 * updated (on disk) before the directory entry's pointer. These requirements 2419 * are met via undo/redo on the directory entry's pointer, which consists 2420 * simply of the inode number. 2421 * 2422 * As directory entries are added and deleted, the free space within a 2423 * directory block can become fragmented. The ufs file system will compact 2424 * a fragmented directory block to make space for a new entry. When this 2425 * occurs, the offsets of previously added entries change. Any "diradd" 2426 * dependency structures corresponding to these entries must be updated with 2427 * the new offsets. 2428 */ 2429 2430 /* 2431 * This routine is called after the in-memory inode's link 2432 * count has been incremented, but before the directory entry's 2433 * pointer to the inode has been set. 2434 */ 2435 int 2436 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 2437 struct buf *bp; /* buffer containing directory block */ 2438 struct inode *dp; /* inode for directory */ 2439 off_t diroffset; /* offset of new entry in directory */ 2440 long newinum; /* inode referenced by new directory entry */ 2441 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2442 int isnewblk; /* entry is in a newly allocated block */ 2443 { 2444 int offset; /* offset of new entry within directory block */ 2445 ufs_lbn_t lbn; /* block in directory containing new entry */ 2446 struct fs *fs; 2447 struct diradd *dap; 2448 struct allocdirect *adp; 2449 struct pagedep *pagedep; 2450 struct inodedep *inodedep; 2451 struct newdirblk *newdirblk = 0; 2452 struct mkdir *mkdir1, *mkdir2; 2453 2454 /* 2455 * Whiteouts have no dependencies. 
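 * (WINO is the inode number reserved for whiteout entries, so there
 * is no real inode whose bitmap or link count must reach the disk
 * before this directory entry does.)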
*/ 2457 if (newinum == WINO) { 2458 if (newdirbp != NULL) 2459 bdwrite(newdirbp); 2460 return (0); 2461 } 2462 2463 fs = dp->i_fs; 2464 lbn = lblkno(fs, diroffset); 2465 offset = blkoff(fs, diroffset); 2466 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2467 M_SOFTDEP_FLAGS|M_ZERO); 2468 dap->da_list.wk_type = D_DIRADD; 2469 dap->da_offset = offset; 2470 dap->da_newinum = newinum; 2471 dap->da_state = ATTACHED; 2472 if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) { 2473 MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk), 2474 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 2475 newdirblk->db_list.wk_type = D_NEWDIRBLK; 2476 newdirblk->db_state = 0; 2477 } 2478 if (newdirbp == NULL) { 2479 dap->da_state |= DEPCOMPLETE; 2480 ACQUIRE_LOCK(&lk); 2481 } else { 2482 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2483 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2484 M_SOFTDEP_FLAGS); 2485 mkdir1->md_list.wk_type = D_MKDIR; 2486 mkdir1->md_state = MKDIR_BODY; 2487 mkdir1->md_diradd = dap; 2488 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2489 M_SOFTDEP_FLAGS); 2490 mkdir2->md_list.wk_type = D_MKDIR; 2491 mkdir2->md_state = MKDIR_PARENT; 2492 mkdir2->md_diradd = dap; 2493 /* 2494 * Dependency on "." and ".." being written to disk. 2495 */ 2496 mkdir1->md_buf = newdirbp; 2497 ACQUIRE_LOCK(&lk); 2498 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2499 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2500 FREE_LOCK(&lk); 2501 bdwrite(newdirbp); 2502 /* 2503 * Dependency on link count increase for parent directory 2504 */ 2505 ACQUIRE_LOCK(&lk); 2506 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0 2507 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2508 dap->da_state &= ~MKDIR_PARENT; 2509 WORKITEM_FREE(mkdir2, D_MKDIR); 2510 } else { 2511 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2512 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2513 } 2514 } 2515 /* 2516 * Link into parent directory pagedep to await its being written. 2517 */ 2518 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2519 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2520 dap->da_pagedep = pagedep; 2521 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2522 da_pdlist); 2523 /* 2524 * Link into its inodedep. Put it on the id_bufwait list if the inode 2525 * is not yet written. If it is written, do the post-inode write 2526 * processing to put it on the id_pendinghd list. 2527 */ 2528 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2529 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2530 diradd_inode_written(dap, inodedep); 2531 else 2532 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2533 if (isnewblk) { 2534 /* 2535 * Directories growing into indirect blocks are rare 2536 * enough, and new block allocation in those cases 2537 * rarer still, that we choose not 2538 * to bother tracking them. Rather we simply force the 2539 * new directory entry to disk. 2540 */ 2541 if (lbn >= NDADDR) { 2542 FREE_LOCK(&lk); 2543 /* 2544 * We only have a new allocation when at the 2545 * beginning of a new block, not when we are 2546 * expanding into an existing block. 2547 */ 2548 if (blkoff(fs, diroffset) == 0) 2549 return (1); 2550 return (0); 2551 } 2552 /* 2553 * We only have a new allocation when at the beginning 2554 * of a new fragment, not when we are expanding into an 2555 * existing fragment. Also, there is nothing to do if we 2556 * are already tracking this block.
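 * For example, with 1K fragments, an entry written at diroffset 512
 * lands inside an already-allocated fragment (fragoff(fs, diroffset)
 * != 0 below) and needs no newdirblk tracking.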
2557 */ 2558 if (fragoff(fs, diroffset) != 0) { 2559 FREE_LOCK(&lk); 2560 return (0); 2561 } 2562 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2563 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2564 FREE_LOCK(&lk); 2565 return (0); 2566 } 2567 /* 2568 * Find our associated allocdirect and have it track us. 2569 */ 2570 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0) 2571 panic("softdep_setup_directory_add: lost inodedep"); 2572 adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst); 2573 if (adp == NULL || adp->ad_lbn != lbn) { 2574 FREE_LOCK(&lk); 2575 panic("softdep_setup_directory_add: lost entry"); 2576 } 2577 pagedep->pd_state |= NEWBLOCK; 2578 newdirblk->db_pagedep = pagedep; 2579 WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list); 2580 } 2581 FREE_LOCK(&lk); 2582 return (0); 2583 } 2584 2585 /* 2586 * This procedure is called to change the offset of a directory 2587 * entry when compacting a directory block which must be owned 2588 * exclusively by the caller. Note that the actual entry movement 2589 * must be done in this procedure to ensure that no I/O completions 2590 * occur while the move is in progress. 2591 */ 2592 void 2593 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2594 struct inode *dp; /* inode for directory */ 2595 caddr_t base; /* address of dp->i_offset */ 2596 caddr_t oldloc; /* address of old directory location */ 2597 caddr_t newloc; /* address of new directory location */ 2598 int entrysize; /* size of directory entry */ 2599 { 2600 int offset, oldoffset, newoffset; 2601 struct pagedep *pagedep; 2602 struct diradd *dap; 2603 ufs_lbn_t lbn; 2604 2605 ACQUIRE_LOCK(&lk); 2606 lbn = lblkno(dp->i_fs, dp->i_offset); 2607 offset = blkoff(dp->i_fs, dp->i_offset); 2608 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2609 goto done; 2610 oldoffset = offset + (oldloc - base); 2611 newoffset = offset + (newloc - base); 2612 2613 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2614 if (dap->da_offset != oldoffset) 2615 continue; 2616 dap->da_offset = newoffset; 2617 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2618 break; 2619 LIST_REMOVE(dap, da_pdlist); 2620 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2621 dap, da_pdlist); 2622 break; 2623 } 2624 if (dap == NULL) { 2625 2626 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2627 if (dap->da_offset == oldoffset) { 2628 dap->da_offset = newoffset; 2629 break; 2630 } 2631 } 2632 } 2633 done: 2634 bcopy(oldloc, newloc, entrysize); 2635 FREE_LOCK(&lk); 2636 } 2637 2638 /* 2639 * Free a diradd dependency structure. This routine must be called 2640 * with splbio interrupts blocked. 
2641 */ 2642 static void 2643 free_diradd(dap) 2644 struct diradd *dap; 2645 { 2646 struct dirrem *dirrem; 2647 struct pagedep *pagedep; 2648 struct inodedep *inodedep; 2649 struct mkdir *mkdir, *nextmd; 2650 2651 #ifdef DEBUG 2652 if (lk.lkt_held == NOHOLDER) 2653 panic("free_diradd: lock not held"); 2654 #endif 2655 WORKLIST_REMOVE(&dap->da_list); 2656 LIST_REMOVE(dap, da_pdlist); 2657 if ((dap->da_state & DIRCHG) == 0) { 2658 pagedep = dap->da_pagedep; 2659 } else { 2660 dirrem = dap->da_previous; 2661 pagedep = dirrem->dm_pagedep; 2662 dirrem->dm_dirinum = pagedep->pd_ino; 2663 add_to_worklist(&dirrem->dm_list); 2664 } 2665 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2666 0, &inodedep) != 0) 2667 (void) free_inodedep(inodedep); 2668 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2669 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2670 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2671 if (mkdir->md_diradd != dap) 2672 continue; 2673 dap->da_state &= ~mkdir->md_state; 2674 WORKLIST_REMOVE(&mkdir->md_list); 2675 LIST_REMOVE(mkdir, md_mkdirs); 2676 WORKITEM_FREE(mkdir, D_MKDIR); 2677 } 2678 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2679 FREE_LOCK(&lk); 2680 panic("free_diradd: unfound ref"); 2681 } 2682 } 2683 WORKITEM_FREE(dap, D_DIRADD); 2684 } 2685 2686 /* 2687 * Directory entry removal dependencies. 2688 * 2689 * When removing a directory entry, the entry's inode pointer must be 2690 * zero'ed on disk before the corresponding inode's link count is decremented 2691 * (possibly freeing the inode for re-use). This dependency is handled by 2692 * updating the directory entry but delaying the inode count reduction until 2693 * after the directory block has been written to disk. After this point, the 2694 * inode count can be decremented whenever it is convenient. 2695 */ 2696 2697 /* 2698 * This routine should be called immediately after removing 2699 * a directory entry. The inode's link count should not be 2700 * decremented by the calling procedure -- the soft updates 2701 * code will do this task when it is safe. 2702 */ 2703 void 2704 softdep_setup_remove(bp, dp, ip, isrmdir) 2705 struct buf *bp; /* buffer containing directory block */ 2706 struct inode *dp; /* inode for the directory being modified */ 2707 struct inode *ip; /* inode for directory entry being removed */ 2708 int isrmdir; /* indicates if doing RMDIR */ 2709 { 2710 struct dirrem *dirrem, *prevdirrem; 2711 2712 /* 2713 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2714 */ 2715 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2716 2717 /* 2718 * If the COMPLETE flag is clear, then there were no active 2719 * entries and we want to roll back to a zeroed entry until 2720 * the new inode is committed to disk. If the COMPLETE flag is 2721 * set then we have deleted an entry that never made it to 2722 * disk. If the entry we deleted resulted from a name change, 2723 * then the old name still resides on disk. We cannot delete 2724 * its inode (returned to us in prevdirrem) until the zeroed 2725 * directory entry gets to disk. The new inode has never been 2726 * referenced on the disk, so can be deleted immediately. 
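 * In short, paraphrasing the two branches below: with COMPLETE clear
 * the dirrem waits on the pagedep's pd_dirremhd list; with COMPLETE
 * set the dirrem is handed straight to handle_workitem_remove(),
 * while any prevdirrem from a name change still waits on
 * pd_dirremhd.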
*/ 2728 if ((dirrem->dm_state & COMPLETE) == 0) { 2729 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2730 dm_next); 2731 FREE_LOCK(&lk); 2732 } else { 2733 if (prevdirrem != NULL) 2734 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2735 prevdirrem, dm_next); 2736 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2737 FREE_LOCK(&lk); 2738 handle_workitem_remove(dirrem); 2739 } 2740 } 2741 2742 /* 2743 * Allocate a new dirrem if appropriate and return it along with 2744 * its associated pagedep. Called without a lock, returns with lock. 2745 */ 2746 static long num_dirrem; /* number of dirrem allocated */ 2747 static struct dirrem * 2748 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2749 struct buf *bp; /* buffer containing directory block */ 2750 struct inode *dp; /* inode for the directory being modified */ 2751 struct inode *ip; /* inode for directory entry being removed */ 2752 int isrmdir; /* indicates if doing RMDIR */ 2753 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2754 { 2755 int offset; 2756 ufs_lbn_t lbn; 2757 struct diradd *dap; 2758 struct dirrem *dirrem; 2759 struct pagedep *pagedep; 2760 2761 /* 2762 * Whiteouts have no deletion dependencies. 2763 */ 2764 if (ip == NULL) 2765 panic("newdirrem: whiteout"); 2766 /* 2767 * If we are over our limit, try to improve the situation. 2768 * Limiting the number of dirrem structures will also limit 2769 * the number of freefile and freeblks structures. 2770 */ 2771 if (num_dirrem > max_softdeps / 2) 2772 (void) request_cleanup(FLUSH_REMOVE, 0); 2773 num_dirrem += 1; 2774 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 2775 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 2776 dirrem->dm_list.wk_type = D_DIRREM; 2777 dirrem->dm_state = isrmdir ? RMDIR : 0; 2778 dirrem->dm_mnt = ITOV(ip)->v_mount; 2779 dirrem->dm_oldinum = ip->i_number; 2780 *prevdirremp = NULL; 2781 2782 ACQUIRE_LOCK(&lk); 2783 lbn = lblkno(dp->i_fs, dp->i_offset); 2784 offset = blkoff(dp->i_fs, dp->i_offset); 2785 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2786 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2787 dirrem->dm_pagedep = pagedep; 2788 /* 2789 * Check for a diradd dependency for the same directory entry. 2790 * If present, then both dependencies become obsolete and can 2791 * be de-allocated. Check for an entry on both the pd_diraddhd 2792 * list and the pd_pendinghd list. 2793 */ 2794 2795 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 2796 if (dap->da_offset == offset) 2797 break; 2798 if (dap == NULL) { 2799 2800 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 2801 if (dap->da_offset == offset) 2802 break; 2803 if (dap == NULL) 2804 return (dirrem); 2805 } 2806 /* 2807 * Must be ATTACHED at this point. 2808 */ 2809 if ((dap->da_state & ATTACHED) == 0) { 2810 FREE_LOCK(&lk); 2811 panic("newdirrem: not ATTACHED"); 2812 } 2813 if (dap->da_newinum != ip->i_number) { 2814 FREE_LOCK(&lk); 2815 panic("newdirrem: inum %d should be %d", 2816 ip->i_number, dap->da_newinum); 2817 } 2818 /* 2819 * If we are deleting a changed name that never made it to disk, 2820 * then return the dirrem describing the previous inode (which 2821 * represents the inode currently referenced from this entry on disk). 2822 */ 2823 if ((dap->da_state & DIRCHG) != 0) { 2824 *prevdirremp = dap->da_previous; 2825 dap->da_state &= ~DIRCHG; 2826 dap->da_pagedep = pagedep; 2827 } 2828 /* 2829 * We are deleting an entry that never made it to disk. 2830 * Mark it COMPLETE so we can delete its inode immediately.
2831 */ 2832 dirrem->dm_state |= COMPLETE; 2833 free_diradd(dap); 2834 return (dirrem); 2835 } 2836 2837 /* 2838 * Directory entry change dependencies. 2839 * 2840 * Changing an existing directory entry requires that an add operation 2841 * be completed first followed by a deletion. The semantics for the addition 2842 * are identical to the description of adding a new entry above except 2843 * that the rollback is to the old inode number rather than zero. Once 2844 * the addition dependency is completed, the removal is done as described 2845 * in the removal routine above. 2846 */ 2847 2848 /* 2849 * This routine should be called immediately after changing 2850 * a directory entry. The inode's link count should not be 2851 * decremented by the calling procedure -- the soft updates 2852 * code will perform this task when it is safe. 2853 */ 2854 void 2855 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 2856 struct buf *bp; /* buffer containing directory block */ 2857 struct inode *dp; /* inode for the directory being modified */ 2858 struct inode *ip; /* inode for directory entry being removed */ 2859 long newinum; /* new inode number for changed entry */ 2860 int isrmdir; /* indicates if doing RMDIR */ 2861 { 2862 int offset; 2863 struct diradd *dap = NULL; 2864 struct dirrem *dirrem, *prevdirrem; 2865 struct pagedep *pagedep; 2866 struct inodedep *inodedep; 2867 2868 offset = blkoff(dp->i_fs, dp->i_offset); 2869 2870 /* 2871 * Whiteouts do not need diradd dependencies. 2872 */ 2873 if (newinum != WINO) { 2874 MALLOC(dap, struct diradd *, sizeof(struct diradd), 2875 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 2876 dap->da_list.wk_type = D_DIRADD; 2877 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 2878 dap->da_offset = offset; 2879 dap->da_newinum = newinum; 2880 } 2881 2882 /* 2883 * Allocate a new dirrem and ACQUIRE_LOCK. 2884 */ 2885 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2886 pagedep = dirrem->dm_pagedep; 2887 /* 2888 * The possible values for isrmdir: 2889 * 0 - non-directory file rename 2890 * 1 - directory rename within same directory 2891 * inum - directory rename to new directory of given inode number 2892 * When renaming to a new directory, we are both deleting and 2893 * creating a new directory entry, so the link count on the new 2894 * directory should not change. Thus we do not need the followup 2895 * dirrem which is usually done in handle_workitem_remove. We set 2896 * the DIRCHG flag to tell handle_workitem_remove to skip the 2897 * followup dirrem. 2898 */ 2899 if (isrmdir > 1) 2900 dirrem->dm_state |= DIRCHG; 2901 2902 /* 2903 * Whiteouts have no additional dependencies, 2904 * so just put the dirrem on the correct list. 2905 */ 2906 if (newinum == WINO) { 2907 if ((dirrem->dm_state & COMPLETE) == 0) { 2908 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 2909 dm_next); 2910 } else { 2911 dirrem->dm_dirinum = pagedep->pd_ino; 2912 add_to_worklist(&dirrem->dm_list); 2913 } 2914 FREE_LOCK(&lk); 2915 return; 2916 } 2917 2918 /* 2919 * If the COMPLETE flag is clear, then there were no active 2920 * entries and we want to roll back to the previous inode until 2921 * the new inode is committed to disk. If the COMPLETE flag is 2922 * set, then we have deleted an entry that never made it to disk. 2923 * If the entry we deleted resulted from a name change, then the old 2924 * inode reference still resides on disk. Any rollback that we do 2925 * needs to be to that old inode (returned to us in prevdirrem). 
If 2926 * the entry we deleted resulted from a create, then there is 2927 * no entry on the disk, so we want to roll back to zero rather 2928 * than the uncommitted inode. In either of the COMPLETE cases we 2929 * want to immediately free the unwritten and unreferenced inode. 2930 */ 2931 if ((dirrem->dm_state & COMPLETE) == 0) { 2932 dap->da_previous = dirrem; 2933 } else { 2934 if (prevdirrem != NULL) { 2935 dap->da_previous = prevdirrem; 2936 } else { 2937 dap->da_state &= ~DIRCHG; 2938 dap->da_pagedep = pagedep; 2939 } 2940 dirrem->dm_dirinum = pagedep->pd_ino; 2941 add_to_worklist(&dirrem->dm_list); 2942 } 2943 /* 2944 * Link into its inodedep. Put it on the id_bufwait list if the inode 2945 * is not yet written. If it is written, do the post-inode write 2946 * processing to put it on the id_pendinghd list. 2947 */ 2948 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 2949 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2950 dap->da_state |= COMPLETE; 2951 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 2952 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 2953 } else { 2954 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 2955 dap, da_pdlist); 2956 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2957 } 2958 FREE_LOCK(&lk); 2959 } 2960 2961 /* 2962 * Called whenever the link count on an inode is changed. 2963 * It creates an inode dependency so that the new reference(s) 2964 * to the inode cannot be committed to disk until the updated 2965 * inode has been written. 2966 */ 2967 void 2968 softdep_change_linkcnt(ip) 2969 struct inode *ip; /* the inode with the increased link count */ 2970 { 2971 struct inodedep *inodedep; 2972 2973 ACQUIRE_LOCK(&lk); 2974 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 2975 if (ip->i_nlink < ip->i_effnlink) { 2976 FREE_LOCK(&lk); 2977 panic("softdep_change_linkcnt: bad delta"); 2978 } 2979 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 2980 FREE_LOCK(&lk); 2981 } 2982 2983 /* 2984 * Called when the effective link count and the reference count 2985 * on an inode drops to zero. At this point there are no names 2986 * referencing the file in the filesystem and no active file 2987 * references. The space associated with the file will be freed 2988 * as soon as the necessary soft dependencies are cleared. 2989 */ 2990 void 2991 softdep_releasefile(ip) 2992 struct inode *ip; /* inode with the zero effective link count */ 2993 { 2994 struct inodedep *inodedep; 2995 2996 if (ip->i_effnlink > 0) 2997 panic("softdep_filerelease: file still referenced"); 2998 /* 2999 * We may be called several times as the real reference count 3000 * drops to zero. We only want to account for the space once. 3001 */ 3002 if (ip->i_flag & IN_SPACECOUNTED) 3003 return; 3004 /* 3005 * We have to deactivate a snapshot otherwise copyonwrites may 3006 * add blocks and the cleanup may remove blocks after we have 3007 * tried to account for them. 3008 */ 3009 if ((ip->i_flags & SF_SNAPSHOT) != 0) 3010 ffs_snapremove(ITOV(ip)); 3011 /* 3012 * If we are tracking an nlinkdelta, we have to also remember 3013 * whether we accounted for the freed space yet. 
*/ 3015 ACQUIRE_LOCK(&lk); 3016 if ((inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep))) 3017 inodedep->id_state |= SPACECOUNTED; 3018 FREE_LOCK(&lk); 3019 ip->i_fs->fs_pendingblocks += ip->i_blocks; 3020 ip->i_fs->fs_pendinginodes += 1; 3021 ip->i_flag |= IN_SPACECOUNTED; 3022 } 3023 3024 /* 3025 * This workitem decrements the inode's link count. 3026 * If the link count reaches zero, the file is removed. 3027 */ 3028 static void 3029 handle_workitem_remove(dirrem) 3030 struct dirrem *dirrem; 3031 { 3032 struct thread *td = curthread; 3033 struct inodedep *inodedep; 3034 struct vnode *vp; 3035 struct inode *ip; 3036 ino_t oldinum; 3037 int error; 3038 3039 if ((error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, &vp)) != 0) { 3040 softdep_error("handle_workitem_remove: vget", error); 3041 return; 3042 } 3043 ip = VTOI(vp); 3044 ACQUIRE_LOCK(&lk); 3045 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 3046 FREE_LOCK(&lk); 3047 panic("handle_workitem_remove: lost inodedep"); 3048 } 3049 /* 3050 * Normal file deletion. 3051 */ 3052 if ((dirrem->dm_state & RMDIR) == 0) { 3053 ip->i_nlink--; 3054 ip->i_flag |= IN_CHANGE; 3055 if (ip->i_nlink < ip->i_effnlink) { 3056 FREE_LOCK(&lk); 3057 panic("handle_workitem_remove: bad file delta"); 3058 } 3059 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3060 FREE_LOCK(&lk); 3061 vput(vp); 3062 num_dirrem -= 1; 3063 WORKITEM_FREE(dirrem, D_DIRREM); 3064 return; 3065 } 3066 /* 3067 * Directory deletion. Decrement reference count for both the 3068 * just deleted parent directory entry and the reference for ".". 3069 * Next truncate the directory to length zero. When the 3070 * truncation completes, arrange to have the reference count on 3071 * the parent decremented to account for the loss of "..". 3072 */ 3073 ip->i_nlink -= 2; 3074 ip->i_flag |= IN_CHANGE; 3075 if (ip->i_nlink < ip->i_effnlink) { 3076 FREE_LOCK(&lk); 3077 panic("handle_workitem_remove: bad dir delta"); 3078 } 3079 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3080 FREE_LOCK(&lk); 3081 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_proc->p_ucred, td)) != 0) 3082 softdep_error("handle_workitem_remove: truncate", error); 3083 /* 3084 * Rename a directory to a new parent. Since we are both deleting 3085 * and creating a new directory entry, the link count on the new 3086 * directory should not change. Thus we skip the followup dirrem. 3087 */ 3088 if (dirrem->dm_state & DIRCHG) { 3089 vput(vp); 3090 num_dirrem -= 1; 3091 WORKITEM_FREE(dirrem, D_DIRREM); 3092 return; 3093 } 3094 /* 3095 * If the inodedep does not exist, then the zero'ed inode has 3096 * been written to disk. If the allocated inode has never been 3097 * written to disk, then the on-disk inode is zero'ed. In either 3098 * case we can remove the file immediately. 3099 */ 3100 ACQUIRE_LOCK(&lk); 3101 dirrem->dm_state = 0; 3102 oldinum = dirrem->dm_oldinum; 3103 dirrem->dm_oldinum = dirrem->dm_dirinum; 3104 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 3105 check_inode_unwritten(inodedep)) { 3106 FREE_LOCK(&lk); 3107 vput(vp); 3108 handle_workitem_remove(dirrem); 3109 return; 3110 } 3111 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 3112 FREE_LOCK(&lk); 3113 vput(vp); 3114 } 3115 3116 /* 3117 * Inode de-allocation dependencies. 3118 * 3119 * When an inode's link count is reduced to zero, it can be de-allocated. We 3120 * found it convenient to postpone de-allocation until after the inode is 3121 * written to disk with its new link count (zero).
At this point, all of the 3122 * on-disk inode's block pointers are nullified and, with careful dependency 3123 * list ordering, all dependencies related to the inode will be satisfied and 3124 * the corresponding dependency structures de-allocated. So, if/when the 3125 * inode is reused, there will be no mixing of old dependencies with new 3126 * ones. This artificial dependency is set up by the block de-allocation 3127 * procedure above (softdep_setup_freeblocks) and completed by the 3128 * following procedure. 3129 */ 3130 static void 3131 handle_workitem_freefile(freefile) 3132 struct freefile *freefile; 3133 { 3134 struct fs *fs; 3135 struct inodedep *idp; 3136 int error; 3137 3138 fs = VFSTOUFS(freefile->fx_mnt)->um_fs; 3139 #ifdef DEBUG 3140 ACQUIRE_LOCK(&lk); 3141 error = inodedep_lookup(fs, freefile->fx_oldinum, 0, &idp); 3142 FREE_LOCK(&lk); 3143 if (error) 3144 panic("handle_workitem_freefile: inodedep survived"); 3145 #endif 3146 fs->fs_pendinginodes -= 1; 3147 if ((error = ffs_freefile(fs, freefile->fx_devvp, freefile->fx_oldinum, 3148 freefile->fx_mode)) != 0) 3149 softdep_error("handle_workitem_freefile", error); 3150 WORKITEM_FREE(freefile, D_FREEFILE); 3151 } 3152 3153 /* 3154 * Disk writes. 3155 * 3156 * The dependency structures constructed above are most actively used when file 3157 * system blocks are written to disk. No constraints are placed on when a 3158 * block can be written, but unsatisfied update dependencies are made safe by 3159 * modifying (or replacing) the source memory for the duration of the disk 3160 * write. When the disk write completes, the memory block is again brought 3161 * up-to-date. 3162 * 3163 * In-core inode structure reclamation. 3164 * 3165 * Because there are a finite number of "in-core" inode structures, they are 3166 * reused regularly. By transferring all inode-related dependencies to the 3167 * in-memory inode block and indexing them separately (via "inodedep"s), we 3168 * can allow "in-core" inode structures to be reused at any time and avoid 3169 * any increase in contention. 3170 * 3171 * Called just before entering the device driver to initiate a new disk I/O. 3172 * The buffer must be locked, thus, no I/O completion operations can occur 3173 * while we are manipulating its associated dependencies. 3174 */ 3175 static void 3176 softdep_disk_io_initiation(bp) 3177 struct buf *bp; /* structure describing disk write to occur */ 3178 { 3179 struct worklist *wk, *nextwk; 3180 struct indirdep *indirdep; 3181 3182 /* 3183 * We only care about write operations. There should never 3184 * be dependencies for reads. 3185 */ 3186 if (bp->b_iocmd == BIO_READ) 3187 panic("softdep_disk_io_initiation: read"); 3188 /* 3189 * Do any necessary pre-I/O processing. 3190 */ 3191 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) { 3192 nextwk = LIST_NEXT(wk, wk_list); 3193 switch (wk->wk_type) { 3194 3195 case D_PAGEDEP: 3196 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3197 continue; 3198 3199 case D_INODEDEP: 3200 initiate_write_inodeblock(WK_INODEDEP(wk), bp); 3201 continue; 3202 3203 case D_INDIRDEP: 3204 indirdep = WK_INDIRDEP(wk); 3205 if (indirdep->ir_state & GOINGAWAY) 3206 panic("disk_io_initiation: indirdep gone"); 3207 /* 3208 * If there are no remaining dependencies, this 3209 * will be writing the real pointers, so the 3210 * dependency can be freed. 
3211 */ 3212 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3213 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3214 brelse(indirdep->ir_savebp); 3215 /* inline expand WORKLIST_REMOVE(wk); */ 3216 wk->wk_state &= ~ONWORKLIST; 3217 LIST_REMOVE(wk, wk_list); 3218 WORKITEM_FREE(indirdep, D_INDIRDEP); 3219 continue; 3220 } 3221 /* 3222 * Replace up-to-date version with safe version. 3223 */ 3224 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3225 M_INDIRDEP, M_SOFTDEP_FLAGS); 3226 ACQUIRE_LOCK(&lk); 3227 indirdep->ir_state &= ~ATTACHED; 3228 indirdep->ir_state |= UNDONE; 3229 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3230 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3231 bp->b_bcount); 3232 FREE_LOCK(&lk); 3233 continue; 3234 3235 case D_MKDIR: 3236 case D_BMSAFEMAP: 3237 case D_ALLOCDIRECT: 3238 case D_ALLOCINDIR: 3239 continue; 3240 3241 default: 3242 panic("handle_disk_io_initiation: Unexpected type %s", 3243 TYPENAME(wk->wk_type)); 3244 /* NOTREACHED */ 3245 } 3246 } 3247 } 3248 3249 /* 3250 * Called from within the procedure above to deal with unsatisfied 3251 * allocation dependencies in a directory. The buffer must be locked, 3252 * thus, no I/O completion operations can occur while we are 3253 * manipulating its associated dependencies. 3254 */ 3255 static void 3256 initiate_write_filepage(pagedep, bp) 3257 struct pagedep *pagedep; 3258 struct buf *bp; 3259 { 3260 struct diradd *dap; 3261 struct direct *ep; 3262 int i; 3263 3264 if (pagedep->pd_state & IOSTARTED) { 3265 /* 3266 * This can only happen if there is a driver that does not 3267 * understand chaining. Here biodone will reissue the call 3268 * to strategy for the incomplete buffers. 3269 */ 3270 printf("initiate_write_filepage: already started\n"); 3271 return; 3272 } 3273 pagedep->pd_state |= IOSTARTED; 3274 ACQUIRE_LOCK(&lk); 3275 for (i = 0; i < DAHASHSZ; i++) { 3276 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3277 ep = (struct direct *) 3278 ((char *)bp->b_data + dap->da_offset); 3279 if (ep->d_ino != dap->da_newinum) { 3280 FREE_LOCK(&lk); 3281 panic("%s: dir inum %d != new %d", 3282 "initiate_write_filepage", 3283 ep->d_ino, dap->da_newinum); 3284 } 3285 if (dap->da_state & DIRCHG) 3286 ep->d_ino = dap->da_previous->dm_oldinum; 3287 else 3288 ep->d_ino = 0; 3289 dap->da_state &= ~ATTACHED; 3290 dap->da_state |= UNDONE; 3291 } 3292 } 3293 FREE_LOCK(&lk); 3294 } 3295 3296 /* 3297 * Called from within the procedure above to deal with unsatisfied 3298 * allocation dependencies in an inodeblock. The buffer must be 3299 * locked, thus, no I/O completion operations can occur while we 3300 * are manipulating its associated dependencies. 3301 */ 3302 static void 3303 initiate_write_inodeblock(inodedep, bp) 3304 struct inodedep *inodedep; 3305 struct buf *bp; /* The inode block */ 3306 { 3307 struct allocdirect *adp, *lastadp; 3308 struct dinode *dp; 3309 struct fs *fs; 3310 ufs_lbn_t prevlbn = 0; 3311 int i, deplist; 3312 3313 if (inodedep->id_state & IOSTARTED) 3314 panic("initiate_write_inodeblock: already started"); 3315 inodedep->id_state |= IOSTARTED; 3316 fs = inodedep->id_fs; 3317 dp = (struct dinode *)bp->b_data + 3318 ino_to_fsbo(fs, inodedep->id_ino); 3319 /* 3320 * If the bitmap is not yet written, then the allocated 3321 * inode cannot be written to disk. 
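 * The rollback below (saving the dinode in id_savedino and writing a
 * zero'ed dinode in its place) keeps the disk self-consistent: if the
 * system crashes after this write but before the bitmap write, the
 * on-disk inode still appears unallocated, matching the bitmap.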
3322 */ 3323 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3324 if (inodedep->id_savedino != NULL) 3325 panic("initiate_write_inodeblock: already doing I/O"); 3326 MALLOC(inodedep->id_savedino, struct dinode *, 3327 sizeof(struct dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3328 *inodedep->id_savedino = *dp; 3329 bzero((caddr_t)dp, sizeof(struct dinode)); 3330 return; 3331 } 3332 /* 3333 * If no dependencies, then there is nothing to roll back. 3334 */ 3335 inodedep->id_savedsize = dp->di_size; 3336 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3337 return; 3338 /* 3339 * Set the dependencies to busy. 3340 */ 3341 ACQUIRE_LOCK(&lk); 3342 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3343 adp = TAILQ_NEXT(adp, ad_next)) { 3344 #ifdef DIAGNOSTIC 3345 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3346 FREE_LOCK(&lk); 3347 panic("softdep_write_inodeblock: lbn order"); 3348 } 3349 prevlbn = adp->ad_lbn; 3350 if (adp->ad_lbn < NDADDR && 3351 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3352 FREE_LOCK(&lk); 3353 panic("%s: direct pointer #%ld mismatch %d != %d", 3354 "softdep_write_inodeblock", adp->ad_lbn, 3355 dp->di_db[adp->ad_lbn], adp->ad_newblkno); 3356 } 3357 if (adp->ad_lbn >= NDADDR && 3358 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3359 FREE_LOCK(&lk); 3360 panic("%s: indirect pointer #%ld mismatch %d != %d", 3361 "softdep_write_inodeblock", adp->ad_lbn - NDADDR, 3362 dp->di_ib[adp->ad_lbn - NDADDR], adp->ad_newblkno); 3363 } 3364 deplist |= 1 << adp->ad_lbn; 3365 if ((adp->ad_state & ATTACHED) == 0) { 3366 FREE_LOCK(&lk); 3367 panic("softdep_write_inodeblock: Unknown state 0x%x", 3368 adp->ad_state); 3369 } 3370 #endif /* DIAGNOSTIC */ 3371 adp->ad_state &= ~ATTACHED; 3372 adp->ad_state |= UNDONE; 3373 } 3374 /* 3375 * The on-disk inode cannot claim to be any larger than the last 3376 * fragment that has been written. Otherwise, the on-disk inode 3377 * might have fragments that were not the last block in the file, 3378 * which would corrupt the filesystem. 3379 */ 3380 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3381 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3382 if (adp->ad_lbn >= NDADDR) 3383 break; 3384 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3385 /* keep going until hitting a rollback to a frag */ 3386 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3387 continue; 3388 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3389 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3390 #ifdef DIAGNOSTIC 3391 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3392 FREE_LOCK(&lk); 3393 panic("softdep_write_inodeblock: lost dep1"); 3394 } 3395 #endif /* DIAGNOSTIC */ 3396 dp->di_db[i] = 0; 3397 } 3398 for (i = 0; i < NIADDR; i++) { 3399 #ifdef DIAGNOSTIC 3400 if (dp->di_ib[i] != 0 && 3401 (deplist & ((1 << NDADDR) << i)) == 0) { 3402 FREE_LOCK(&lk); 3403 panic("softdep_write_inodeblock: lost dep2"); 3404 } 3405 #endif /* DIAGNOSTIC */ 3406 dp->di_ib[i] = 0; 3407 } 3408 FREE_LOCK(&lk); 3409 return; 3410 } 3411 /* 3412 * If we have zeroed out the last allocated block of the file, 3413 * roll back the size to the last currently allocated block. 3414 * We know that this last allocated block is full-sized, as 3415 * we already checked for fragments in the loop above.
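 * For example (illustrative numbers only): with 8K blocks, if the
 * rollback above zeroed every direct pointer past lbn 4 and left
 * di_db[4] as the last non-zero entry, di_size is rolled back to
 * (4 + 1) * 8K = 40K.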
3416 */ 3417 if (lastadp != NULL && 3418 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3419 for (i = lastadp->ad_lbn; i >= 0; i--) 3420 if (dp->di_db[i] != 0) 3421 break; 3422 dp->di_size = (i + 1) * fs->fs_bsize; 3423 } 3424 /* 3425 * The only dependencies are for indirect blocks. 3426 * 3427 * The file size for indirect block additions is not guaranteed. 3428 * Such a guarantee would be non-trivial to achieve. The conventional 3429 * synchronous write implementation also does not make this guarantee. 3430 * Fsck should catch and fix discrepancies. Arguably, the file size 3431 * can be over-estimated without destroying integrity when the file 3432 * moves into the indirect blocks (i.e., is large). If we want to 3433 * postpone fsck, we are stuck with this argument. 3434 */ 3435 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3436 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3437 FREE_LOCK(&lk); 3438 } 3439 3440 /* 3441 * This routine is called during the completion interrupt 3442 * service routine for a disk write (from the procedure called 3443 * by the device driver to inform the file system caches of 3444 * a request completion). It should be called early in this 3445 * procedure, before the block is made available to other 3446 * processes or other routines are called. 3447 */ 3448 static void 3449 softdep_disk_write_complete(bp) 3450 struct buf *bp; /* describes the completed disk write */ 3451 { 3452 struct worklist *wk; 3453 struct workhead reattach; 3454 struct newblk *newblk; 3455 struct allocindir *aip; 3456 struct allocdirect *adp; 3457 struct indirdep *indirdep; 3458 struct inodedep *inodedep; 3459 struct bmsafemap *bmsafemap; 3460 3461 #ifdef DEBUG 3462 if (lk.lkt_held != NOHOLDER) 3463 panic("softdep_disk_write_complete: lock is held"); 3464 lk.lkt_held = SPECIAL_FLAG; 3465 #endif 3466 LIST_INIT(&reattach); 3467 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3468 WORKLIST_REMOVE(wk); 3469 switch (wk->wk_type) { 3470 3471 case D_PAGEDEP: 3472 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3473 WORKLIST_INSERT(&reattach, wk); 3474 continue; 3475 3476 case D_INODEDEP: 3477 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3478 WORKLIST_INSERT(&reattach, wk); 3479 continue; 3480 3481 case D_BMSAFEMAP: 3482 bmsafemap = WK_BMSAFEMAP(wk); 3483 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3484 newblk->nb_state |= DEPCOMPLETE; 3485 newblk->nb_bmsafemap = NULL; 3486 LIST_REMOVE(newblk, nb_deps); 3487 } 3488 while ((adp = 3489 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3490 adp->ad_state |= DEPCOMPLETE; 3491 adp->ad_buf = NULL; 3492 LIST_REMOVE(adp, ad_deps); 3493 handle_allocdirect_partdone(adp); 3494 } 3495 while ((aip = 3496 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3497 aip->ai_state |= DEPCOMPLETE; 3498 aip->ai_buf = NULL; 3499 LIST_REMOVE(aip, ai_deps); 3500 handle_allocindir_partdone(aip); 3501 } 3502 while ((inodedep = 3503 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3504 inodedep->id_state |= DEPCOMPLETE; 3505 LIST_REMOVE(inodedep, id_deps); 3506 inodedep->id_buf = NULL; 3507 } 3508 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3509 continue; 3510 3511 case D_MKDIR: 3512 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3513 continue; 3514 3515 case D_ALLOCDIRECT: 3516 adp = WK_ALLOCDIRECT(wk); 3517 adp->ad_state |= COMPLETE; 3518 handle_allocdirect_partdone(adp); 3519 continue; 3520 3521 case D_ALLOCINDIR: 3522 aip = WK_ALLOCINDIR(wk); 3523 aip->ai_state |= COMPLETE; 3524 handle_allocindir_partdone(aip); 3525 continue; 3526 3527 case D_INDIRDEP: 3528 indirdep = 
WK_INDIRDEP(wk); 3529 if (indirdep->ir_state & GOINGAWAY) { 3530 lk.lkt_held = NOHOLDER; 3531 panic("disk_write_complete: indirdep gone"); 3532 } 3533 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 3534 FREE(indirdep->ir_saveddata, M_INDIRDEP); 3535 indirdep->ir_saveddata = 0; 3536 indirdep->ir_state &= ~UNDONE; 3537 indirdep->ir_state |= ATTACHED; 3538 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 3539 handle_allocindir_partdone(aip); 3540 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 3541 lk.lkt_held = NOHOLDER; 3542 panic("disk_write_complete: not gone"); 3543 } 3544 } 3545 WORKLIST_INSERT(&reattach, wk); 3546 if ((bp->b_flags & B_DELWRI) == 0) 3547 stat_indir_blk_ptrs++; 3548 bdirty(bp); 3549 continue; 3550 3551 default: 3552 lk.lkt_held = NOHOLDER; 3553 panic("handle_disk_write_complete: Unknown type %s", 3554 TYPENAME(wk->wk_type)); 3555 /* NOTREACHED */ 3556 } 3557 } 3558 /* 3559 * Reattach any requests that must be redone. 3560 */ 3561 while ((wk = LIST_FIRST(&reattach)) != NULL) { 3562 WORKLIST_REMOVE(wk); 3563 WORKLIST_INSERT(&bp->b_dep, wk); 3564 } 3565 #ifdef DEBUG 3566 if (lk.lkt_held != SPECIAL_FLAG) 3567 panic("softdep_disk_write_complete: lock lost"); 3568 lk.lkt_held = NOHOLDER; 3569 #endif 3570 } 3571 3572 /* 3573 * Called from within softdep_disk_write_complete above. Note that 3574 * this routine is always called from interrupt level with further 3575 * splbio interrupts blocked. 3576 */ 3577 static void 3578 handle_allocdirect_partdone(adp) 3579 struct allocdirect *adp; /* the completed allocdirect */ 3580 { 3581 struct allocdirect *listadp; 3582 struct inodedep *inodedep; 3583 long bsize, delay; 3584 3585 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3586 return; 3587 if (adp->ad_buf != NULL) { 3588 lk.lkt_held = NOHOLDER; 3589 panic("handle_allocdirect_partdone: dangling dep"); 3590 } 3591 /* 3592 * The on-disk inode cannot claim to be any larger than the last 3593 * fragment that has been written. Otherwise, the on-disk inode 3594 * might have fragments that were not the last block in the file, 3595 * which would corrupt the filesystem. Thus, we cannot free any 3596 * allocdirects after one whose old allocation (ad_oldsize) was a fragment, 3597 * as these blocks must be rolled back to zero before writing the inode. 3598 * We check the currently active set of allocdirects in id_inoupdt. 3599 */ 3600 inodedep = adp->ad_inodedep; 3601 bsize = inodedep->id_fs->fs_bsize; 3602 TAILQ_FOREACH(listadp, &inodedep->id_inoupdt, ad_next) { 3603 /* found our block */ 3604 if (listadp == adp) 3605 break; 3606 /* continue if the old allocation is not a fragment */ 3607 if (listadp->ad_oldsize == 0 || 3608 listadp->ad_oldsize == bsize) 3609 continue; 3610 /* hit a fragment */ 3611 return; 3612 } 3613 /* 3614 * If we have reached the end of the current list without 3615 * finding the just finished dependency, then it must be 3616 * on the future dependency list. Future dependencies cannot 3617 * be freed until they are moved to the current list. 3618 */ 3619 if (listadp == NULL) { 3620 #ifdef DEBUG 3621 TAILQ_FOREACH(listadp, &inodedep->id_newinoupdt, ad_next) 3622 /* found our block */ 3623 if (listadp == adp) 3624 break; 3625 if (listadp == NULL) { 3626 lk.lkt_held = NOHOLDER; 3627 panic("handle_allocdirect_partdone: lost dep"); 3628 } 3629 #endif /* DEBUG */ 3630 return; 3631 } 3632 /* 3633 * If we have found the just finished dependency, then free 3634 * it along with anything that follows it that is complete.
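 * Whether the old fragment behind each freed allocdirect may be
 * released immediately is controlled by the delay flag passed to
 * free_allocdirect below.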
3635 * If the inode still has a bitmap dependency, then it has 3636 * never been written to disk, hence the on-disk inode cannot 3637 * reference the old fragment so we can free it without delay. 3638 */ 3639 delay = (inodedep->id_state & DEPCOMPLETE); 3640 for (; adp; adp = listadp) { 3641 listadp = TAILQ_NEXT(adp, ad_next); 3642 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 3643 return; 3644 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 3645 } 3646 } 3647 3648 /* 3649 * Called from within softdep_disk_write_complete above. Note that 3650 * this routine is always called from interrupt level with further 3651 * splbio interrupts blocked. 3652 */ 3653 static void 3654 handle_allocindir_partdone(aip) 3655 struct allocindir *aip; /* the completed allocindir */ 3656 { 3657 struct indirdep *indirdep; 3658 3659 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 3660 return; 3661 if (aip->ai_buf != NULL) { 3662 lk.lkt_held = NOHOLDER; 3663 panic("handle_allocindir_partdone: dangling dependency"); 3664 } 3665 indirdep = aip->ai_indirdep; 3666 if (indirdep->ir_state & UNDONE) { 3667 LIST_REMOVE(aip, ai_next); 3668 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 3669 return; 3670 } 3671 ((ufs_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 3672 aip->ai_newblkno; 3673 LIST_REMOVE(aip, ai_next); 3674 if (aip->ai_freefrag != NULL) 3675 add_to_worklist(&aip->ai_freefrag->ff_list); 3676 WORKITEM_FREE(aip, D_ALLOCINDIR); 3677 } 3678 3679 /* 3680 * Called from within softdep_disk_write_complete above to restore 3681 * in-memory inode block contents to their most up-to-date state. Note 3682 * that this routine is always called from interrupt level with further 3683 * splbio interrupts blocked. 3684 */ 3685 static int 3686 handle_written_inodeblock(inodedep, bp) 3687 struct inodedep *inodedep; 3688 struct buf *bp; /* buffer containing the inode block */ 3689 { 3690 struct worklist *wk, *filefree; 3691 struct allocdirect *adp, *nextadp; 3692 struct dinode *dp; 3693 int hadchanges; 3694 3695 if ((inodedep->id_state & IOSTARTED) == 0) { 3696 lk.lkt_held = NOHOLDER; 3697 panic("handle_written_inodeblock: not started"); 3698 } 3699 inodedep->id_state &= ~IOSTARTED; 3700 inodedep->id_state |= COMPLETE; 3701 dp = (struct dinode *)bp->b_data + 3702 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 3703 /* 3704 * If we had to rollback the inode allocation because of 3705 * bitmaps being incomplete, then simply restore it. 3706 * Keep the block dirty so that it will not be reclaimed until 3707 * all associated dependencies have been cleared and the 3708 * corresponding updates written to disk. 3709 */ 3710 if (inodedep->id_savedino != NULL) { 3711 *dp = *inodedep->id_savedino; 3712 FREE(inodedep->id_savedino, M_INODEDEP); 3713 inodedep->id_savedino = NULL; 3714 if ((bp->b_flags & B_DELWRI) == 0) 3715 stat_inode_bitmap++; 3716 bdirty(bp); 3717 return (1); 3718 } 3719 /* 3720 * Roll forward anything that had to be rolled back before 3721 * the inode could be updated. 
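 * Each allocdirect that was undone at I/O initiation has its new
 * block pointer reinstalled below and is flipped from UNDONE back
 * to ATTACHED.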
3722 */ 3723 hadchanges = 0; 3724 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 3725 nextadp = TAILQ_NEXT(adp, ad_next); 3726 if (adp->ad_state & ATTACHED) { 3727 lk.lkt_held = NOHOLDER; 3728 panic("handle_written_inodeblock: new entry"); 3729 } 3730 if (adp->ad_lbn < NDADDR) { 3731 if (dp->di_db[adp->ad_lbn] != adp->ad_oldblkno) { 3732 lk.lkt_held = NOHOLDER; 3733 panic("%s: %s #%ld mismatch %d != %d", 3734 "handle_written_inodeblock", 3735 "direct pointer", adp->ad_lbn, 3736 dp->di_db[adp->ad_lbn], adp->ad_oldblkno); 3737 } 3738 dp->di_db[adp->ad_lbn] = adp->ad_newblkno; 3739 } else { 3740 if (dp->di_ib[adp->ad_lbn - NDADDR] != 0) { 3741 lk.lkt_held = NOHOLDER; 3742 panic("%s: %s #%ld allocated as %d", 3743 "handle_written_inodeblock", 3744 "indirect pointer", adp->ad_lbn - NDADDR, 3745 dp->di_ib[adp->ad_lbn - NDADDR]); 3746 } 3747 dp->di_ib[adp->ad_lbn - NDADDR] = adp->ad_newblkno; 3748 } 3749 adp->ad_state &= ~UNDONE; 3750 adp->ad_state |= ATTACHED; 3751 hadchanges = 1; 3752 } 3753 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 3754 stat_direct_blk_ptrs++; 3755 /* 3756 * Reset the file size to its most up-to-date value. 3757 */ 3758 if (inodedep->id_savedsize == -1) { 3759 lk.lkt_held = NOHOLDER; 3760 panic("handle_written_inodeblock: bad size"); 3761 } 3762 if (dp->di_size != inodedep->id_savedsize) { 3763 dp->di_size = inodedep->id_savedsize; 3764 hadchanges = 1; 3765 } 3766 inodedep->id_savedsize = -1; 3767 /* 3768 * If there were any rollbacks in the inode block, then it must be 3769 * marked dirty so that it will eventually get written back in 3770 * its correct form. 3771 */ 3772 if (hadchanges) 3773 bdirty(bp); 3774 /* 3775 * Process any allocdirects that completed during the update. 3776 */ 3777 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 3778 handle_allocdirect_partdone(adp); 3779 /* 3780 * Process deallocations that were held pending until the 3781 * inode had been written to disk. Freeing of the inode 3782 * is delayed until after all blocks have been freed to 3783 * avoid creation of new <vfsid, inum, lbn> triples 3784 * before the old ones have been deleted. 3785 */ 3786 filefree = NULL; 3787 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 3788 WORKLIST_REMOVE(wk); 3789 switch (wk->wk_type) { 3790 3791 case D_FREEFILE: 3792 /* 3793 * We defer adding filefree to the worklist until 3794 * all other additions have been made to ensure 3795 * that it will be done after all the old blocks 3796 * have been freed. 3797 */ 3798 if (filefree != NULL) { 3799 lk.lkt_held = NOHOLDER; 3800 panic("handle_written_inodeblock: filefree"); 3801 } 3802 filefree = wk; 3803 continue; 3804 3805 case D_MKDIR: 3806 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 3807 continue; 3808 3809 case D_DIRADD: 3810 diradd_inode_written(WK_DIRADD(wk), inodedep); 3811 continue; 3812 3813 case D_FREEBLKS: 3814 case D_FREEFRAG: 3815 case D_DIRREM: 3816 add_to_worklist(wk); 3817 continue; 3818 3819 case D_NEWDIRBLK: 3820 free_newdirblk(WK_NEWDIRBLK(wk)); 3821 continue; 3822 3823 default: 3824 lk.lkt_held = NOHOLDER; 3825 panic("handle_written_inodeblock: Unknown type %s", 3826 TYPENAME(wk->wk_type)); 3827 /* NOTREACHED */ 3828 } 3829 } 3830 if (filefree != NULL) { 3831 if (free_inodedep(inodedep) == 0) { 3832 lk.lkt_held = NOHOLDER; 3833 panic("handle_written_inodeblock: live inodedep"); 3834 } 3835 add_to_worklist(filefree); 3836 return (0); 3837 } 3838 3839 /* 3840 * If no outstanding dependencies, free it.
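 * Otherwise, a non-zero return tells the caller to leave the
 * inodedep attached to the buffer so that the rolled-back copy is
 * eventually rewritten in its correct form.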
3841 */ 3842 if (free_inodedep(inodedep) || TAILQ_FIRST(&inodedep->id_inoupdt) == 0) 3843 return (0); 3844 return (hadchanges); 3845 } 3846 3847 /* 3848 * Process a diradd entry after its dependent inode has been written. 3849 * This routine must be called with splbio interrupts blocked. 3850 */ 3851 static void 3852 diradd_inode_written(dap, inodedep) 3853 struct diradd *dap; 3854 struct inodedep *inodedep; 3855 { 3856 struct pagedep *pagedep; 3857 3858 dap->da_state |= COMPLETE; 3859 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3860 if (dap->da_state & DIRCHG) 3861 pagedep = dap->da_previous->dm_pagedep; 3862 else 3863 pagedep = dap->da_pagedep; 3864 LIST_REMOVE(dap, da_pdlist); 3865 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3866 } 3867 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3868 } 3869 3870 /* 3871 * Handle the completion of a mkdir dependency. 3872 */ 3873 static void 3874 handle_written_mkdir(mkdir, type) 3875 struct mkdir *mkdir; 3876 int type; 3877 { 3878 struct diradd *dap; 3879 struct pagedep *pagedep; 3880 3881 if (mkdir->md_state != type) { 3882 lk.lkt_held = NOHOLDER; 3883 panic("handle_written_mkdir: bad type"); 3884 } 3885 dap = mkdir->md_diradd; 3886 dap->da_state &= ~type; 3887 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 3888 dap->da_state |= DEPCOMPLETE; 3889 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3890 if (dap->da_state & DIRCHG) 3891 pagedep = dap->da_previous->dm_pagedep; 3892 else 3893 pagedep = dap->da_pagedep; 3894 LIST_REMOVE(dap, da_pdlist); 3895 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3896 } 3897 LIST_REMOVE(mkdir, md_mkdirs); 3898 WORKITEM_FREE(mkdir, D_MKDIR); 3899 } 3900 3901 /* 3902 * Called from within softdep_disk_write_complete above. 3903 * A write operation was just completed. Removed inodes can 3904 * now be freed and associated block pointers may be committed. 3905 * Note that this routine is always called from interrupt level 3906 * with further splbio interrupts blocked. 3907 */ 3908 static int 3909 handle_written_filepage(pagedep, bp) 3910 struct pagedep *pagedep; 3911 struct buf *bp; /* buffer containing the written page */ 3912 { 3913 struct dirrem *dirrem; 3914 struct diradd *dap, *nextdap; 3915 struct direct *ep; 3916 int i, chgs; 3917 3918 if ((pagedep->pd_state & IOSTARTED) == 0) { 3919 lk.lkt_held = NOHOLDER; 3920 panic("handle_written_filepage: not started"); 3921 } 3922 pagedep->pd_state &= ~IOSTARTED; 3923 /* 3924 * Process any directory removals that have been committed. 3925 */ 3926 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 3927 LIST_REMOVE(dirrem, dm_next); 3928 dirrem->dm_dirinum = pagedep->pd_ino; 3929 add_to_worklist(&dirrem->dm_list); 3930 } 3931 /* 3932 * Free any directory additions that have been committed. 3933 * If it is a newly allocated block, we have to wait until 3934 * the on-disk directory inode claims the new block. 3935 */ 3936 if ((pagedep->pd_state & NEWBLOCK) == 0) 3937 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 3938 free_diradd(dap); 3939 /* 3940 * Uncommitted directory entries must be restored. 
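 * Their d_ino fields were rolled back (to zero, or to the previous
 * inode for DIRCHG entries) when the write was initiated; the loop
 * below reinstalls the new inode numbers and dirties the page again.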
3941 */ 3942 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 3943 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 3944 dap = nextdap) { 3945 nextdap = LIST_NEXT(dap, da_pdlist); 3946 if (dap->da_state & ATTACHED) { 3947 lk.lkt_held = NOHOLDER; 3948 panic("handle_written_filepage: attached"); 3949 } 3950 ep = (struct direct *) 3951 ((char *)bp->b_data + dap->da_offset); 3952 ep->d_ino = dap->da_newinum; 3953 dap->da_state &= ~UNDONE; 3954 dap->da_state |= ATTACHED; 3955 chgs = 1; 3956 /* 3957 * If the inode referenced by the directory has 3958 * been written out, then the dependency can be 3959 * moved to the pending list. 3960 */ 3961 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 3962 LIST_REMOVE(dap, da_pdlist); 3963 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 3964 da_pdlist); 3965 } 3966 } 3967 } 3968 /* 3969 * If there were any rollbacks in the directory, then it must be 3970 * marked dirty so that it will eventually get written back in 3971 * its correct form. 3972 */ 3973 if (chgs) { 3974 if ((bp->b_flags & B_DELWRI) == 0) 3975 stat_dir_entry++; 3976 bdirty(bp); 3977 return (1); 3978 } 3979 /* 3980 * If we are not waiting for a new directory block to be 3981 * claimed by its inode, then the pagedep will be freed. 3982 * Otherwise it will remain to track any new entries on 3983 * the page in case they are fsync'ed. 3984 */ 3985 if ((pagedep->pd_state & NEWBLOCK) == 0) { 3986 LIST_REMOVE(pagedep, pd_hash); 3987 WORKITEM_FREE(pagedep, D_PAGEDEP); 3988 } 3989 return (0); 3990 } 3991 3992 /* 3993 * Writing back in-core inode structures. 3994 * 3995 * The file system only accesses an inode's contents when it occupies an 3996 * "in-core" inode structure. These "in-core" structures are separate from 3997 * the page frames used to cache inode blocks. Only the latter are 3998 * transferred to/from the disk. So, when the updated contents of the 3999 * "in-core" inode structure are copied to the corresponding in-memory inode 4000 * block, the dependencies are also transferred. The following procedure is 4001 * called when copying a dirty "in-core" inode to a cached inode block. 4002 */ 4003 4004 /* 4005 * Called when an inode is loaded from disk. If the effective link count 4006 * differed from the actual link count when it was last flushed, then we 4007 * need to ensure that the correct effective link count is put back. 4008 */ 4009 void 4010 softdep_load_inodeblock(ip) 4011 struct inode *ip; /* the "in_core" copy of the inode */ 4012 { 4013 struct inodedep *inodedep; 4014 4015 /* 4016 * Check for alternate nlink count. 4017 */ 4018 ip->i_effnlink = ip->i_nlink; 4019 ACQUIRE_LOCK(&lk); 4020 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4021 FREE_LOCK(&lk); 4022 return; 4023 } 4024 ip->i_effnlink -= inodedep->id_nlinkdelta; 4025 if (inodedep->id_state & SPACECOUNTED) 4026 ip->i_flag |= IN_SPACECOUNTED; 4027 FREE_LOCK(&lk); 4028 } 4029 4030 /* 4031 * This routine is called just before the "in-core" inode 4032 * information is to be copied to the in-memory inode block. 4033 * Recall that an inode block contains several inodes. If 4034 * the waitfor flag is set, then the dependencies will be 4035 * cleared so that the update can always be made. Note that 4036 * the buffer is locked when this routine is called, so we 4037 * will never be in the middle of writing the inode block 4038 * to disk.
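 * The routine below moves the inodedep onto the buffer's worklist,
 * merges any new allocdirect dependencies into the active list, and,
 * when an update must be allowed, forces the inode's bitmap buffer
 * to disk.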
4039 */ 4040 void 4041 softdep_update_inodeblock(ip, bp, waitfor) 4042 struct inode *ip; /* the "in_core" copy of the inode */ 4043 struct buf *bp; /* the buffer containing the inode block */ 4044 int waitfor; /* nonzero => update must be allowed */ 4045 { 4046 struct inodedep *inodedep; 4047 struct worklist *wk; 4048 int error, gotit; 4049 4050 /* 4051 * If the effective link count is not equal to the actual link 4052 * count, then we must track the difference in an inodedep while 4053 * the inode is (potentially) tossed out of the cache. Otherwise, 4054 * if there is no existing inodedep, then there are no dependencies 4055 * to track. 4056 */ 4057 ACQUIRE_LOCK(&lk); 4058 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4059 FREE_LOCK(&lk); 4060 if (ip->i_effnlink != ip->i_nlink) 4061 panic("softdep_update_inodeblock: bad link count"); 4062 return; 4063 } 4064 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 4065 FREE_LOCK(&lk); 4066 panic("softdep_update_inodeblock: bad delta"); 4067 } 4068 /* 4069 * Changes have been initiated. Anything depending on these 4070 * changes cannot occur until this inode has been written. 4071 */ 4072 inodedep->id_state &= ~COMPLETE; 4073 if ((inodedep->id_state & ONWORKLIST) == 0) 4074 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 4075 /* 4076 * Any new dependencies associated with the incore inode must 4077 * now be moved to the list associated with the buffer holding 4078 * the in-memory copy of the inode. Once merged, process any 4079 * allocdirects that are completed by the merger. 4080 */ 4081 merge_inode_lists(inodedep); 4082 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL) 4083 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt)); 4084 /* 4085 * Now that the inode has been pushed into the buffer, the 4086 * operations dependent on the inode being written to disk 4087 * can be moved to the id_bufwait so that they will be 4088 * processed when the buffer I/O completes. 4089 */ 4090 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 4091 WORKLIST_REMOVE(wk); 4092 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 4093 } 4094 /* 4095 * Newly allocated inodes cannot be written until the bitmap 4096 * that allocates them has been written (indicated by 4097 * DEPCOMPLETE being set in id_state). If we are doing a 4098 * forced sync (e.g., an fsync on a file), we force the bitmap 4099 * to be written so that the update can be done. 4100 */ 4101 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) { 4102 FREE_LOCK(&lk); 4103 return; 4104 } 4105 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4106 FREE_LOCK(&lk); 4107 if (gotit && 4108 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4109 softdep_error("softdep_update_inodeblock: bwrite", error); 4110 if ((inodedep->id_state & DEPCOMPLETE) == 0) 4111 panic("softdep_update_inodeblock: update failed"); 4112 } 4113 4114 /* 4115 * Merge the new inode dependency list (id_newinoupdt) into the old 4116 * inode dependency list (id_inoupdt). This routine must be called 4117 * with splbio interrupts blocked.
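 * Both lists are sorted by ad_lbn. Entries for the same logical
 * block are combined with allocdirect_merge; anything left on the
 * new list afterwards is appended to the old one.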
4118 */ 4119 static void 4120 merge_inode_lists(inodedep) 4121 struct inodedep *inodedep; 4122 { 4123 struct allocdirect *listadp, *newadp; 4124 4125 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4126 for (listadp = TAILQ_FIRST(&inodedep->id_inoupdt); listadp && newadp;) { 4127 if (listadp->ad_lbn < newadp->ad_lbn) { 4128 listadp = TAILQ_NEXT(listadp, ad_next); 4129 continue; 4130 } 4131 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4132 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4133 if (listadp->ad_lbn == newadp->ad_lbn) { 4134 allocdirect_merge(&inodedep->id_inoupdt, newadp, 4135 listadp); 4136 listadp = newadp; 4137 } 4138 newadp = TAILQ_FIRST(&inodedep->id_newinoupdt); 4139 } 4140 while ((newadp = TAILQ_FIRST(&inodedep->id_newinoupdt)) != NULL) { 4141 TAILQ_REMOVE(&inodedep->id_newinoupdt, newadp, ad_next); 4142 TAILQ_INSERT_TAIL(&inodedep->id_inoupdt, newadp, ad_next); 4143 } 4144 } 4145 4146 /* 4147 * If we are doing an fsync, then we must ensure that any directory 4148 * entries for the inode have been written after the inode gets to disk. 4149 */ 4150 int 4151 softdep_fsync(vp) 4152 struct vnode *vp; /* the "in_core" copy of the inode */ 4153 { 4154 struct inodedep *inodedep; 4155 struct pagedep *pagedep; 4156 struct worklist *wk; 4157 struct diradd *dap; 4158 struct mount *mnt; 4159 struct vnode *pvp; 4160 struct inode *ip; 4161 struct buf *bp; 4162 struct fs *fs; 4163 struct thread *td = curthread; 4164 int error, flushparent; 4165 ino_t parentino; 4166 ufs_lbn_t lbn; 4167 4168 ip = VTOI(vp); 4169 fs = ip->i_fs; 4170 ACQUIRE_LOCK(&lk); 4171 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4172 FREE_LOCK(&lk); 4173 return (0); 4174 } 4175 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4176 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4177 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4178 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4179 FREE_LOCK(&lk); 4180 panic("softdep_fsync: pending ops"); 4181 } 4182 for (error = 0, flushparent = 0; ; ) { 4183 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4184 break; 4185 if (wk->wk_type != D_DIRADD) { 4186 FREE_LOCK(&lk); 4187 panic("softdep_fsync: Unexpected type %s", 4188 TYPENAME(wk->wk_type)); 4189 } 4190 dap = WK_DIRADD(wk); 4191 /* 4192 * Flush our parent if this directory entry has a MKDIR_PARENT 4193 * dependency or is contained in a newly allocated block. 4194 */ 4195 if (dap->da_state & DIRCHG) 4196 pagedep = dap->da_previous->dm_pagedep; 4197 else 4198 pagedep = dap->da_pagedep; 4199 mnt = pagedep->pd_mnt; 4200 parentino = pagedep->pd_ino; 4201 lbn = pagedep->pd_lbn; 4202 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4203 FREE_LOCK(&lk); 4204 panic("softdep_fsync: dirty"); 4205 } 4206 if ((dap->da_state & MKDIR_PARENT) || 4207 (pagedep->pd_state & NEWBLOCK)) 4208 flushparent = 1; 4209 else 4210 flushparent = 0; 4211 /* 4212 * If we are being fsync'ed as part of vgone'ing this vnode, 4213 * then we will not be able to release and recover the 4214 * vnode below, so we just have to give up on writing its 4215 * directory entry out. It will eventually be written, just 4216 * not now, but then the user was not asking to have it 4217 * written, so we are not breaking any promises. 4218 */ 4219 if (vp->v_flag & VXLOCK) 4220 break; 4221 /* 4222 * We prevent deadlock by always fetching inodes from the 4223 * root, moving down the directory tree. Thus, when fetching 4224 * our parent directory, we must unlock ourselves before 4225 * requesting the lock on our parent. 
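 * (Below: VOP_UNLOCK on our vnode, VFS_VGET of the parent, then
 * re-lock before checking for errors.)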
See the comment in 4226 * ufs_lookup for details on possible races. 4227 */ 4228 FREE_LOCK(&lk); 4229 VOP_UNLOCK(vp, 0, td); 4230 error = VFS_VGET(mnt, parentino, &pvp); 4231 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4232 if (error != 0) 4233 return (error); 4234 /* 4235 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 4236 * that are contained in direct blocks will be resolved by 4237 * doing a UFS_UPDATE. Pagedeps contained in indirect blocks 4238 * may require a complete sync'ing of the directory. So, we 4239 * try the cheap and fast UFS_UPDATE first, and if that fails, 4240 * then we do the slower VOP_FSYNC of the directory. 4241 */ 4242 if (flushparent) { 4243 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4244 vput(pvp); 4245 return (error); 4246 } 4247 if ((pagedep->pd_state & NEWBLOCK) && 4248 (error = VOP_FSYNC(pvp, td->td_proc->p_ucred, MNT_WAIT, td))) { 4249 vput(pvp); 4250 return (error); 4251 } 4252 } 4253 /* 4254 * Flush directory page containing the inode's name. 4255 */ 4256 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_proc->p_ucred, 4257 &bp); 4258 if (error == 0) 4259 error = BUF_WRITE(bp); 4260 else 4261 brelse(bp); 4262 vput(pvp); 4263 if (error != 0) 4264 return (error); 4265 ACQUIRE_LOCK(&lk); 4266 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4267 break; 4268 } 4269 FREE_LOCK(&lk); 4270 return (0); 4271 } 4272 4273 /* 4274 * Flush all the dirty bitmaps associated with the block device 4275 * before flushing the rest of the dirty blocks so as to reduce 4276 * the number of dependencies that will have to be rolled back. 4277 */ 4278 void 4279 softdep_fsync_mountdev(vp) 4280 struct vnode *vp; 4281 { 4282 struct buf *bp, *nbp; 4283 struct worklist *wk; 4284 4285 if (!vn_isdisk(vp, NULL)) 4286 panic("softdep_fsync_mountdev: vnode not a disk"); 4287 ACQUIRE_LOCK(&lk); 4288 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 4289 nbp = TAILQ_NEXT(bp, b_vnbufs); 4290 /* 4291 * If it is already scheduled, skip to the next buffer. 4292 */ 4293 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) 4294 continue; 4295 if ((bp->b_flags & B_DELWRI) == 0) { 4296 FREE_LOCK(&lk); 4297 panic("softdep_fsync_mountdev: not dirty"); 4298 } 4299 /* 4300 * We are only interested in bitmaps with outstanding 4301 * dependencies. 4302 */ 4303 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4304 wk->wk_type != D_BMSAFEMAP || 4305 (bp->b_xflags & BX_BKGRDINPROG)) { 4306 BUF_UNLOCK(bp); 4307 continue; 4308 } 4309 bremfree(bp); 4310 FREE_LOCK(&lk); 4311 (void) bawrite(bp); 4312 ACQUIRE_LOCK(&lk); 4313 /* 4314 * Since we may have slept during the I/O, we need 4315 * to start from a known point. 4316 */ 4317 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4318 } 4319 drain_output(vp, 1); 4320 FREE_LOCK(&lk); 4321 } 4322 4323 /* 4324 * This routine is called when we are trying to synchronously flush a 4325 * file. This routine must eliminate any filesystem metadata dependencies 4326 * so that the syncing routine can succeed by pushing the dirty blocks 4327 * associated with the file. If any I/O errors occur, they are returned. 
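 * The flushing is done in two passes; the strategy is spelled out
 * in the comment ahead of the main loop below.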
4328 */ 4329 int 4330 softdep_sync_metadata(ap) 4331 struct vop_fsync_args /* { 4332 struct vnode *a_vp; 4333 struct ucred *a_cred; 4334 int a_waitfor; 4335 struct thread *a_td; 4336 } */ *ap; 4337 { 4338 struct vnode *vp = ap->a_vp; 4339 struct pagedep *pagedep; 4340 struct allocdirect *adp; 4341 struct allocindir *aip; 4342 struct buf *bp, *nbp; 4343 struct worklist *wk; 4344 int i, error, waitfor; 4345 4346 /* 4347 * Check whether this vnode is involved in a filesystem 4348 * that is doing soft dependency processing. 4349 */ 4350 if (!vn_isdisk(vp, NULL)) { 4351 if (!DOINGSOFTDEP(vp)) 4352 return (0); 4353 } else 4354 if (vp->v_rdev->si_mountpoint == NULL || 4355 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4356 return (0); 4357 /* 4358 * Ensure that any direct block dependencies have been cleared. 4359 */ 4360 ACQUIRE_LOCK(&lk); 4361 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4362 FREE_LOCK(&lk); 4363 return (error); 4364 } 4365 /* 4366 * For most files, the only metadata dependencies are the 4367 * cylinder group maps that allocate their inode or blocks. 4368 * The block allocation dependencies can be found by traversing 4369 * the dependency lists for any buffers that remain on their 4370 * dirty buffer list. The inode allocation dependency will 4371 * be resolved when the inode is updated with MNT_WAIT. 4372 * This work is done in two passes. The first pass grabs most 4373 * of the buffers and begins asynchronously writing them. The 4374 * only way to wait for these asynchronous writes is to sleep 4375 * on the filesystem vnode which may stay busy for a long time 4376 * if the filesystem is active. So, instead, we make a second 4377 * pass over the dependencies blocking on each write. In the 4378 * usual case we will be blocking against a write that we 4379 * initiated, so when it is done the dependency will have been 4380 * resolved. Thus the second pass is expected to end quickly. 4381 */ 4382 waitfor = MNT_NOWAIT; 4383 top: 4384 /* 4385 * We must wait for any I/O in progress to finish so that 4386 * all potential buffers on the dirty list will be visible. 4387 */ 4388 drain_output(vp, 1); 4389 if (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT) == 0) { 4390 FREE_LOCK(&lk); 4391 return (0); 4392 } 4393 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4394 /* While syncing snapshots, we must allow recursive lookups */ 4395 bp->b_lock.lk_flags |= LK_CANRECURSE; 4396 loop: 4397 /* 4398 * As we hold the buffer locked, none of its dependencies 4399 * will disappear. 
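 * Most cases below push out the buffer that the dependency is
 * waiting on: asynchronously with bawrite on the first pass, then
 * with a synchronous BUF_WRITE once waitfor is MNT_WAIT; the
 * inodedep and pagedep cases call their own flush routines.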
4400 */ 4401 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4402 switch (wk->wk_type) { 4403 4404 case D_ALLOCDIRECT: 4405 adp = WK_ALLOCDIRECT(wk); 4406 if (adp->ad_state & DEPCOMPLETE) 4407 continue; 4408 nbp = adp->ad_buf; 4409 if (getdirtybuf(&nbp, waitfor) == 0) 4410 continue; 4411 FREE_LOCK(&lk); 4412 if (waitfor == MNT_NOWAIT) { 4413 bawrite(nbp); 4414 } else if ((error = BUF_WRITE(nbp)) != 0) { 4415 break; 4416 } 4417 ACQUIRE_LOCK(&lk); 4418 continue; 4419 4420 case D_ALLOCINDIR: 4421 aip = WK_ALLOCINDIR(wk); 4422 if (aip->ai_state & DEPCOMPLETE) 4423 continue; 4424 nbp = aip->ai_buf; 4425 if (getdirtybuf(&nbp, waitfor) == 0) 4426 continue; 4427 FREE_LOCK(&lk); 4428 if (waitfor == MNT_NOWAIT) { 4429 bawrite(nbp); 4430 } else if ((error = BUF_WRITE(nbp)) != 0) { 4431 break; 4432 } 4433 ACQUIRE_LOCK(&lk); 4434 continue; 4435 4436 case D_INDIRDEP: 4437 restart: 4438 4439 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 4440 if (aip->ai_state & DEPCOMPLETE) 4441 continue; 4442 nbp = aip->ai_buf; 4443 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 4444 goto restart; 4445 FREE_LOCK(&lk); 4446 if ((error = BUF_WRITE(nbp)) != 0) { 4447 break; 4448 } 4449 ACQUIRE_LOCK(&lk); 4450 goto restart; 4451 } 4452 continue; 4453 4454 case D_INODEDEP: 4455 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 4456 WK_INODEDEP(wk)->id_ino)) != 0) { 4457 FREE_LOCK(&lk); 4458 break; 4459 } 4460 continue; 4461 4462 case D_PAGEDEP: 4463 /* 4464 * We are trying to sync a directory that may 4465 * have dependencies on both its own metadata 4466 * and/or dependencies on the inodes of any 4467 * recently allocated files. We walk its diradd 4468 * lists pushing out the associated inode. 4469 */ 4470 pagedep = WK_PAGEDEP(wk); 4471 for (i = 0; i < DAHASHSZ; i++) { 4472 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 4473 continue; 4474 if ((error = 4475 flush_pagedep_deps(vp, pagedep->pd_mnt, 4476 &pagedep->pd_diraddhd[i]))) { 4477 FREE_LOCK(&lk); 4478 break; 4479 } 4480 } 4481 continue; 4482 4483 case D_MKDIR: 4484 /* 4485 * This case should never happen if the vnode has 4486 * been properly sync'ed. However, if this function 4487 * is used at a place where the vnode has not yet 4488 * been sync'ed, this dependency can show up. So, 4489 * rather than panic, just flush it. 4490 */ 4491 nbp = WK_MKDIR(wk)->md_buf; 4492 if (getdirtybuf(&nbp, waitfor) == 0) 4493 continue; 4494 FREE_LOCK(&lk); 4495 if (waitfor == MNT_NOWAIT) { 4496 bawrite(nbp); 4497 } else if ((error = BUF_WRITE(nbp)) != 0) { 4498 break; 4499 } 4500 ACQUIRE_LOCK(&lk); 4501 continue; 4502 4503 case D_BMSAFEMAP: 4504 /* 4505 * This case should never happen if the vnode has 4506 * been properly sync'ed. However, if this function 4507 * is used at a place where the vnode has not yet 4508 * been sync'ed, this dependency can show up. So, 4509 * rather than panic, just flush it. 
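 * The handling matches the D_MKDIR case above, except that the
 * buffer is taken from the bmsafemap.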
4510 */ 4511 nbp = WK_BMSAFEMAP(wk)->sm_buf; 4512 if (getdirtybuf(&nbp, waitfor) == 0) 4513 continue; 4514 FREE_LOCK(&lk); 4515 if (waitfor == MNT_NOWAIT) { 4516 bawrite(nbp); 4517 } else if ((error = BUF_WRITE(nbp)) != 0) { 4518 break; 4519 } 4520 ACQUIRE_LOCK(&lk); 4521 continue; 4522 4523 default: 4524 FREE_LOCK(&lk); 4525 panic("softdep_sync_metadata: Unknown type %s", 4526 TYPENAME(wk->wk_type)); 4527 /* NOTREACHED */ 4528 } 4529 /* We reach here only in error and unlocked */ 4530 if (error == 0) 4531 panic("softdep_sync_metadata: zero error"); 4532 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 4533 bawrite(bp); 4534 return (error); 4535 } 4536 (void) getdirtybuf(&TAILQ_NEXT(bp, b_vnbufs), MNT_WAIT); 4537 nbp = TAILQ_NEXT(bp, b_vnbufs); 4538 FREE_LOCK(&lk); 4539 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 4540 bawrite(bp); 4541 ACQUIRE_LOCK(&lk); 4542 if (nbp != NULL) { 4543 bp = nbp; 4544 goto loop; 4545 } 4546 /* 4547 * The brief unlock is to allow any pent up dependency 4548 * processing to be done. Then proceed with the second pass. 4549 */ 4550 if (waitfor == MNT_NOWAIT) { 4551 waitfor = MNT_WAIT; 4552 FREE_LOCK(&lk); 4553 ACQUIRE_LOCK(&lk); 4554 goto top; 4555 } 4556 4557 /* 4558 * If we have managed to get rid of all the dirty buffers, 4559 * then we are done. For certain directories and block 4560 * devices, we may need to do further work. 4561 * 4562 * We must wait for any I/O in progress to finish so that 4563 * all potential buffers on the dirty list will be visible. 4564 */ 4565 drain_output(vp, 1); 4566 if (TAILQ_FIRST(&vp->v_dirtyblkhd) == NULL) { 4567 FREE_LOCK(&lk); 4568 return (0); 4569 } 4570 4571 FREE_LOCK(&lk); 4572 /* 4573 * If we are trying to sync a block device, some of its buffers may 4574 * contain metadata that cannot be written until the contents of some 4575 * partially written files have been written to disk. The only easy 4576 * way to accomplish this is to sync the entire filesystem (luckily 4577 * this happens rarely). 4578 */ 4579 if (vn_isdisk(vp, NULL) && 4580 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 4581 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred, 4582 ap->a_td)) != 0) 4583 return (error); 4584 return (0); 4585 } 4586 4587 /* 4588 * Flush the dependencies associated with an inodedep. 4589 * Called with splbio blocked. 4590 */ 4591 static int 4592 flush_inodedep_deps(fs, ino) 4593 struct fs *fs; 4594 ino_t ino; 4595 { 4596 struct inodedep *inodedep; 4597 struct allocdirect *adp; 4598 int error, waitfor; 4599 struct buf *bp; 4600 4601 /* 4602 * This work is done in two passes. The first pass grabs most 4603 * of the buffers and begins asynchronously writing them. The 4604 * only way to wait for these asynchronous writes is to sleep 4605 * on the filesystem vnode which may stay busy for a long time 4606 * if the filesystem is active. So, instead, we make a second 4607 * pass over the dependencies blocking on each write. In the 4608 * usual case we will be blocking against a write that we 4609 * initiated, so when it is done the dependency will have been 4610 * resolved. Thus the second pass is expected to end quickly. 4611 * We give a brief window at the top of the loop to allow 4612 * any pending I/O to complete. 
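 * The FREE_LOCK/ACQUIRE_LOCK pair at the top of the loop below
 * provides that window.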
4613 */ 4614 for (waitfor = MNT_NOWAIT; ; ) { 4615 FREE_LOCK(&lk); 4616 ACQUIRE_LOCK(&lk); 4617 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 4618 return (0); 4619 TAILQ_FOREACH(adp, &inodedep->id_inoupdt, ad_next) { 4620 if (adp->ad_state & DEPCOMPLETE) 4621 continue; 4622 bp = adp->ad_buf; 4623 if (getdirtybuf(&bp, waitfor) == 0) { 4624 if (waitfor == MNT_NOWAIT) 4625 continue; 4626 break; 4627 } 4628 FREE_LOCK(&lk); 4629 if (waitfor == MNT_NOWAIT) { 4630 bawrite(bp); 4631 } else if ((error = BUF_WRITE(bp)) != 0) { 4632 ACQUIRE_LOCK(&lk); 4633 return (error); 4634 } 4635 ACQUIRE_LOCK(&lk); 4636 break; 4637 } 4638 if (adp != NULL) 4639 continue; 4640 TAILQ_FOREACH(adp, &inodedep->id_newinoupdt, ad_next) { 4641 if (adp->ad_state & DEPCOMPLETE) 4642 continue; 4643 bp = adp->ad_buf; 4644 if (getdirtybuf(&bp, waitfor) == 0) { 4645 if (waitfor == MNT_NOWAIT) 4646 continue; 4647 break; 4648 } 4649 FREE_LOCK(&lk); 4650 if (waitfor == MNT_NOWAIT) { 4651 bawrite(bp); 4652 } else if ((error = BUF_WRITE(bp)) != 0) { 4653 ACQUIRE_LOCK(&lk); 4654 return (error); 4655 } 4656 ACQUIRE_LOCK(&lk); 4657 break; 4658 } 4659 if (adp != NULL) 4660 continue; 4661 /* 4662 * If we have just finished pass 2, we are done; otherwise begin pass 2. 4663 */ 4664 if (waitfor == MNT_WAIT) 4665 break; 4666 waitfor = MNT_WAIT; 4667 } 4668 /* 4669 * Try freeing inodedep in case all dependencies have been removed. 4670 */ 4671 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0) 4672 (void) free_inodedep(inodedep); 4673 return (0); 4674 } 4675 4676 /* 4677 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 4678 * Called with splbio blocked. 4679 */ 4680 static int 4681 flush_pagedep_deps(pvp, mp, diraddhdp) 4682 struct vnode *pvp; 4683 struct mount *mp; 4684 struct diraddhd *diraddhdp; 4685 { 4686 struct thread *td = curthread; 4687 struct inodedep *inodedep; 4688 struct ufsmount *ump; 4689 struct diradd *dap; 4690 struct vnode *vp; 4691 int gotit, error = 0; 4692 struct buf *bp; 4693 ino_t inum; 4694 4695 ump = VFSTOUFS(mp); 4696 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 4697 /* 4698 * Flush ourselves if this directory entry 4699 * has a MKDIR_PARENT dependency. 4700 */ 4701 if (dap->da_state & MKDIR_PARENT) { 4702 FREE_LOCK(&lk); 4703 if ((error = UFS_UPDATE(pvp, 1)) != 0) 4704 break; 4705 ACQUIRE_LOCK(&lk); 4706 /* 4707 * If that cleared dependencies, go on to next. 4708 */ 4709 if (dap != LIST_FIRST(diraddhdp)) 4710 continue; 4711 if (dap->da_state & MKDIR_PARENT) { 4712 FREE_LOCK(&lk); 4713 panic("flush_pagedep_deps: MKDIR_PARENT"); 4714 } 4715 } 4716 /* 4717 * A newly allocated directory must have its "." and 4718 * ".." entries written out before its name can be 4719 * committed in its parent. We do not want or need 4720 * the full semantics of a synchronous VOP_FSYNC as 4721 * that may end up here again, once for each directory 4722 * level in the filesystem. Instead, we push the blocks 4723 * and wait for them to clear. We have to fsync twice 4724 * because the first call may choose to defer blocks 4725 * that still have dependencies, but deferral will 4726 * happen at most once.
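 * The drain_output call below then waits for the blocks just
 * pushed to settle on disk.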
4727 */ 4728 inum = dap->da_newinum; 4729 if (dap->da_state & MKDIR_BODY) { 4730 FREE_LOCK(&lk); 4731 if ((error = VFS_VGET(mp, inum, &vp)) != 0) 4732 break; 4733 if ((error=VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td)) || 4734 (error=VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td))) { 4735 vput(vp); 4736 break; 4737 } 4738 drain_output(vp, 0); 4739 vput(vp); 4740 ACQUIRE_LOCK(&lk); 4741 /* 4742 * If that cleared dependencies, go on to next. 4743 */ 4744 if (dap != LIST_FIRST(diraddhdp)) 4745 continue; 4746 if (dap->da_state & MKDIR_BODY) { 4747 FREE_LOCK(&lk); 4748 panic("flush_pagedep_deps: MKDIR_BODY"); 4749 } 4750 } 4751 /* 4752 * Flush the inode on which the directory entry depends. 4753 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 4754 * the only remaining dependency is that the updated inode 4755 * count must get pushed to disk. The inode has already 4756 * been pushed into its inode buffer (via VOP_UPDATE) at 4757 * the time of the reference count change. So we need only 4758 * locate that buffer, ensure that there will be no rollback 4759 * caused by a bitmap dependency, then write the inode buffer. 4760 */ 4761 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 4762 FREE_LOCK(&lk); 4763 panic("flush_pagedep_deps: lost inode"); 4764 } 4765 /* 4766 * If the inode still has bitmap dependencies, 4767 * push them to disk. 4768 */ 4769 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 4770 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4771 FREE_LOCK(&lk); 4772 if (gotit && 4773 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4774 break; 4775 ACQUIRE_LOCK(&lk); 4776 if (dap != LIST_FIRST(diraddhdp)) 4777 continue; 4778 } 4779 /* 4780 * If the inode is still sitting in a buffer waiting 4781 * to be written, push it to disk. 4782 */ 4783 FREE_LOCK(&lk); 4784 if ((error = bread(ump->um_devvp, 4785 fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 4786 (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) { 4787 brelse(bp); 4788 break; 4789 } 4790 if ((error = BUF_WRITE(bp)) != 0) 4791 break; 4792 ACQUIRE_LOCK(&lk); 4793 /* 4794 * If we have failed to get rid of all the dependencies 4795 * then something is seriously wrong. 4796 */ 4797 if (dap == LIST_FIRST(diraddhdp)) { 4798 FREE_LOCK(&lk); 4799 panic("flush_pagedep_deps: flush failed"); 4800 } 4801 } 4802 if (error) 4803 ACQUIRE_LOCK(&lk); 4804 return (error); 4805 } 4806 4807 /* 4808 * A large burst of file addition or deletion activity can drive the 4809 * memory load excessively high. First attempt to slow things down 4810 * using the techniques below. If that fails, this routine requests 4811 * the offending operations to fall back to running synchronously 4812 * until the memory load returns to a reasonable level. 4813 */ 4814 int 4815 softdep_slowdown(vp) 4816 struct vnode *vp; 4817 { 4818 int max_softdeps_hard; 4819 4820 max_softdeps_hard = max_softdeps * 11 / 10; 4821 if (num_dirrem < max_softdeps_hard / 2 && 4822 num_inodedep < max_softdeps_hard) 4823 return (0); 4824 stat_sync_limit_hit += 1; 4825 return (1); 4826 } 4827 4828 /* 4829 * Called by the allocation routines when they are about to fail 4830 * in the hope that we can free up some disk space. 4831 * 4832 * First check to see if the work list has anything on it. If it has, 4833 * clean up entries until we successfully free some space. Because this 4834 * process holds inodes locked, we cannot handle any remove requests 4835 * that might block on a locked inode as that could lead to deadlock. 
4836 * If the worklist yields no free space, encourage the syncer daemon 4837 * to help us. In no event will we try for longer than tickdelay seconds. 4838 */ 4839 int 4840 softdep_request_cleanup(fs, vp) 4841 struct fs *fs; 4842 struct vnode *vp; 4843 { 4844 long starttime, needed; 4845 4846 needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize; 4847 starttime = time_second + tickdelay; 4848 if (UFS_UPDATE(vp, 1) != 0) 4849 return (0); 4850 while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) { 4851 if (time_second > starttime) 4852 return (0); 4853 if (num_on_worklist > 0 && 4854 process_worklist_item(NULL, LK_NOWAIT) != -1) { 4855 stat_worklist_push += 1; 4856 continue; 4857 } 4858 request_cleanup(FLUSH_REMOVE_WAIT, 0); 4859 } 4860 return (1); 4861 } 4862 4863 /* 4864 * If memory utilization has gotten too high, deliberately slow things 4865 * down and speed up the I/O processing. 4866 */ 4867 static int 4868 request_cleanup(resource, islocked) 4869 int resource; 4870 int islocked; 4871 { 4872 struct thread *td = curthread; 4873 4874 /* 4875 * We never hold up the filesystem syncer process. 4876 */ 4877 if (td == filesys_syncer) 4878 return (0); 4879 /* 4880 * First check to see if the work list has gotten backlogged. 4881 * If it has, co-opt this process to help clean up two entries. 4882 * Because this process may hold inodes locked, we cannot 4883 * handle any remove requests that might block on a locked 4884 * inode as that could lead to deadlock. 4885 */ 4886 if (num_on_worklist > max_softdeps / 10) { 4887 if (islocked) 4888 FREE_LOCK(&lk); 4889 process_worklist_item(NULL, LK_NOWAIT); 4890 process_worklist_item(NULL, LK_NOWAIT); 4891 stat_worklist_push += 2; 4892 if (islocked) 4893 ACQUIRE_LOCK(&lk); 4894 return(1); 4895 } 4896 /* 4897 * Next, we attempt to speed up the syncer process. If that 4898 * is successful, then we allow the process to continue. 4899 */ 4900 if (speedup_syncer() && resource != FLUSH_REMOVE_WAIT) 4901 return(0); 4902 /* 4903 * If we are resource constrained on inode dependencies, try 4904 * flushing some dirty inodes. Otherwise, we are constrained 4905 * by file deletions, so try accelerating flushes of directories 4906 * with removal dependencies. We would like to do the cleanup 4907 * here, but we probably hold an inode locked at this point and 4908 * that might deadlock against one that we try to clean. So, 4909 * the best that we can do is request the syncer daemon to do 4910 * the cleanup for us. 4911 */ 4912 switch (resource) { 4913 4914 case FLUSH_INODES: 4915 stat_ino_limit_push += 1; 4916 req_clear_inodedeps += 1; 4917 stat_countp = &stat_ino_limit_hit; 4918 break; 4919 4920 case FLUSH_REMOVE: 4921 case FLUSH_REMOVE_WAIT: 4922 stat_blk_limit_push += 1; 4923 req_clear_remove += 1; 4924 stat_countp = &stat_blk_limit_hit; 4925 break; 4926 4927 default: 4928 if (islocked) 4929 FREE_LOCK(&lk); 4930 panic("request_cleanup: unknown type"); 4931 } 4932 /* 4933 * Hopefully the syncer daemon will catch up and awaken us. 4934 * We wait at most tickdelay before proceeding in any case. 4935 */ 4936 if (islocked == 0) 4937 ACQUIRE_LOCK(&lk); 4938 proc_waiting += 1; 4939 if (handle.callout == NULL) 4940 handle = timeout(pause_timer, 0, tickdelay > 2 ? 
tickdelay : 2); 4941 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, PPAUSE, 4942 "softupdate", 0); 4943 proc_waiting -= 1; 4944 if (islocked == 0) 4945 FREE_LOCK(&lk); 4946 return (1); 4947 } 4948 4949 /* 4950 * Awaken processes pausing in request_cleanup and clear proc_waiting 4951 * to indicate that there is no longer a timer running. 4952 */ 4953 void 4954 pause_timer(arg) 4955 void *arg; 4956 { 4957 4958 *stat_countp += 1; 4959 wakeup_one(&proc_waiting); 4960 if (proc_waiting > 0) 4961 handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2); 4962 else 4963 handle.callout = NULL; 4964 } 4965 4966 /* 4967 * Flush out a directory with at least one removal dependency in an effort to 4968 * reduce the number of dirrem, freefile, and freeblks dependency structures. 4969 */ 4970 static void 4971 clear_remove(td) 4972 struct thread *td; 4973 { 4974 struct pagedep_hashhead *pagedephd; 4975 struct pagedep *pagedep; 4976 static int next = 0; 4977 struct mount *mp; 4978 struct vnode *vp; 4979 int error, cnt; 4980 ino_t ino; 4981 4982 ACQUIRE_LOCK(&lk); 4983 for (cnt = 0; cnt < pagedep_hash; cnt++) { 4984 pagedephd = &pagedep_hashtbl[next++]; 4985 if (next >= pagedep_hash) 4986 next = 0; 4987 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 4988 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 4989 continue; 4990 mp = pagedep->pd_mnt; 4991 ino = pagedep->pd_ino; 4992 FREE_LOCK(&lk); 4993 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 4994 continue; 4995 if ((error = VFS_VGET(mp, ino, &vp)) != 0) { 4996 softdep_error("clear_remove: vget", error); 4997 vn_finished_write(mp); 4998 return; 4999 } 5000 if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td))) 5001 softdep_error("clear_remove: fsync", error); 5002 drain_output(vp, 0); 5003 vput(vp); 5004 vn_finished_write(mp); 5005 return; 5006 } 5007 } 5008 FREE_LOCK(&lk); 5009 } 5010 5011 /* 5012 * Clear out a block of dirty inodes in an effort to reduce 5013 * the number of inodedep dependency structures. 5014 */ 5015 static void 5016 clear_inodedeps(td) 5017 struct thread *td; 5018 { 5019 struct inodedep_hashhead *inodedephd; 5020 struct inodedep *inodedep; 5021 static int next = 0; 5022 struct mount *mp; 5023 struct vnode *vp; 5024 struct fs *fs; 5025 int error, cnt; 5026 ino_t firstino, lastino, ino; 5027 5028 ACQUIRE_LOCK(&lk); 5029 /* 5030 * Pick a random inode dependency to be cleared. 5031 * We will then gather up all the inodes in its block 5032 * that have dependencies and flush them out. 5033 */ 5034 for (cnt = 0; cnt < inodedep_hash; cnt++) { 5035 inodedephd = &inodedep_hashtbl[next++]; 5036 if (next >= inodedep_hash) 5037 next = 0; 5038 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 5039 break; 5040 } 5041 if (inodedep == NULL) 5042 return; 5043 /* 5044 * Ugly code to find mount point given pointer to superblock. 5045 */ 5046 fs = inodedep->id_fs; 5047 TAILQ_FOREACH(mp, &mountlist, mnt_list) 5048 if ((mp->mnt_flag & MNT_SOFTDEP) && fs == VFSTOUFS(mp)->um_fs) 5049 break; 5050 /* 5051 * Find the last inode in the block with dependencies. 5052 */ 5053 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5054 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5055 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5056 break; 5057 /* 5058 * Asynchronously push all but the last inode with dependencies. 5059 * Synchronously push the last inode with dependencies to ensure 5060 * that the inode block gets written to free up the inodedeps. 
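 * The loop below implements this with VOP_FSYNC: MNT_NOWAIT for
 * every inode except lastino, MNT_WAIT for lastino itself.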
5061 */ 5062 for (ino = firstino; ino <= lastino; ino++) { 5063 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5064 continue; 5065 FREE_LOCK(&lk); 5066 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5067 continue; 5068 if ((error = VFS_VGET(mp, ino, &vp)) != 0) { 5069 softdep_error("clear_inodedeps: vget", error); 5070 vn_finished_write(mp); 5071 return; 5072 } 5073 if (ino == lastino) { 5074 if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_WAIT, td))) 5075 softdep_error("clear_inodedeps: fsync1", error); 5076 } else { 5077 if ((error = VOP_FSYNC(vp, td->td_proc->p_ucred, MNT_NOWAIT, td))) 5078 softdep_error("clear_inodedeps: fsync2", error); 5079 drain_output(vp, 0); 5080 } 5081 vput(vp); 5082 vn_finished_write(mp); 5083 ACQUIRE_LOCK(&lk); 5084 } 5085 FREE_LOCK(&lk); 5086 } 5087 5088 /* 5089 * Function to determine if the buffer has outstanding dependencies 5090 * that will cause a roll-back if the buffer is written. If wantcount 5091 * is set, return number of dependencies, otherwise just yes or no. 5092 */ 5093 static int 5094 softdep_count_dependencies(bp, wantcount) 5095 struct buf *bp; 5096 int wantcount; 5097 { 5098 struct worklist *wk; 5099 struct inodedep *inodedep; 5100 struct indirdep *indirdep; 5101 struct allocindir *aip; 5102 struct pagedep *pagedep; 5103 struct diradd *dap; 5104 int i, retval; 5105 5106 retval = 0; 5107 ACQUIRE_LOCK(&lk); 5108 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5109 switch (wk->wk_type) { 5110 5111 case D_INODEDEP: 5112 inodedep = WK_INODEDEP(wk); 5113 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5114 /* bitmap allocation dependency */ 5115 retval += 1; 5116 if (!wantcount) 5117 goto out; 5118 } 5119 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5120 /* direct block pointer dependency */ 5121 retval += 1; 5122 if (!wantcount) 5123 goto out; 5124 } 5125 continue; 5126 5127 case D_INDIRDEP: 5128 indirdep = WK_INDIRDEP(wk); 5129 5130 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5131 /* indirect block pointer dependency */ 5132 retval += 1; 5133 if (!wantcount) 5134 goto out; 5135 } 5136 continue; 5137 5138 case D_PAGEDEP: 5139 pagedep = WK_PAGEDEP(wk); 5140 for (i = 0; i < DAHASHSZ; i++) { 5141 5142 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5143 /* directory entry dependency */ 5144 retval += 1; 5145 if (!wantcount) 5146 goto out; 5147 } 5148 } 5149 continue; 5150 5151 case D_BMSAFEMAP: 5152 case D_ALLOCDIRECT: 5153 case D_ALLOCINDIR: 5154 case D_MKDIR: 5155 /* never a dependency on these blocks */ 5156 continue; 5157 5158 default: 5159 FREE_LOCK(&lk); 5160 panic("softdep_check_for_rollback: Unexpected type %s", 5161 TYPENAME(wk->wk_type)); 5162 /* NOTREACHED */ 5163 } 5164 } 5165 out: 5166 FREE_LOCK(&lk); 5167 return retval; 5168 } 5169 5170 /* 5171 * Acquire exclusive access to a buffer. 5172 * Must be called with splbio blocked. 5173 * Return 1 if buffer was acquired. 
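 * A buffer undergoing a background write (BX_BKGRDINPROG) cannot
 * be used; with MNT_WAIT we sleep until that write finishes and
 * retry, otherwise we fail. A clean (non-B_DELWRI) buffer is
 * likewise refused.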
5174 */ 5175 static int 5176 getdirtybuf(bpp, waitfor) 5177 struct buf **bpp; 5178 int waitfor; 5179 { 5180 struct buf *bp; 5181 int error; 5182 5183 for (;;) { 5184 if ((bp = *bpp) == NULL) 5185 return (0); 5186 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 5187 if ((bp->b_xflags & BX_BKGRDINPROG) == 0) 5188 break; 5189 BUF_UNLOCK(bp); 5190 if (waitfor != MNT_WAIT) 5191 return (0); 5192 bp->b_xflags |= BX_BKGRDWAIT; 5193 interlocked_sleep(&lk, SLEEP, &bp->b_xflags, PRIBIO, 5194 "getbuf", 0); 5195 continue; 5196 } 5197 if (waitfor != MNT_WAIT) 5198 return (0); 5199 error = interlocked_sleep(&lk, LOCKBUF, bp, 5200 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5201 if (error != ENOLCK) { 5202 FREE_LOCK(&lk); 5203 panic("getdirtybuf: inconsistent lock"); 5204 } 5205 } 5206 if ((bp->b_flags & B_DELWRI) == 0) { 5207 BUF_UNLOCK(bp); 5208 return (0); 5209 } 5210 bremfree(bp); 5211 return (1); 5212 } 5213 5214 /* 5215 * Wait for pending output on a vnode to complete. 5216 * Must be called with vnode locked. 5217 */ 5218 static void 5219 drain_output(vp, islocked) 5220 struct vnode *vp; 5221 int islocked; 5222 { 5223 5224 if (!islocked) 5225 ACQUIRE_LOCK(&lk); 5226 while (vp->v_numoutput) { 5227 vp->v_flag |= VBWAIT; 5228 interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput, 5229 PRIBIO + 1, "drainvp", 0); 5230 } 5231 if (!islocked) 5232 FREE_LOCK(&lk); 5233 } 5234 5235 /* 5236 * Called whenever a buffer that is being invalidated or reallocated 5237 * contains dependencies. This should only happen if an I/O error has 5238 * occurred. The routine is called with the buffer locked. 5239 */ 5240 static void 5241 softdep_deallocate_dependencies(bp) 5242 struct buf *bp; 5243 { 5244 5245 if ((bp->b_ioflags & BIO_ERROR) == 0) 5246 panic("softdep_deallocate_dependencies: dangling deps"); 5247 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 5248 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5249 } 5250 5251 /* 5252 * Function to handle asynchronous write errors in the filesystem. 5253 */ 5254 void 5255 softdep_error(func, error) 5256 char *func; 5257 int error; 5258 { 5259 5260 /* XXX should do something better! */ 5261 printf("%s: got error %d while accessing filesystem\n", func, error); 5262 } 5263