1 /* 2 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved. 3 * 4 * The soft updates code is derived from the appendix of a University 5 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt, 6 * "Soft Updates: A Solution to the Metadata Update Problem in File 7 * Systems", CSE-TR-254-95, August 1995). 8 * 9 * Further information about soft updates can be obtained from: 10 * 11 * Marshall Kirk McKusick http://www.mckusick.com/softdep/ 12 * 1614 Oxford Street mckusick@mckusick.com 13 * Berkeley, CA 94709-1608 +1-510-843-9542 14 * USA 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 20 * 1. Redistributions of source code must retain the above copyright 21 * notice, this list of conditions and the following disclaimer. 22 * 2. Redistributions in binary form must reproduce the above copyright 23 * notice, this list of conditions and the following disclaimer in the 24 * documentation and/or other materials provided with the distribution. 25 * 26 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY 27 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 29 * DISCLAIMED. IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR 30 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)ffs_softdep.c 9.59 (McKusick) 6/21/00 39 */ 40 41 #include <sys/cdefs.h> 42 __FBSDID("$FreeBSD$"); 43 44 /* 45 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide. 46 */ 47 #ifndef DIAGNOSTIC 48 #define DIAGNOSTIC 49 #endif 50 #ifndef DEBUG 51 #define DEBUG 52 #endif 53 54 #include <sys/param.h> 55 #include <sys/kernel.h> 56 #include <sys/systm.h> 57 #include <sys/stdint.h> 58 #include <sys/bio.h> 59 #include <sys/buf.h> 60 #include <sys/malloc.h> 61 #include <sys/mount.h> 62 #include <sys/proc.h> 63 #include <sys/stat.h> 64 #include <sys/syslog.h> 65 #include <sys/vnode.h> 66 #include <sys/conf.h> 67 #include <ufs/ufs/dir.h> 68 #include <ufs/ufs/extattr.h> 69 #include <ufs/ufs/quota.h> 70 #include <ufs/ufs/inode.h> 71 #include <ufs/ufs/ufsmount.h> 72 #include <ufs/ffs/fs.h> 73 #include <ufs/ffs/softdep.h> 74 #include <ufs/ffs/ffs_extern.h> 75 #include <ufs/ufs/ufs_extern.h> 76 77 /* 78 * These definitions need to be adapted to the system to which 79 * this file is being ported. 80 */ 81 /* 82 * malloc types defined for the softdep system. 83 */ 84 static MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies"); 85 static MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies"); 86 static MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation"); 87 static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map"); 88 static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode"); 89 static MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies"); 90 static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block"); 91 static MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode"); 92 static MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode"); 93 static MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated"); 94 static MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry"); 95 static MALLOC_DEFINE(M_MKDIR, "mkdir","New directory"); 96 static MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted"); 97 static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk","Unclaimed new directory block"); 98 99 #define M_SOFTDEP_FLAGS (M_WAITOK | M_USE_RESERVE) 100 101 #define D_PAGEDEP 0 102 #define D_INODEDEP 1 103 #define D_NEWBLK 2 104 #define D_BMSAFEMAP 3 105 #define D_ALLOCDIRECT 4 106 #define D_INDIRDEP 5 107 #define D_ALLOCINDIR 6 108 #define D_FREEFRAG 7 109 #define D_FREEBLKS 8 110 #define D_FREEFILE 9 111 #define D_DIRADD 10 112 #define D_MKDIR 11 113 #define D_DIRREM 12 114 #define D_NEWDIRBLK 13 115 #define D_LAST D_NEWDIRBLK 116 117 /* 118 * translate from workitem type to memory type 119 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 120 */ 121 static struct malloc_type *memtype[] = { 122 M_PAGEDEP, 123 M_INODEDEP, 124 M_NEWBLK, 125 M_BMSAFEMAP, 126 M_ALLOCDIRECT, 127 M_INDIRDEP, 128 M_ALLOCINDIR, 129 M_FREEFRAG, 130 M_FREEBLKS, 131 M_FREEFILE, 132 M_DIRADD, 133 M_MKDIR, 134 M_DIRREM, 135 M_NEWDIRBLK 136 }; 137 138 #define DtoM(type) (memtype[type]) 139 140 /* 141 * Names of malloc types. 142 */ 143 #define TYPENAME(type) \ 144 ((unsigned)(type) < D_LAST ? memtype[type]->ks_shortdesc : "???") 145 /* 146 * End system adaptaion definitions. 147 */ 148 149 /* 150 * Internal function prototypes. 151 */ 152 static void softdep_error(char *, int); 153 static void drain_output(struct vnode *, int); 154 static int getdirtybuf(struct buf **, int); 155 static void clear_remove(struct thread *); 156 static void clear_inodedeps(struct thread *); 157 static int flush_pagedep_deps(struct vnode *, struct mount *, 158 struct diraddhd *); 159 static int flush_inodedep_deps(struct fs *, ino_t); 160 static int flush_deplist(struct allocdirectlst *, int, int *); 161 static int handle_written_filepage(struct pagedep *, struct buf *); 162 static void diradd_inode_written(struct diradd *, struct inodedep *); 163 static int handle_written_inodeblock(struct inodedep *, struct buf *); 164 static void handle_allocdirect_partdone(struct allocdirect *); 165 static void handle_allocindir_partdone(struct allocindir *); 166 static void initiate_write_filepage(struct pagedep *, struct buf *); 167 static void handle_written_mkdir(struct mkdir *, int); 168 static void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *); 169 static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *); 170 static void handle_workitem_freefile(struct freefile *); 171 static void handle_workitem_remove(struct dirrem *, struct vnode *); 172 static struct dirrem *newdirrem(struct buf *, struct inode *, 173 struct inode *, int, struct dirrem **); 174 static void free_diradd(struct diradd *); 175 static void free_allocindir(struct allocindir *, struct inodedep *); 176 static void free_newdirblk(struct newdirblk *); 177 static int indir_trunc(struct freeblks *, ufs2_daddr_t, int, ufs_lbn_t, 178 ufs2_daddr_t *); 179 static void deallocate_dependencies(struct buf *, struct inodedep *); 180 static void free_allocdirect(struct allocdirectlst *, 181 struct allocdirect *, int); 182 static int check_inode_unwritten(struct inodedep *); 183 static int free_inodedep(struct inodedep *); 184 static void handle_workitem_freeblocks(struct freeblks *, int); 185 static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *); 186 static void setup_allocindir_phase2(struct buf *, struct inode *, 187 struct allocindir *); 188 static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t, 189 ufs2_daddr_t); 190 static void handle_workitem_freefrag(struct freefrag *); 191 static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long); 192 static void allocdirect_merge(struct allocdirectlst *, 193 struct allocdirect *, struct allocdirect *); 194 static struct bmsafemap *bmsafemap_lookup(struct buf *); 195 static int newblk_lookup(struct fs *, ufs2_daddr_t, int, struct newblk **); 196 static int inodedep_lookup(struct fs *, ino_t, int, struct inodedep **); 197 static int pagedep_lookup(struct inode *, ufs_lbn_t, int, struct pagedep **); 198 static void pause_timer(void *); 199 static int request_cleanup(int, int); 200 static int process_worklist_item(struct mount *, int); 201 static void add_to_worklist(struct worklist *); 202 203 /* 204 * Exported softdep operations. 205 */ 206 static void softdep_disk_io_initiation(struct buf *); 207 static void softdep_disk_write_complete(struct buf *); 208 static void softdep_deallocate_dependencies(struct buf *); 209 static void softdep_move_dependencies(struct buf *, struct buf *); 210 static int softdep_count_dependencies(struct buf *bp, int); 211 212 /* 213 * Locking primitives. 214 * 215 * For a uniprocessor, all we need to do is protect against disk 216 * interrupts. For a multiprocessor, this lock would have to be 217 * a mutex. A single mutex is used throughout this file, though 218 * finer grain locking could be used if contention warranted it. 219 * 220 * For a multiprocessor, the sleep call would accept a lock and 221 * release it after the sleep processing was complete. In a uniprocessor 222 * implementation there is no such interlock, so we simple mark 223 * the places where it needs to be done with the `interlocked' form 224 * of the lock calls. Since the uniprocessor sleep already interlocks 225 * the spl, there is nothing that really needs to be done. 226 */ 227 #ifndef /* NOT */ DEBUG 228 static struct lockit { 229 int lkt_spl; 230 } lk = { 0 }; 231 #define ACQUIRE_LOCK(lk) (lk)->lkt_spl = splbio() 232 #define FREE_LOCK(lk) splx((lk)->lkt_spl) 233 234 #else /* DEBUG */ 235 #define NOHOLDER ((struct thread *)-1) 236 #define SPECIAL_FLAG ((struct thread *)-2) 237 static struct lockit { 238 int lkt_spl; 239 struct thread *lkt_held; 240 } lk = { 0, NOHOLDER }; 241 static int lockcnt; 242 243 static void acquire_lock(struct lockit *); 244 static void free_lock(struct lockit *); 245 void softdep_panic(char *); 246 247 #define ACQUIRE_LOCK(lk) acquire_lock(lk) 248 #define FREE_LOCK(lk) free_lock(lk) 249 250 static void 251 acquire_lock(lk) 252 struct lockit *lk; 253 { 254 struct thread *holder; 255 256 if (lk->lkt_held != NOHOLDER) { 257 holder = lk->lkt_held; 258 FREE_LOCK(lk); 259 if (holder == curthread) 260 panic("softdep_lock: locking against myself"); 261 else 262 panic("softdep_lock: lock held by %p", holder); 263 } 264 lk->lkt_spl = splbio(); 265 lk->lkt_held = curthread; 266 lockcnt++; 267 } 268 269 static void 270 free_lock(lk) 271 struct lockit *lk; 272 { 273 274 if (lk->lkt_held == NOHOLDER) 275 panic("softdep_unlock: lock not held"); 276 lk->lkt_held = NOHOLDER; 277 splx(lk->lkt_spl); 278 } 279 280 /* 281 * Function to release soft updates lock and panic. 282 */ 283 void 284 softdep_panic(msg) 285 char *msg; 286 { 287 288 if (lk.lkt_held != NOHOLDER) 289 FREE_LOCK(&lk); 290 panic(msg); 291 } 292 #endif /* DEBUG */ 293 294 static int interlocked_sleep(struct lockit *, int, void *, struct mtx *, int, 295 const char *, int); 296 297 /* 298 * When going to sleep, we must save our SPL so that it does 299 * not get lost if some other process uses the lock while we 300 * are sleeping. We restore it after we have slept. This routine 301 * wraps the interlocking with functions that sleep. The list 302 * below enumerates the available set of operations. 303 */ 304 #define UNKNOWN 0 305 #define SLEEP 1 306 #define LOCKBUF 2 307 308 static int 309 interlocked_sleep(lk, op, ident, mtx, flags, wmesg, timo) 310 struct lockit *lk; 311 int op; 312 void *ident; 313 struct mtx *mtx; 314 int flags; 315 const char *wmesg; 316 int timo; 317 { 318 struct thread *holder; 319 int s, retval; 320 321 s = lk->lkt_spl; 322 # ifdef DEBUG 323 if (lk->lkt_held == NOHOLDER) 324 panic("interlocked_sleep: lock not held"); 325 lk->lkt_held = NOHOLDER; 326 # endif /* DEBUG */ 327 switch (op) { 328 case SLEEP: 329 retval = msleep(ident, mtx, flags, wmesg, timo); 330 break; 331 case LOCKBUF: 332 retval = BUF_LOCK((struct buf *)ident, flags); 333 break; 334 default: 335 panic("interlocked_sleep: unknown operation"); 336 } 337 # ifdef DEBUG 338 if (lk->lkt_held != NOHOLDER) { 339 holder = lk->lkt_held; 340 FREE_LOCK(lk); 341 if (holder == curthread) 342 panic("interlocked_sleep: locking against self"); 343 else 344 panic("interlocked_sleep: lock held by %p", holder); 345 } 346 lk->lkt_held = curthread; 347 lockcnt++; 348 # endif /* DEBUG */ 349 lk->lkt_spl = s; 350 return (retval); 351 } 352 353 /* 354 * Place holder for real semaphores. 355 */ 356 struct sema { 357 int value; 358 struct thread *holder; 359 char *name; 360 int prio; 361 int timo; 362 }; 363 static void sema_init(struct sema *, char *, int, int); 364 static int sema_get(struct sema *, struct lockit *); 365 static void sema_release(struct sema *); 366 367 static void 368 sema_init(semap, name, prio, timo) 369 struct sema *semap; 370 char *name; 371 int prio, timo; 372 { 373 374 semap->holder = NOHOLDER; 375 semap->value = 0; 376 semap->name = name; 377 semap->prio = prio; 378 semap->timo = timo; 379 } 380 381 static int 382 sema_get(semap, interlock) 383 struct sema *semap; 384 struct lockit *interlock; 385 { 386 387 if (semap->value++ > 0) { 388 if (interlock != NULL) { 389 interlocked_sleep(interlock, SLEEP, (caddr_t)semap, 390 NULL, semap->prio, semap->name, 391 semap->timo); 392 FREE_LOCK(interlock); 393 } else { 394 tsleep((caddr_t)semap, semap->prio, semap->name, 395 semap->timo); 396 } 397 return (0); 398 } 399 semap->holder = curthread; 400 if (interlock != NULL) 401 FREE_LOCK(interlock); 402 return (1); 403 } 404 405 static void 406 sema_release(semap) 407 struct sema *semap; 408 { 409 410 if (semap->value <= 0 || semap->holder != curthread) { 411 if (lk.lkt_held != NOHOLDER) 412 FREE_LOCK(&lk); 413 panic("sema_release: not held"); 414 } 415 if (--semap->value > 0) { 416 semap->value = 0; 417 wakeup(semap); 418 } 419 semap->holder = NOHOLDER; 420 } 421 422 /* 423 * Worklist queue management. 424 * These routines require that the lock be held. 425 */ 426 #ifndef /* NOT */ DEBUG 427 #define WORKLIST_INSERT(head, item) do { \ 428 (item)->wk_state |= ONWORKLIST; \ 429 LIST_INSERT_HEAD(head, item, wk_list); \ 430 } while (0) 431 #define WORKLIST_REMOVE(item) do { \ 432 (item)->wk_state &= ~ONWORKLIST; \ 433 LIST_REMOVE(item, wk_list); \ 434 } while (0) 435 #define WORKITEM_FREE(item, type) FREE(item, DtoM(type)) 436 437 #else /* DEBUG */ 438 static void worklist_insert(struct workhead *, struct worklist *); 439 static void worklist_remove(struct worklist *); 440 static void workitem_free(struct worklist *, int); 441 442 #define WORKLIST_INSERT(head, item) worklist_insert(head, item) 443 #define WORKLIST_REMOVE(item) worklist_remove(item) 444 #define WORKITEM_FREE(item, type) workitem_free((struct worklist *)item, type) 445 446 static void 447 worklist_insert(head, item) 448 struct workhead *head; 449 struct worklist *item; 450 { 451 452 if (lk.lkt_held == NOHOLDER) 453 panic("worklist_insert: lock not held"); 454 if (item->wk_state & ONWORKLIST) { 455 FREE_LOCK(&lk); 456 panic("worklist_insert: already on list"); 457 } 458 item->wk_state |= ONWORKLIST; 459 LIST_INSERT_HEAD(head, item, wk_list); 460 } 461 462 static void 463 worklist_remove(item) 464 struct worklist *item; 465 { 466 467 if (lk.lkt_held == NOHOLDER) 468 panic("worklist_remove: lock not held"); 469 if ((item->wk_state & ONWORKLIST) == 0) { 470 FREE_LOCK(&lk); 471 panic("worklist_remove: not on list"); 472 } 473 item->wk_state &= ~ONWORKLIST; 474 LIST_REMOVE(item, wk_list); 475 } 476 477 static void 478 workitem_free(item, type) 479 struct worklist *item; 480 int type; 481 { 482 483 if (item->wk_state & ONWORKLIST) { 484 if (lk.lkt_held != NOHOLDER) 485 FREE_LOCK(&lk); 486 panic("workitem_free: still on list"); 487 } 488 if (item->wk_type != type) { 489 if (lk.lkt_held != NOHOLDER) 490 FREE_LOCK(&lk); 491 panic("workitem_free: type mismatch"); 492 } 493 FREE(item, DtoM(type)); 494 } 495 #endif /* DEBUG */ 496 497 /* 498 * Workitem queue management 499 */ 500 static struct workhead softdep_workitem_pending; 501 static int num_on_worklist; /* number of worklist items to be processed */ 502 static int softdep_worklist_busy; /* 1 => trying to do unmount */ 503 static int softdep_worklist_req; /* serialized waiters */ 504 static int max_softdeps; /* maximum number of structs before slowdown */ 505 static int tickdelay = 2; /* number of ticks to pause during slowdown */ 506 static int proc_waiting; /* tracks whether we have a timeout posted */ 507 static int *stat_countp; /* statistic to count in proc_waiting timeout */ 508 static struct callout_handle handle; /* handle on posted proc_waiting timeout */ 509 static struct thread *filesys_syncer; /* proc of filesystem syncer process */ 510 static int req_clear_inodedeps; /* syncer process flush some inodedeps */ 511 #define FLUSH_INODES 1 512 static int req_clear_remove; /* syncer process flush some freeblks */ 513 #define FLUSH_REMOVE 2 514 #define FLUSH_REMOVE_WAIT 3 515 /* 516 * runtime statistics 517 */ 518 static int stat_worklist_push; /* number of worklist cleanups */ 519 static int stat_blk_limit_push; /* number of times block limit neared */ 520 static int stat_ino_limit_push; /* number of times inode limit neared */ 521 static int stat_blk_limit_hit; /* number of times block slowdown imposed */ 522 static int stat_ino_limit_hit; /* number of times inode slowdown imposed */ 523 static int stat_sync_limit_hit; /* number of synchronous slowdowns imposed */ 524 static int stat_indir_blk_ptrs; /* bufs redirtied as indir ptrs not written */ 525 static int stat_inode_bitmap; /* bufs redirtied as inode bitmap not written */ 526 static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */ 527 static int stat_dir_entry; /* bufs redirtied as dir entry cannot write */ 528 #ifdef DEBUG 529 #include <vm/vm.h> 530 #include <sys/sysctl.h> 531 SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, ""); 532 SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, ""); 533 SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,""); 534 SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,""); 535 SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,""); 536 SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, ""); 537 SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, ""); 538 SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, ""); 539 SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, ""); 540 SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, ""); 541 SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, ""); 542 SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, ""); 543 #endif /* DEBUG */ 544 545 /* 546 * Add an item to the end of the work queue. 547 * This routine requires that the lock be held. 548 * This is the only routine that adds items to the list. 549 * The following routine is the only one that removes items 550 * and does so in order from first to last. 551 */ 552 static void 553 add_to_worklist(wk) 554 struct worklist *wk; 555 { 556 static struct worklist *worklist_tail; 557 558 if (wk->wk_state & ONWORKLIST) { 559 if (lk.lkt_held != NOHOLDER) 560 FREE_LOCK(&lk); 561 panic("add_to_worklist: already on list"); 562 } 563 wk->wk_state |= ONWORKLIST; 564 if (LIST_FIRST(&softdep_workitem_pending) == NULL) 565 LIST_INSERT_HEAD(&softdep_workitem_pending, wk, wk_list); 566 else 567 LIST_INSERT_AFTER(worklist_tail, wk, wk_list); 568 worklist_tail = wk; 569 num_on_worklist += 1; 570 } 571 572 /* 573 * Process that runs once per second to handle items in the background queue. 574 * 575 * Note that we ensure that everything is done in the order in which they 576 * appear in the queue. The code below depends on this property to ensure 577 * that blocks of a file are freed before the inode itself is freed. This 578 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated 579 * until all the old ones have been purged from the dependency lists. 580 */ 581 int 582 softdep_process_worklist(matchmnt) 583 struct mount *matchmnt; 584 { 585 struct thread *td = curthread; 586 int cnt, matchcnt, loopcount; 587 long starttime; 588 589 /* 590 * Record the process identifier of our caller so that we can give 591 * this process preferential treatment in request_cleanup below. 592 */ 593 filesys_syncer = td; 594 matchcnt = 0; 595 596 /* 597 * There is no danger of having multiple processes run this 598 * code, but we have to single-thread it when softdep_flushfiles() 599 * is in operation to get an accurate count of the number of items 600 * related to its mount point that are in the list. 601 */ 602 if (matchmnt == NULL) { 603 if (softdep_worklist_busy < 0) 604 return(-1); 605 softdep_worklist_busy += 1; 606 } 607 608 /* 609 * If requested, try removing inode or removal dependencies. 610 */ 611 if (req_clear_inodedeps) { 612 clear_inodedeps(td); 613 req_clear_inodedeps -= 1; 614 wakeup_one(&proc_waiting); 615 } 616 if (req_clear_remove) { 617 clear_remove(td); 618 req_clear_remove -= 1; 619 wakeup_one(&proc_waiting); 620 } 621 loopcount = 1; 622 starttime = time_second; 623 while (num_on_worklist > 0) { 624 if ((cnt = process_worklist_item(matchmnt, 0)) == -1) 625 break; 626 else 627 matchcnt += cnt; 628 629 /* 630 * If a umount operation wants to run the worklist 631 * accurately, abort. 632 */ 633 if (softdep_worklist_req && matchmnt == NULL) { 634 matchcnt = -1; 635 break; 636 } 637 638 /* 639 * If requested, try removing inode or removal dependencies. 640 */ 641 if (req_clear_inodedeps) { 642 clear_inodedeps(td); 643 req_clear_inodedeps -= 1; 644 wakeup_one(&proc_waiting); 645 } 646 if (req_clear_remove) { 647 clear_remove(td); 648 req_clear_remove -= 1; 649 wakeup_one(&proc_waiting); 650 } 651 /* 652 * We do not generally want to stop for buffer space, but if 653 * we are really being a buffer hog, we will stop and wait. 654 */ 655 if (loopcount++ % 128 == 0) 656 bwillwrite(); 657 /* 658 * Never allow processing to run for more than one 659 * second. Otherwise the other syncer tasks may get 660 * excessively backlogged. 661 */ 662 if (starttime != time_second && matchmnt == NULL) { 663 matchcnt = -1; 664 break; 665 } 666 } 667 if (matchmnt == NULL) { 668 softdep_worklist_busy -= 1; 669 if (softdep_worklist_req && softdep_worklist_busy == 0) 670 wakeup(&softdep_worklist_req); 671 } 672 return (matchcnt); 673 } 674 675 /* 676 * Process one item on the worklist. 677 */ 678 static int 679 process_worklist_item(matchmnt, flags) 680 struct mount *matchmnt; 681 int flags; 682 { 683 struct worklist *wk; 684 struct mount *mp; 685 struct vnode *vp; 686 int matchcnt = 0; 687 688 /* 689 * If we are being called because of a process doing a 690 * copy-on-write, then it is not safe to write as we may 691 * recurse into the copy-on-write routine. 692 */ 693 if (curthread->td_proc->p_flag & P_COWINPROGRESS) 694 return (-1); 695 ACQUIRE_LOCK(&lk); 696 /* 697 * Normally we just process each item on the worklist in order. 698 * However, if we are in a situation where we cannot lock any 699 * inodes, we have to skip over any dirrem requests whose 700 * vnodes are resident and locked. 701 */ 702 vp = NULL; 703 LIST_FOREACH(wk, &softdep_workitem_pending, wk_list) { 704 if (wk->wk_state & INPROGRESS) 705 continue; 706 if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM) 707 break; 708 wk->wk_state |= INPROGRESS; 709 FREE_LOCK(&lk); 710 VFS_VGET(WK_DIRREM(wk)->dm_mnt, WK_DIRREM(wk)->dm_oldinum, 711 LK_NOWAIT | LK_EXCLUSIVE, &vp); 712 ACQUIRE_LOCK(&lk); 713 wk->wk_state &= ~INPROGRESS; 714 if (vp != NULL) 715 break; 716 } 717 if (wk == 0) { 718 FREE_LOCK(&lk); 719 return (-1); 720 } 721 WORKLIST_REMOVE(wk); 722 num_on_worklist -= 1; 723 FREE_LOCK(&lk); 724 switch (wk->wk_type) { 725 726 case D_DIRREM: 727 /* removal of a directory entry */ 728 mp = WK_DIRREM(wk)->dm_mnt; 729 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 730 panic("%s: dirrem on suspended filesystem", 731 "process_worklist_item"); 732 if (mp == matchmnt) 733 matchcnt += 1; 734 handle_workitem_remove(WK_DIRREM(wk), vp); 735 break; 736 737 case D_FREEBLKS: 738 /* releasing blocks and/or fragments from a file */ 739 mp = WK_FREEBLKS(wk)->fb_mnt; 740 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 741 panic("%s: freeblks on suspended filesystem", 742 "process_worklist_item"); 743 if (mp == matchmnt) 744 matchcnt += 1; 745 handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT); 746 break; 747 748 case D_FREEFRAG: 749 /* releasing a fragment when replaced as a file grows */ 750 mp = WK_FREEFRAG(wk)->ff_mnt; 751 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 752 panic("%s: freefrag on suspended filesystem", 753 "process_worklist_item"); 754 if (mp == matchmnt) 755 matchcnt += 1; 756 handle_workitem_freefrag(WK_FREEFRAG(wk)); 757 break; 758 759 case D_FREEFILE: 760 /* releasing an inode when its link count drops to 0 */ 761 mp = WK_FREEFILE(wk)->fx_mnt; 762 if (vn_write_suspend_wait(NULL, mp, V_NOWAIT)) 763 panic("%s: freefile on suspended filesystem", 764 "process_worklist_item"); 765 if (mp == matchmnt) 766 matchcnt += 1; 767 handle_workitem_freefile(WK_FREEFILE(wk)); 768 break; 769 770 default: 771 panic("%s_process_worklist: Unknown type %s", 772 "softdep", TYPENAME(wk->wk_type)); 773 /* NOTREACHED */ 774 } 775 return (matchcnt); 776 } 777 778 /* 779 * Move dependencies from one buffer to another. 780 */ 781 static void 782 softdep_move_dependencies(oldbp, newbp) 783 struct buf *oldbp; 784 struct buf *newbp; 785 { 786 struct worklist *wk, *wktail; 787 788 if (LIST_FIRST(&newbp->b_dep) != NULL) 789 panic("softdep_move_dependencies: need merge code"); 790 wktail = 0; 791 ACQUIRE_LOCK(&lk); 792 while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) { 793 LIST_REMOVE(wk, wk_list); 794 if (wktail == 0) 795 LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list); 796 else 797 LIST_INSERT_AFTER(wktail, wk, wk_list); 798 wktail = wk; 799 } 800 FREE_LOCK(&lk); 801 } 802 803 /* 804 * Purge the work list of all items associated with a particular mount point. 805 */ 806 int 807 softdep_flushworklist(oldmnt, countp, td) 808 struct mount *oldmnt; 809 int *countp; 810 struct thread *td; 811 { 812 struct vnode *devvp; 813 int count, error = 0; 814 815 /* 816 * Await our turn to clear out the queue, then serialize access. 817 */ 818 while (softdep_worklist_busy) { 819 softdep_worklist_req += 1; 820 tsleep(&softdep_worklist_req, PRIBIO, "softflush", 0); 821 softdep_worklist_req -= 1; 822 } 823 softdep_worklist_busy = -1; 824 /* 825 * Alternately flush the block device associated with the mount 826 * point and process any dependencies that the flushing 827 * creates. We continue until no more worklist dependencies 828 * are found. 829 */ 830 *countp = 0; 831 devvp = VFSTOUFS(oldmnt)->um_devvp; 832 while ((count = softdep_process_worklist(oldmnt)) > 0) { 833 *countp += count; 834 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td); 835 error = VOP_FSYNC(devvp, td->td_ucred, MNT_WAIT, td); 836 VOP_UNLOCK(devvp, 0, td); 837 if (error) 838 break; 839 } 840 softdep_worklist_busy = 0; 841 if (softdep_worklist_req) 842 wakeup(&softdep_worklist_req); 843 return (error); 844 } 845 846 /* 847 * Flush all vnodes and worklist items associated with a specified mount point. 848 */ 849 int 850 softdep_flushfiles(oldmnt, flags, td) 851 struct mount *oldmnt; 852 int flags; 853 struct thread *td; 854 { 855 int error, count, loopcnt; 856 857 error = 0; 858 859 /* 860 * Alternately flush the vnodes associated with the mount 861 * point and process any dependencies that the flushing 862 * creates. In theory, this loop can happen at most twice, 863 * but we give it a few extra just to be sure. 864 */ 865 for (loopcnt = 10; loopcnt > 0; loopcnt--) { 866 /* 867 * Do another flush in case any vnodes were brought in 868 * as part of the cleanup operations. 869 */ 870 if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0) 871 break; 872 if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 || 873 count == 0) 874 break; 875 } 876 /* 877 * If we are unmounting then it is an error to fail. If we 878 * are simply trying to downgrade to read-only, then filesystem 879 * activity can keep us busy forever, so we just fail with EBUSY. 880 */ 881 if (loopcnt == 0) { 882 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 883 panic("softdep_flushfiles: looping"); 884 error = EBUSY; 885 } 886 return (error); 887 } 888 889 /* 890 * Structure hashing. 891 * 892 * There are three types of structures that can be looked up: 893 * 1) pagedep structures identified by mount point, inode number, 894 * and logical block. 895 * 2) inodedep structures identified by mount point and inode number. 896 * 3) newblk structures identified by mount point and 897 * physical block number. 898 * 899 * The "pagedep" and "inodedep" dependency structures are hashed 900 * separately from the file blocks and inodes to which they correspond. 901 * This separation helps when the in-memory copy of an inode or 902 * file block must be replaced. It also obviates the need to access 903 * an inode or file page when simply updating (or de-allocating) 904 * dependency structures. Lookup of newblk structures is needed to 905 * find newly allocated blocks when trying to associate them with 906 * their allocdirect or allocindir structure. 907 * 908 * The lookup routines optionally create and hash a new instance when 909 * an existing entry is not found. 910 */ 911 #define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 912 #define NODELAY 0x0002 /* cannot do background work */ 913 914 /* 915 * Structures and routines associated with pagedep caching. 916 */ 917 LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl; 918 u_long pagedep_hash; /* size of hash table - 1 */ 919 #define PAGEDEP_HASH(mp, inum, lbn) \ 920 (&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \ 921 pagedep_hash]) 922 static struct sema pagedep_in_progress; 923 924 /* 925 * Look up a pagedep. Return 1 if found, 0 if not found or found 926 * when asked to allocate but not associated with any buffer. 927 * If not found, allocate if DEPALLOC flag is passed. 928 * Found or allocated entry is returned in pagedeppp. 929 * This routine must be called with splbio interrupts blocked. 930 */ 931 static int 932 pagedep_lookup(ip, lbn, flags, pagedeppp) 933 struct inode *ip; 934 ufs_lbn_t lbn; 935 int flags; 936 struct pagedep **pagedeppp; 937 { 938 struct pagedep *pagedep; 939 struct pagedep_hashhead *pagedephd; 940 struct mount *mp; 941 int i; 942 943 #ifdef DEBUG 944 if (lk.lkt_held == NOHOLDER) 945 panic("pagedep_lookup: lock not held"); 946 #endif 947 mp = ITOV(ip)->v_mount; 948 pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn); 949 top: 950 LIST_FOREACH(pagedep, pagedephd, pd_hash) 951 if (ip->i_number == pagedep->pd_ino && 952 lbn == pagedep->pd_lbn && 953 mp == pagedep->pd_mnt) 954 break; 955 if (pagedep) { 956 *pagedeppp = pagedep; 957 if ((flags & DEPALLOC) != 0 && 958 (pagedep->pd_state & ONWORKLIST) == 0) 959 return (0); 960 return (1); 961 } 962 if ((flags & DEPALLOC) == 0) { 963 *pagedeppp = NULL; 964 return (0); 965 } 966 if (sema_get(&pagedep_in_progress, &lk) == 0) { 967 ACQUIRE_LOCK(&lk); 968 goto top; 969 } 970 MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep), M_PAGEDEP, 971 M_SOFTDEP_FLAGS|M_ZERO); 972 pagedep->pd_list.wk_type = D_PAGEDEP; 973 pagedep->pd_mnt = mp; 974 pagedep->pd_ino = ip->i_number; 975 pagedep->pd_lbn = lbn; 976 LIST_INIT(&pagedep->pd_dirremhd); 977 LIST_INIT(&pagedep->pd_pendinghd); 978 for (i = 0; i < DAHASHSZ; i++) 979 LIST_INIT(&pagedep->pd_diraddhd[i]); 980 ACQUIRE_LOCK(&lk); 981 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 982 sema_release(&pagedep_in_progress); 983 *pagedeppp = pagedep; 984 return (0); 985 } 986 987 /* 988 * Structures and routines associated with inodedep caching. 989 */ 990 LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl; 991 static u_long inodedep_hash; /* size of hash table - 1 */ 992 static long num_inodedep; /* number of inodedep allocated */ 993 #define INODEDEP_HASH(fs, inum) \ 994 (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash]) 995 static struct sema inodedep_in_progress; 996 997 /* 998 * Look up a inodedep. Return 1 if found, 0 if not found. 999 * If not found, allocate if DEPALLOC flag is passed. 1000 * Found or allocated entry is returned in inodedeppp. 1001 * This routine must be called with splbio interrupts blocked. 1002 */ 1003 static int 1004 inodedep_lookup(fs, inum, flags, inodedeppp) 1005 struct fs *fs; 1006 ino_t inum; 1007 int flags; 1008 struct inodedep **inodedeppp; 1009 { 1010 struct inodedep *inodedep; 1011 struct inodedep_hashhead *inodedephd; 1012 int firsttry; 1013 1014 #ifdef DEBUG 1015 if (lk.lkt_held == NOHOLDER) 1016 panic("inodedep_lookup: lock not held"); 1017 #endif 1018 firsttry = 1; 1019 inodedephd = INODEDEP_HASH(fs, inum); 1020 top: 1021 LIST_FOREACH(inodedep, inodedephd, id_hash) 1022 if (inum == inodedep->id_ino && fs == inodedep->id_fs) 1023 break; 1024 if (inodedep) { 1025 *inodedeppp = inodedep; 1026 return (1); 1027 } 1028 if ((flags & DEPALLOC) == 0) { 1029 *inodedeppp = NULL; 1030 return (0); 1031 } 1032 /* 1033 * If we are over our limit, try to improve the situation. 1034 */ 1035 if (num_inodedep > max_softdeps && firsttry && (flags & NODELAY) == 0 && 1036 request_cleanup(FLUSH_INODES, 1)) { 1037 firsttry = 0; 1038 goto top; 1039 } 1040 if (sema_get(&inodedep_in_progress, &lk) == 0) { 1041 ACQUIRE_LOCK(&lk); 1042 goto top; 1043 } 1044 num_inodedep += 1; 1045 MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep), 1046 M_INODEDEP, M_SOFTDEP_FLAGS); 1047 inodedep->id_list.wk_type = D_INODEDEP; 1048 inodedep->id_fs = fs; 1049 inodedep->id_ino = inum; 1050 inodedep->id_state = ALLCOMPLETE; 1051 inodedep->id_nlinkdelta = 0; 1052 inodedep->id_savedino1 = NULL; 1053 inodedep->id_savedsize = -1; 1054 inodedep->id_savedextsize = -1; 1055 inodedep->id_buf = NULL; 1056 LIST_INIT(&inodedep->id_pendinghd); 1057 LIST_INIT(&inodedep->id_inowait); 1058 LIST_INIT(&inodedep->id_bufwait); 1059 TAILQ_INIT(&inodedep->id_inoupdt); 1060 TAILQ_INIT(&inodedep->id_newinoupdt); 1061 TAILQ_INIT(&inodedep->id_extupdt); 1062 TAILQ_INIT(&inodedep->id_newextupdt); 1063 ACQUIRE_LOCK(&lk); 1064 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 1065 sema_release(&inodedep_in_progress); 1066 *inodedeppp = inodedep; 1067 return (0); 1068 } 1069 1070 /* 1071 * Structures and routines associated with newblk caching. 1072 */ 1073 LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl; 1074 u_long newblk_hash; /* size of hash table - 1 */ 1075 #define NEWBLK_HASH(fs, inum) \ 1076 (&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash]) 1077 static struct sema newblk_in_progress; 1078 1079 /* 1080 * Look up a newblk. Return 1 if found, 0 if not found. 1081 * If not found, allocate if DEPALLOC flag is passed. 1082 * Found or allocated entry is returned in newblkpp. 1083 */ 1084 static int 1085 newblk_lookup(fs, newblkno, flags, newblkpp) 1086 struct fs *fs; 1087 ufs2_daddr_t newblkno; 1088 int flags; 1089 struct newblk **newblkpp; 1090 { 1091 struct newblk *newblk; 1092 struct newblk_hashhead *newblkhd; 1093 1094 newblkhd = NEWBLK_HASH(fs, newblkno); 1095 top: 1096 LIST_FOREACH(newblk, newblkhd, nb_hash) 1097 if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs) 1098 break; 1099 if (newblk) { 1100 *newblkpp = newblk; 1101 return (1); 1102 } 1103 if ((flags & DEPALLOC) == 0) { 1104 *newblkpp = NULL; 1105 return (0); 1106 } 1107 if (sema_get(&newblk_in_progress, 0) == 0) 1108 goto top; 1109 MALLOC(newblk, struct newblk *, sizeof(struct newblk), 1110 M_NEWBLK, M_SOFTDEP_FLAGS); 1111 newblk->nb_state = 0; 1112 newblk->nb_fs = fs; 1113 newblk->nb_newblkno = newblkno; 1114 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 1115 sema_release(&newblk_in_progress); 1116 *newblkpp = newblk; 1117 return (0); 1118 } 1119 1120 /* 1121 * Executed during filesystem system initialization before 1122 * mounting any filesystems. 1123 */ 1124 void 1125 softdep_initialize() 1126 { 1127 1128 LIST_INIT(&mkdirlisthd); 1129 LIST_INIT(&softdep_workitem_pending); 1130 max_softdeps = desiredvnodes * 8; 1131 pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 1132 &pagedep_hash); 1133 sema_init(&pagedep_in_progress, "pagedep", PRIBIO, 0); 1134 inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash); 1135 sema_init(&inodedep_in_progress, "inodedep", PRIBIO, 0); 1136 newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash); 1137 sema_init(&newblk_in_progress, "newblk", PRIBIO, 0); 1138 1139 /* hooks through which the main kernel code calls us */ 1140 softdep_process_worklist_hook = softdep_process_worklist; 1141 softdep_fsync_hook = softdep_fsync; 1142 1143 /* initialise bioops hack */ 1144 bioops.io_start = softdep_disk_io_initiation; 1145 bioops.io_complete = softdep_disk_write_complete; 1146 bioops.io_deallocate = softdep_deallocate_dependencies; 1147 bioops.io_movedeps = softdep_move_dependencies; 1148 bioops.io_countdeps = softdep_count_dependencies; 1149 } 1150 1151 /* 1152 * Executed after all filesystems have been unmounted during 1153 * filesystem module unload. 1154 */ 1155 void 1156 softdep_uninitialize() 1157 { 1158 1159 softdep_process_worklist_hook = NULL; 1160 softdep_fsync_hook = NULL; 1161 hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash); 1162 hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash); 1163 hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash); 1164 } 1165 1166 /* 1167 * Called at mount time to notify the dependency code that a 1168 * filesystem wishes to use it. 1169 */ 1170 int 1171 softdep_mount(devvp, mp, fs, cred) 1172 struct vnode *devvp; 1173 struct mount *mp; 1174 struct fs *fs; 1175 struct ucred *cred; 1176 { 1177 struct csum_total cstotal; 1178 struct cg *cgp; 1179 struct buf *bp; 1180 int error, cyl; 1181 1182 mp->mnt_flag &= ~MNT_ASYNC; 1183 mp->mnt_flag |= MNT_SOFTDEP; 1184 /* 1185 * When doing soft updates, the counters in the 1186 * superblock may have gotten out of sync, so we have 1187 * to scan the cylinder groups and recalculate them. 1188 */ 1189 if (fs->fs_clean != 0) 1190 return (0); 1191 bzero(&cstotal, sizeof cstotal); 1192 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 1193 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 1194 fs->fs_cgsize, cred, &bp)) != 0) { 1195 brelse(bp); 1196 return (error); 1197 } 1198 cgp = (struct cg *)bp->b_data; 1199 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 1200 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 1201 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 1202 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 1203 fs->fs_cs(fs, cyl) = cgp->cg_cs; 1204 brelse(bp); 1205 } 1206 #ifdef DEBUG 1207 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 1208 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 1209 #endif 1210 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 1211 return (0); 1212 } 1213 1214 /* 1215 * Protecting the freemaps (or bitmaps). 1216 * 1217 * To eliminate the need to execute fsck before mounting a filesystem 1218 * after a power failure, one must (conservatively) guarantee that the 1219 * on-disk copy of the bitmaps never indicate that a live inode or block is 1220 * free. So, when a block or inode is allocated, the bitmap should be 1221 * updated (on disk) before any new pointers. When a block or inode is 1222 * freed, the bitmap should not be updated until all pointers have been 1223 * reset. The latter dependency is handled by the delayed de-allocation 1224 * approach described below for block and inode de-allocation. The former 1225 * dependency is handled by calling the following procedure when a block or 1226 * inode is allocated. When an inode is allocated an "inodedep" is created 1227 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 1228 * Each "inodedep" is also inserted into the hash indexing structure so 1229 * that any additional link additions can be made dependent on the inode 1230 * allocation. 1231 * 1232 * The ufs filesystem maintains a number of free block counts (e.g., per 1233 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 1234 * in addition to the bitmaps. These counts are used to improve efficiency 1235 * during allocation and therefore must be consistent with the bitmaps. 1236 * There is no convenient way to guarantee post-crash consistency of these 1237 * counts with simple update ordering, for two main reasons: (1) The counts 1238 * and bitmaps for a single cylinder group block are not in the same disk 1239 * sector. If a disk write is interrupted (e.g., by power failure), one may 1240 * be written and the other not. (2) Some of the counts are located in the 1241 * superblock rather than the cylinder group block. So, we focus our soft 1242 * updates implementation on protecting the bitmaps. When mounting a 1243 * filesystem, we recompute the auxiliary counts from the bitmaps. 1244 */ 1245 1246 /* 1247 * Called just after updating the cylinder group block to allocate an inode. 1248 */ 1249 void 1250 softdep_setup_inomapdep(bp, ip, newinum) 1251 struct buf *bp; /* buffer for cylgroup block with inode map */ 1252 struct inode *ip; /* inode related to allocation */ 1253 ino_t newinum; /* new inode number being allocated */ 1254 { 1255 struct inodedep *inodedep; 1256 struct bmsafemap *bmsafemap; 1257 1258 /* 1259 * Create a dependency for the newly allocated inode. 1260 * Panic if it already exists as something is seriously wrong. 1261 * Otherwise add it to the dependency list for the buffer holding 1262 * the cylinder group map from which it was allocated. 1263 */ 1264 ACQUIRE_LOCK(&lk); 1265 if ((inodedep_lookup(ip->i_fs, newinum, DEPALLOC|NODELAY, &inodedep))) { 1266 FREE_LOCK(&lk); 1267 panic("softdep_setup_inomapdep: found inode"); 1268 } 1269 inodedep->id_buf = bp; 1270 inodedep->id_state &= ~DEPCOMPLETE; 1271 bmsafemap = bmsafemap_lookup(bp); 1272 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 1273 FREE_LOCK(&lk); 1274 } 1275 1276 /* 1277 * Called just after updating the cylinder group block to 1278 * allocate block or fragment. 1279 */ 1280 void 1281 softdep_setup_blkmapdep(bp, fs, newblkno) 1282 struct buf *bp; /* buffer for cylgroup block with block map */ 1283 struct fs *fs; /* filesystem doing allocation */ 1284 ufs2_daddr_t newblkno; /* number of newly allocated block */ 1285 { 1286 struct newblk *newblk; 1287 struct bmsafemap *bmsafemap; 1288 1289 /* 1290 * Create a dependency for the newly allocated block. 1291 * Add it to the dependency list for the buffer holding 1292 * the cylinder group map from which it was allocated. 1293 */ 1294 if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0) 1295 panic("softdep_setup_blkmapdep: found block"); 1296 ACQUIRE_LOCK(&lk); 1297 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(bp); 1298 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 1299 FREE_LOCK(&lk); 1300 } 1301 1302 /* 1303 * Find the bmsafemap associated with a cylinder group buffer. 1304 * If none exists, create one. The buffer must be locked when 1305 * this routine is called and this routine must be called with 1306 * splbio interrupts blocked. 1307 */ 1308 static struct bmsafemap * 1309 bmsafemap_lookup(bp) 1310 struct buf *bp; 1311 { 1312 struct bmsafemap *bmsafemap; 1313 struct worklist *wk; 1314 1315 #ifdef DEBUG 1316 if (lk.lkt_held == NOHOLDER) 1317 panic("bmsafemap_lookup: lock not held"); 1318 #endif 1319 LIST_FOREACH(wk, &bp->b_dep, wk_list) 1320 if (wk->wk_type == D_BMSAFEMAP) 1321 return (WK_BMSAFEMAP(wk)); 1322 FREE_LOCK(&lk); 1323 MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap), 1324 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 1325 bmsafemap->sm_list.wk_type = D_BMSAFEMAP; 1326 bmsafemap->sm_list.wk_state = 0; 1327 bmsafemap->sm_buf = bp; 1328 LIST_INIT(&bmsafemap->sm_allocdirecthd); 1329 LIST_INIT(&bmsafemap->sm_allocindirhd); 1330 LIST_INIT(&bmsafemap->sm_inodedephd); 1331 LIST_INIT(&bmsafemap->sm_newblkhd); 1332 ACQUIRE_LOCK(&lk); 1333 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 1334 return (bmsafemap); 1335 } 1336 1337 /* 1338 * Direct block allocation dependencies. 1339 * 1340 * When a new block is allocated, the corresponding disk locations must be 1341 * initialized (with zeros or new data) before the on-disk inode points to 1342 * them. Also, the freemap from which the block was allocated must be 1343 * updated (on disk) before the inode's pointer. These two dependencies are 1344 * independent of each other and are needed for all file blocks and indirect 1345 * blocks that are pointed to directly by the inode. Just before the 1346 * "in-core" version of the inode is updated with a newly allocated block 1347 * number, a procedure (below) is called to setup allocation dependency 1348 * structures. These structures are removed when the corresponding 1349 * dependencies are satisfied or when the block allocation becomes obsolete 1350 * (i.e., the file is deleted, the block is de-allocated, or the block is a 1351 * fragment that gets upgraded). All of these cases are handled in 1352 * procedures described later. 1353 * 1354 * When a file extension causes a fragment to be upgraded, either to a larger 1355 * fragment or to a full block, the on-disk location may change (if the 1356 * previous fragment could not simply be extended). In this case, the old 1357 * fragment must be de-allocated, but not until after the inode's pointer has 1358 * been updated. In most cases, this is handled by later procedures, which 1359 * will construct a "freefrag" structure to be added to the workitem queue 1360 * when the inode update is complete (or obsolete). The main exception to 1361 * this is when an allocation occurs while a pending allocation dependency 1362 * (for the same block pointer) remains. This case is handled in the main 1363 * allocation dependency setup procedure by immediately freeing the 1364 * unreferenced fragments. 1365 */ 1366 void 1367 softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) 1368 struct inode *ip; /* inode to which block is being added */ 1369 ufs_lbn_t lbn; /* block pointer within inode */ 1370 ufs2_daddr_t newblkno; /* disk block number being added */ 1371 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ 1372 long newsize; /* size of new block */ 1373 long oldsize; /* size of new block */ 1374 struct buf *bp; /* bp for allocated block */ 1375 { 1376 struct allocdirect *adp, *oldadp; 1377 struct allocdirectlst *adphead; 1378 struct bmsafemap *bmsafemap; 1379 struct inodedep *inodedep; 1380 struct pagedep *pagedep; 1381 struct newblk *newblk; 1382 1383 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect), 1384 M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO); 1385 adp->ad_list.wk_type = D_ALLOCDIRECT; 1386 adp->ad_lbn = lbn; 1387 adp->ad_newblkno = newblkno; 1388 adp->ad_oldblkno = oldblkno; 1389 adp->ad_newsize = newsize; 1390 adp->ad_oldsize = oldsize; 1391 adp->ad_state = ATTACHED; 1392 LIST_INIT(&adp->ad_newdirblk); 1393 if (newblkno == oldblkno) 1394 adp->ad_freefrag = NULL; 1395 else 1396 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize); 1397 1398 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0) 1399 panic("softdep_setup_allocdirect: lost block"); 1400 1401 ACQUIRE_LOCK(&lk); 1402 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep); 1403 adp->ad_inodedep = inodedep; 1404 1405 if (newblk->nb_state == DEPCOMPLETE) { 1406 adp->ad_state |= DEPCOMPLETE; 1407 adp->ad_buf = NULL; 1408 } else { 1409 bmsafemap = newblk->nb_bmsafemap; 1410 adp->ad_buf = bmsafemap->sm_buf; 1411 LIST_REMOVE(newblk, nb_deps); 1412 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps); 1413 } 1414 LIST_REMOVE(newblk, nb_hash); 1415 FREE(newblk, M_NEWBLK); 1416 1417 WORKLIST_INSERT(&bp->b_dep, &adp->ad_list); 1418 if (lbn >= NDADDR) { 1419 /* allocating an indirect block */ 1420 if (oldblkno != 0) { 1421 FREE_LOCK(&lk); 1422 panic("softdep_setup_allocdirect: non-zero indir"); 1423 } 1424 } else { 1425 /* 1426 * Allocating a direct block. 1427 * 1428 * If we are allocating a directory block, then we must 1429 * allocate an associated pagedep to track additions and 1430 * deletions. 1431 */ 1432 if ((ip->i_mode & IFMT) == IFDIR && 1433 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) 1434 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 1435 } 1436 /* 1437 * The list of allocdirects must be kept in sorted and ascending 1438 * order so that the rollback routines can quickly determine the 1439 * first uncommitted block (the size of the file stored on disk 1440 * ends at the end of the lowest committed fragment, or if there 1441 * are no fragments, at the end of the highest committed block). 1442 * Since files generally grow, the typical case is that the new 1443 * block is to be added at the end of the list. We speed this 1444 * special case by checking against the last allocdirect in the 1445 * list before laboriously traversing the list looking for the 1446 * insertion point. 1447 */ 1448 adphead = &inodedep->id_newinoupdt; 1449 oldadp = TAILQ_LAST(adphead, allocdirectlst); 1450 if (oldadp == NULL || oldadp->ad_lbn <= lbn) { 1451 /* insert at end of list */ 1452 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 1453 if (oldadp != NULL && oldadp->ad_lbn == lbn) 1454 allocdirect_merge(adphead, adp, oldadp); 1455 FREE_LOCK(&lk); 1456 return; 1457 } 1458 TAILQ_FOREACH(oldadp, adphead, ad_next) { 1459 if (oldadp->ad_lbn >= lbn) 1460 break; 1461 } 1462 if (oldadp == NULL) { 1463 FREE_LOCK(&lk); 1464 panic("softdep_setup_allocdirect: lost entry"); 1465 } 1466 /* insert in middle of list */ 1467 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 1468 if (oldadp->ad_lbn == lbn) 1469 allocdirect_merge(adphead, adp, oldadp); 1470 FREE_LOCK(&lk); 1471 } 1472 1473 /* 1474 * Replace an old allocdirect dependency with a newer one. 1475 * This routine must be called with splbio interrupts blocked. 1476 */ 1477 static void 1478 allocdirect_merge(adphead, newadp, oldadp) 1479 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 1480 struct allocdirect *newadp; /* allocdirect being added */ 1481 struct allocdirect *oldadp; /* existing allocdirect being checked */ 1482 { 1483 struct worklist *wk; 1484 struct freefrag *freefrag; 1485 struct newdirblk *newdirblk; 1486 1487 #ifdef DEBUG 1488 if (lk.lkt_held == NOHOLDER) 1489 panic("allocdirect_merge: lock not held"); 1490 #endif 1491 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 1492 newadp->ad_oldsize != oldadp->ad_newsize || 1493 newadp->ad_lbn >= NDADDR) { 1494 FREE_LOCK(&lk); 1495 panic("%s %jd != new %jd || old size %ld != new %ld", 1496 "allocdirect_merge: old blkno", 1497 (intmax_t)newadp->ad_oldblkno, 1498 (intmax_t)oldadp->ad_newblkno, 1499 newadp->ad_oldsize, oldadp->ad_newsize); 1500 } 1501 newadp->ad_oldblkno = oldadp->ad_oldblkno; 1502 newadp->ad_oldsize = oldadp->ad_oldsize; 1503 /* 1504 * If the old dependency had a fragment to free or had never 1505 * previously had a block allocated, then the new dependency 1506 * can immediately post its freefrag and adopt the old freefrag. 1507 * This action is done by swapping the freefrag dependencies. 1508 * The new dependency gains the old one's freefrag, and the 1509 * old one gets the new one and then immediately puts it on 1510 * the worklist when it is freed by free_allocdirect. It is 1511 * not possible to do this swap when the old dependency had a 1512 * non-zero size but no previous fragment to free. This condition 1513 * arises when the new block is an extension of the old block. 1514 * Here, the first part of the fragment allocated to the new 1515 * dependency is part of the block currently claimed on disk by 1516 * the old dependency, so cannot legitimately be freed until the 1517 * conditions for the new dependency are fulfilled. 1518 */ 1519 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 1520 freefrag = newadp->ad_freefrag; 1521 newadp->ad_freefrag = oldadp->ad_freefrag; 1522 oldadp->ad_freefrag = freefrag; 1523 } 1524 /* 1525 * If we are tracking a new directory-block allocation, 1526 * move it from the old allocdirect to the new allocdirect. 1527 */ 1528 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 1529 newdirblk = WK_NEWDIRBLK(wk); 1530 WORKLIST_REMOVE(&newdirblk->db_list); 1531 if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL) 1532 panic("allocdirect_merge: extra newdirblk"); 1533 WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list); 1534 } 1535 free_allocdirect(adphead, oldadp, 0); 1536 } 1537 1538 /* 1539 * Allocate a new freefrag structure if needed. 1540 */ 1541 static struct freefrag * 1542 newfreefrag(ip, blkno, size) 1543 struct inode *ip; 1544 ufs2_daddr_t blkno; 1545 long size; 1546 { 1547 struct freefrag *freefrag; 1548 struct fs *fs; 1549 1550 if (blkno == 0) 1551 return (NULL); 1552 fs = ip->i_fs; 1553 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 1554 panic("newfreefrag: frag size"); 1555 MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag), 1556 M_FREEFRAG, M_SOFTDEP_FLAGS); 1557 freefrag->ff_list.wk_type = D_FREEFRAG; 1558 freefrag->ff_state = 0; 1559 freefrag->ff_inum = ip->i_number; 1560 freefrag->ff_mnt = ITOV(ip)->v_mount; 1561 freefrag->ff_blkno = blkno; 1562 freefrag->ff_fragsize = size; 1563 return (freefrag); 1564 } 1565 1566 /* 1567 * This workitem de-allocates fragments that were replaced during 1568 * file block allocation. 1569 */ 1570 static void 1571 handle_workitem_freefrag(freefrag) 1572 struct freefrag *freefrag; 1573 { 1574 struct ufsmount *ump = VFSTOUFS(freefrag->ff_mnt); 1575 1576 ffs_blkfree(ump->um_fs, ump->um_devvp, freefrag->ff_blkno, 1577 freefrag->ff_fragsize, freefrag->ff_inum); 1578 FREE(freefrag, M_FREEFRAG); 1579 } 1580 1581 /* 1582 * Set up a dependency structure for an external attributes data block. 1583 * This routine follows much of the structure of softdep_setup_allocdirect. 1584 * See the description of softdep_setup_allocdirect above for details. 1585 */ 1586 void 1587 softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp) 1588 struct inode *ip; 1589 ufs_lbn_t lbn; 1590 ufs2_daddr_t newblkno; 1591 ufs2_daddr_t oldblkno; 1592 long newsize; 1593 long oldsize; 1594 struct buf *bp; 1595 { 1596 struct allocdirect *adp, *oldadp; 1597 struct allocdirectlst *adphead; 1598 struct bmsafemap *bmsafemap; 1599 struct inodedep *inodedep; 1600 struct newblk *newblk; 1601 1602 MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect), 1603 M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO); 1604 adp->ad_list.wk_type = D_ALLOCDIRECT; 1605 adp->ad_lbn = lbn; 1606 adp->ad_newblkno = newblkno; 1607 adp->ad_oldblkno = oldblkno; 1608 adp->ad_newsize = newsize; 1609 adp->ad_oldsize = oldsize; 1610 adp->ad_state = ATTACHED | EXTDATA; 1611 LIST_INIT(&adp->ad_newdirblk); 1612 if (newblkno == oldblkno) 1613 adp->ad_freefrag = NULL; 1614 else 1615 adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize); 1616 1617 if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0) 1618 panic("softdep_setup_allocext: lost block"); 1619 1620 ACQUIRE_LOCK(&lk); 1621 inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC | NODELAY, &inodedep); 1622 adp->ad_inodedep = inodedep; 1623 1624 if (newblk->nb_state == DEPCOMPLETE) { 1625 adp->ad_state |= DEPCOMPLETE; 1626 adp->ad_buf = NULL; 1627 } else { 1628 bmsafemap = newblk->nb_bmsafemap; 1629 adp->ad_buf = bmsafemap->sm_buf; 1630 LIST_REMOVE(newblk, nb_deps); 1631 LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps); 1632 } 1633 LIST_REMOVE(newblk, nb_hash); 1634 FREE(newblk, M_NEWBLK); 1635 1636 WORKLIST_INSERT(&bp->b_dep, &adp->ad_list); 1637 if (lbn >= NXADDR) { 1638 FREE_LOCK(&lk); 1639 panic("softdep_setup_allocext: lbn %lld > NXADDR", 1640 (long long)lbn); 1641 } 1642 /* 1643 * The list of allocdirects must be kept in sorted and ascending 1644 * order so that the rollback routines can quickly determine the 1645 * first uncommitted block (the size of the file stored on disk 1646 * ends at the end of the lowest committed fragment, or if there 1647 * are no fragments, at the end of the highest committed block). 1648 * Since files generally grow, the typical case is that the new 1649 * block is to be added at the end of the list. We speed this 1650 * special case by checking against the last allocdirect in the 1651 * list before laboriously traversing the list looking for the 1652 * insertion point. 1653 */ 1654 adphead = &inodedep->id_newextupdt; 1655 oldadp = TAILQ_LAST(adphead, allocdirectlst); 1656 if (oldadp == NULL || oldadp->ad_lbn <= lbn) { 1657 /* insert at end of list */ 1658 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 1659 if (oldadp != NULL && oldadp->ad_lbn == lbn) 1660 allocdirect_merge(adphead, adp, oldadp); 1661 FREE_LOCK(&lk); 1662 return; 1663 } 1664 TAILQ_FOREACH(oldadp, adphead, ad_next) { 1665 if (oldadp->ad_lbn >= lbn) 1666 break; 1667 } 1668 if (oldadp == NULL) { 1669 FREE_LOCK(&lk); 1670 panic("softdep_setup_allocext: lost entry"); 1671 } 1672 /* insert in middle of list */ 1673 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 1674 if (oldadp->ad_lbn == lbn) 1675 allocdirect_merge(adphead, adp, oldadp); 1676 FREE_LOCK(&lk); 1677 } 1678 1679 /* 1680 * Indirect block allocation dependencies. 1681 * 1682 * The same dependencies that exist for a direct block also exist when 1683 * a new block is allocated and pointed to by an entry in a block of 1684 * indirect pointers. The undo/redo states described above are also 1685 * used here. Because an indirect block contains many pointers that 1686 * may have dependencies, a second copy of the entire in-memory indirect 1687 * block is kept. The buffer cache copy is always completely up-to-date. 1688 * The second copy, which is used only as a source for disk writes, 1689 * contains only the safe pointers (i.e., those that have no remaining 1690 * update dependencies). The second copy is freed when all pointers 1691 * are safe. The cache is not allowed to replace indirect blocks with 1692 * pending update dependencies. If a buffer containing an indirect 1693 * block with dependencies is written, these routines will mark it 1694 * dirty again. It can only be successfully written once all the 1695 * dependencies are removed. The ffs_fsync routine in conjunction with 1696 * softdep_sync_metadata work together to get all the dependencies 1697 * removed so that a file can be successfully written to disk. Three 1698 * procedures are used when setting up indirect block pointer 1699 * dependencies. The division is necessary because of the organization 1700 * of the "balloc" routine and because of the distinction between file 1701 * pages and file metadata blocks. 1702 */ 1703 1704 /* 1705 * Allocate a new allocindir structure. 1706 */ 1707 static struct allocindir * 1708 newallocindir(ip, ptrno, newblkno, oldblkno) 1709 struct inode *ip; /* inode for file being extended */ 1710 int ptrno; /* offset of pointer in indirect block */ 1711 ufs2_daddr_t newblkno; /* disk block number being added */ 1712 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 1713 { 1714 struct allocindir *aip; 1715 1716 MALLOC(aip, struct allocindir *, sizeof(struct allocindir), 1717 M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO); 1718 aip->ai_list.wk_type = D_ALLOCINDIR; 1719 aip->ai_state = ATTACHED; 1720 aip->ai_offset = ptrno; 1721 aip->ai_newblkno = newblkno; 1722 aip->ai_oldblkno = oldblkno; 1723 aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize); 1724 return (aip); 1725 } 1726 1727 /* 1728 * Called just before setting an indirect block pointer 1729 * to a newly allocated file page. 1730 */ 1731 void 1732 softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 1733 struct inode *ip; /* inode for file being extended */ 1734 ufs_lbn_t lbn; /* allocated block number within file */ 1735 struct buf *bp; /* buffer with indirect blk referencing page */ 1736 int ptrno; /* offset of pointer in indirect block */ 1737 ufs2_daddr_t newblkno; /* disk block number being added */ 1738 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 1739 struct buf *nbp; /* buffer holding allocated page */ 1740 { 1741 struct allocindir *aip; 1742 struct pagedep *pagedep; 1743 1744 aip = newallocindir(ip, ptrno, newblkno, oldblkno); 1745 ACQUIRE_LOCK(&lk); 1746 /* 1747 * If we are allocating a directory page, then we must 1748 * allocate an associated pagedep to track additions and 1749 * deletions. 1750 */ 1751 if ((ip->i_mode & IFMT) == IFDIR && 1752 pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0) 1753 WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list); 1754 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list); 1755 FREE_LOCK(&lk); 1756 setup_allocindir_phase2(bp, ip, aip); 1757 } 1758 1759 /* 1760 * Called just before setting an indirect block pointer to a 1761 * newly allocated indirect block. 1762 */ 1763 void 1764 softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 1765 struct buf *nbp; /* newly allocated indirect block */ 1766 struct inode *ip; /* inode for file being extended */ 1767 struct buf *bp; /* indirect block referencing allocated block */ 1768 int ptrno; /* offset of pointer in indirect block */ 1769 ufs2_daddr_t newblkno; /* disk block number being added */ 1770 { 1771 struct allocindir *aip; 1772 1773 aip = newallocindir(ip, ptrno, newblkno, 0); 1774 ACQUIRE_LOCK(&lk); 1775 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list); 1776 FREE_LOCK(&lk); 1777 setup_allocindir_phase2(bp, ip, aip); 1778 } 1779 1780 /* 1781 * Called to finish the allocation of the "aip" allocated 1782 * by one of the two routines above. 1783 */ 1784 static void 1785 setup_allocindir_phase2(bp, ip, aip) 1786 struct buf *bp; /* in-memory copy of the indirect block */ 1787 struct inode *ip; /* inode for file being extended */ 1788 struct allocindir *aip; /* allocindir allocated by the above routines */ 1789 { 1790 struct worklist *wk; 1791 struct indirdep *indirdep, *newindirdep; 1792 struct bmsafemap *bmsafemap; 1793 struct allocindir *oldaip; 1794 struct freefrag *freefrag; 1795 struct newblk *newblk; 1796 ufs2_daddr_t blkno; 1797 1798 if (bp->b_lblkno >= 0) 1799 panic("setup_allocindir_phase2: not indir blk"); 1800 for (indirdep = NULL, newindirdep = NULL; ; ) { 1801 ACQUIRE_LOCK(&lk); 1802 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 1803 if (wk->wk_type != D_INDIRDEP) 1804 continue; 1805 indirdep = WK_INDIRDEP(wk); 1806 break; 1807 } 1808 if (indirdep == NULL && newindirdep) { 1809 indirdep = newindirdep; 1810 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 1811 newindirdep = NULL; 1812 } 1813 FREE_LOCK(&lk); 1814 if (indirdep) { 1815 if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0, 1816 &newblk) == 0) 1817 panic("setup_allocindir: lost block"); 1818 ACQUIRE_LOCK(&lk); 1819 if (newblk->nb_state == DEPCOMPLETE) { 1820 aip->ai_state |= DEPCOMPLETE; 1821 aip->ai_buf = NULL; 1822 } else { 1823 bmsafemap = newblk->nb_bmsafemap; 1824 aip->ai_buf = bmsafemap->sm_buf; 1825 LIST_REMOVE(newblk, nb_deps); 1826 LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd, 1827 aip, ai_deps); 1828 } 1829 LIST_REMOVE(newblk, nb_hash); 1830 FREE(newblk, M_NEWBLK); 1831 aip->ai_indirdep = indirdep; 1832 /* 1833 * Check to see if there is an existing dependency 1834 * for this block. If there is, merge the old 1835 * dependency into the new one. 1836 */ 1837 if (aip->ai_oldblkno == 0) 1838 oldaip = NULL; 1839 else 1840 1841 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) 1842 if (oldaip->ai_offset == aip->ai_offset) 1843 break; 1844 freefrag = NULL; 1845 if (oldaip != NULL) { 1846 if (oldaip->ai_newblkno != aip->ai_oldblkno) { 1847 FREE_LOCK(&lk); 1848 panic("setup_allocindir_phase2: blkno"); 1849 } 1850 aip->ai_oldblkno = oldaip->ai_oldblkno; 1851 freefrag = aip->ai_freefrag; 1852 aip->ai_freefrag = oldaip->ai_freefrag; 1853 oldaip->ai_freefrag = NULL; 1854 free_allocindir(oldaip, NULL); 1855 } 1856 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 1857 if (ip->i_ump->um_fstype == UFS1) 1858 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data) 1859 [aip->ai_offset] = aip->ai_oldblkno; 1860 else 1861 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data) 1862 [aip->ai_offset] = aip->ai_oldblkno; 1863 FREE_LOCK(&lk); 1864 if (freefrag != NULL) 1865 handle_workitem_freefrag(freefrag); 1866 } 1867 if (newindirdep) { 1868 if (indirdep->ir_savebp != NULL) 1869 brelse(newindirdep->ir_savebp); 1870 WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP); 1871 } 1872 if (indirdep) 1873 break; 1874 MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep), 1875 M_INDIRDEP, M_SOFTDEP_FLAGS); 1876 newindirdep->ir_list.wk_type = D_INDIRDEP; 1877 newindirdep->ir_state = ATTACHED; 1878 if (ip->i_ump->um_fstype == UFS1) 1879 newindirdep->ir_state |= UFS1FMT; 1880 LIST_INIT(&newindirdep->ir_deplisthd); 1881 LIST_INIT(&newindirdep->ir_donehd); 1882 if (bp->b_blkno == bp->b_lblkno) { 1883 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 1884 NULL, NULL); 1885 bp->b_blkno = blkno; 1886 } 1887 newindirdep->ir_savebp = 1888 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0); 1889 BUF_KERNPROC(newindirdep->ir_savebp); 1890 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 1891 } 1892 } 1893 1894 /* 1895 * Block de-allocation dependencies. 1896 * 1897 * When blocks are de-allocated, the on-disk pointers must be nullified before 1898 * the blocks are made available for use by other files. (The true 1899 * requirement is that old pointers must be nullified before new on-disk 1900 * pointers are set. We chose this slightly more stringent requirement to 1901 * reduce complexity.) Our implementation handles this dependency by updating 1902 * the inode (or indirect block) appropriately but delaying the actual block 1903 * de-allocation (i.e., freemap and free space count manipulation) until 1904 * after the updated versions reach stable storage. After the disk is 1905 * updated, the blocks can be safely de-allocated whenever it is convenient. 1906 * This implementation handles only the common case of reducing a file's 1907 * length to zero. Other cases are handled by the conventional synchronous 1908 * write approach. 1909 * 1910 * The ffs implementation with which we worked double-checks 1911 * the state of the block pointers and file size as it reduces 1912 * a file's length. Some of this code is replicated here in our 1913 * soft updates implementation. The freeblks->fb_chkcnt field is 1914 * used to transfer a part of this information to the procedure 1915 * that eventually de-allocates the blocks. 1916 * 1917 * This routine should be called from the routine that shortens 1918 * a file's length, before the inode's size or block pointers 1919 * are modified. It will save the block pointer information for 1920 * later release and zero the inode so that the calling routine 1921 * can release it. 1922 */ 1923 void 1924 softdep_setup_freeblocks(ip, length, flags) 1925 struct inode *ip; /* The inode whose length is to be reduced */ 1926 off_t length; /* The new length for the file */ 1927 int flags; /* IO_EXT and/or IO_NORMAL */ 1928 { 1929 struct freeblks *freeblks; 1930 struct inodedep *inodedep; 1931 struct allocdirect *adp; 1932 struct vnode *vp; 1933 struct buf *bp; 1934 struct fs *fs; 1935 ufs2_daddr_t extblocks, datablocks; 1936 int i, delay, error; 1937 1938 fs = ip->i_fs; 1939 if (length != 0) 1940 panic("softdep_setup_freeblocks: non-zero length"); 1941 MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks), 1942 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 1943 freeblks->fb_list.wk_type = D_FREEBLKS; 1944 freeblks->fb_uid = ip->i_uid; 1945 freeblks->fb_previousinum = ip->i_number; 1946 freeblks->fb_devvp = ip->i_devvp; 1947 freeblks->fb_mnt = ITOV(ip)->v_mount; 1948 extblocks = 0; 1949 if (fs->fs_magic == FS_UFS2_MAGIC) 1950 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 1951 datablocks = DIP(ip, i_blocks) - extblocks; 1952 if ((flags & IO_NORMAL) == 0) { 1953 freeblks->fb_oldsize = 0; 1954 freeblks->fb_chkcnt = 0; 1955 } else { 1956 freeblks->fb_oldsize = ip->i_size; 1957 ip->i_size = 0; 1958 DIP(ip, i_size) = 0; 1959 freeblks->fb_chkcnt = datablocks; 1960 for (i = 0; i < NDADDR; i++) { 1961 freeblks->fb_dblks[i] = DIP(ip, i_db[i]); 1962 DIP(ip, i_db[i]) = 0; 1963 } 1964 for (i = 0; i < NIADDR; i++) { 1965 freeblks->fb_iblks[i] = DIP(ip, i_ib[i]); 1966 DIP(ip, i_ib[i]) = 0; 1967 } 1968 /* 1969 * If the file was removed, then the space being freed was 1970 * accounted for then (see softdep_filereleased()). If the 1971 * file is merely being truncated, then we account for it now. 1972 */ 1973 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 1974 fs->fs_pendingblocks += datablocks; 1975 } 1976 if ((flags & IO_EXT) == 0) { 1977 freeblks->fb_oldextsize = 0; 1978 } else { 1979 freeblks->fb_oldextsize = ip->i_din2->di_extsize; 1980 ip->i_din2->di_extsize = 0; 1981 freeblks->fb_chkcnt += extblocks; 1982 for (i = 0; i < NXADDR; i++) { 1983 freeblks->fb_eblks[i] = ip->i_din2->di_extb[i]; 1984 ip->i_din2->di_extb[i] = 0; 1985 } 1986 } 1987 DIP(ip, i_blocks) -= freeblks->fb_chkcnt; 1988 /* 1989 * Push the zero'ed inode to to its disk buffer so that we are free 1990 * to delete its dependencies below. Once the dependencies are gone 1991 * the buffer can be safely released. 1992 */ 1993 if ((error = bread(ip->i_devvp, 1994 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 1995 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 1996 brelse(bp); 1997 softdep_error("softdep_setup_freeblocks", error); 1998 } 1999 if (ip->i_ump->um_fstype == UFS1) 2000 *((struct ufs1_dinode *)bp->b_data + 2001 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 2002 else 2003 *((struct ufs2_dinode *)bp->b_data + 2004 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 2005 /* 2006 * Find and eliminate any inode dependencies. 2007 */ 2008 ACQUIRE_LOCK(&lk); 2009 (void) inodedep_lookup(fs, ip->i_number, DEPALLOC, &inodedep); 2010 if ((inodedep->id_state & IOSTARTED) != 0) { 2011 FREE_LOCK(&lk); 2012 panic("softdep_setup_freeblocks: inode busy"); 2013 } 2014 /* 2015 * Add the freeblks structure to the list of operations that 2016 * must await the zero'ed inode being written to disk. If we 2017 * still have a bitmap dependency (delay == 0), then the inode 2018 * has never been written to disk, so we can process the 2019 * freeblks below once we have deleted the dependencies. 2020 */ 2021 delay = (inodedep->id_state & DEPCOMPLETE); 2022 if (delay) 2023 WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list); 2024 /* 2025 * Because the file length has been truncated to zero, any 2026 * pending block allocation dependency structures associated 2027 * with this inode are obsolete and can simply be de-allocated. 2028 * We must first merge the two dependency lists to get rid of 2029 * any duplicate freefrag structures, then purge the merged list. 2030 * If we still have a bitmap dependency, then the inode has never 2031 * been written to disk, so we can free any fragments without delay. 2032 */ 2033 if (flags & IO_NORMAL) { 2034 merge_inode_lists(&inodedep->id_newinoupdt, 2035 &inodedep->id_inoupdt); 2036 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 2037 free_allocdirect(&inodedep->id_inoupdt, adp, delay); 2038 } 2039 if (flags & IO_EXT) { 2040 merge_inode_lists(&inodedep->id_newextupdt, 2041 &inodedep->id_extupdt); 2042 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 2043 free_allocdirect(&inodedep->id_extupdt, adp, delay); 2044 } 2045 FREE_LOCK(&lk); 2046 bdwrite(bp); 2047 /* 2048 * We must wait for any I/O in progress to finish so that 2049 * all potential buffers on the dirty list will be visible. 2050 * Once they are all there, walk the list and get rid of 2051 * any dependencies. 2052 */ 2053 vp = ITOV(ip); 2054 ACQUIRE_LOCK(&lk); 2055 drain_output(vp, 1); 2056 restart: 2057 VI_LOCK(vp); 2058 TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) { 2059 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 2060 ((flags & IO_NORMAL) == 0 && 2061 (bp->b_xflags & BX_ALTDATA) == 0)) 2062 continue; 2063 VI_UNLOCK(vp); 2064 if (getdirtybuf(&bp, MNT_WAIT) == 0) 2065 goto restart; 2066 (void) inodedep_lookup(fs, ip->i_number, 0, &inodedep); 2067 deallocate_dependencies(bp, inodedep); 2068 bp->b_flags |= B_INVAL | B_NOCACHE; 2069 FREE_LOCK(&lk); 2070 brelse(bp); 2071 ACQUIRE_LOCK(&lk); 2072 goto restart; 2073 } 2074 VI_UNLOCK(vp); 2075 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) != 0) 2076 (void) free_inodedep(inodedep); 2077 FREE_LOCK(&lk); 2078 /* 2079 * If the inode has never been written to disk (delay == 0), 2080 * then we can process the freeblks now that we have deleted 2081 * the dependencies. 2082 */ 2083 if (!delay) 2084 handle_workitem_freeblocks(freeblks, 0); 2085 } 2086 2087 /* 2088 * Reclaim any dependency structures from a buffer that is about to 2089 * be reallocated to a new vnode. The buffer must be locked, thus, 2090 * no I/O completion operations can occur while we are manipulating 2091 * its associated dependencies. The mutex is held so that other I/O's 2092 * associated with related dependencies do not occur. 2093 */ 2094 static void 2095 deallocate_dependencies(bp, inodedep) 2096 struct buf *bp; 2097 struct inodedep *inodedep; 2098 { 2099 struct worklist *wk; 2100 struct indirdep *indirdep; 2101 struct allocindir *aip; 2102 struct pagedep *pagedep; 2103 struct dirrem *dirrem; 2104 struct diradd *dap; 2105 int i; 2106 2107 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2108 switch (wk->wk_type) { 2109 2110 case D_INDIRDEP: 2111 indirdep = WK_INDIRDEP(wk); 2112 /* 2113 * None of the indirect pointers will ever be visible, 2114 * so they can simply be tossed. GOINGAWAY ensures 2115 * that allocated pointers will be saved in the buffer 2116 * cache until they are freed. Note that they will 2117 * only be able to be found by their physical address 2118 * since the inode mapping the logical address will 2119 * be gone. The save buffer used for the safe copy 2120 * was allocated in setup_allocindir_phase2 using 2121 * the physical address so it could be used for this 2122 * purpose. Hence we swap the safe copy with the real 2123 * copy, allowing the safe copy to be freed and holding 2124 * on to the real copy for later use in indir_trunc. 2125 */ 2126 if (indirdep->ir_state & GOINGAWAY) { 2127 FREE_LOCK(&lk); 2128 panic("deallocate_dependencies: already gone"); 2129 } 2130 indirdep->ir_state |= GOINGAWAY; 2131 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 2132 free_allocindir(aip, inodedep); 2133 if (bp->b_lblkno >= 0 || 2134 bp->b_blkno != indirdep->ir_savebp->b_lblkno) { 2135 FREE_LOCK(&lk); 2136 panic("deallocate_dependencies: not indir"); 2137 } 2138 bcopy(bp->b_data, indirdep->ir_savebp->b_data, 2139 bp->b_bcount); 2140 WORKLIST_REMOVE(wk); 2141 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk); 2142 continue; 2143 2144 case D_PAGEDEP: 2145 pagedep = WK_PAGEDEP(wk); 2146 /* 2147 * None of the directory additions will ever be 2148 * visible, so they can simply be tossed. 2149 */ 2150 for (i = 0; i < DAHASHSZ; i++) 2151 while ((dap = 2152 LIST_FIRST(&pagedep->pd_diraddhd[i]))) 2153 free_diradd(dap); 2154 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0) 2155 free_diradd(dap); 2156 /* 2157 * Copy any directory remove dependencies to the list 2158 * to be processed after the zero'ed inode is written. 2159 * If the inode has already been written, then they 2160 * can be dumped directly onto the work list. 2161 */ 2162 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 2163 LIST_REMOVE(dirrem, dm_next); 2164 dirrem->dm_dirinum = pagedep->pd_ino; 2165 if (inodedep == NULL || 2166 (inodedep->id_state & ALLCOMPLETE) == 2167 ALLCOMPLETE) 2168 add_to_worklist(&dirrem->dm_list); 2169 else 2170 WORKLIST_INSERT(&inodedep->id_bufwait, 2171 &dirrem->dm_list); 2172 } 2173 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2174 LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list) 2175 if (wk->wk_type == D_NEWDIRBLK && 2176 WK_NEWDIRBLK(wk)->db_pagedep == 2177 pagedep) 2178 break; 2179 if (wk != NULL) { 2180 WORKLIST_REMOVE(wk); 2181 free_newdirblk(WK_NEWDIRBLK(wk)); 2182 } else { 2183 FREE_LOCK(&lk); 2184 panic("deallocate_dependencies: " 2185 "lost pagedep"); 2186 } 2187 } 2188 WORKLIST_REMOVE(&pagedep->pd_list); 2189 LIST_REMOVE(pagedep, pd_hash); 2190 WORKITEM_FREE(pagedep, D_PAGEDEP); 2191 continue; 2192 2193 case D_ALLOCINDIR: 2194 free_allocindir(WK_ALLOCINDIR(wk), inodedep); 2195 continue; 2196 2197 case D_ALLOCDIRECT: 2198 case D_INODEDEP: 2199 FREE_LOCK(&lk); 2200 panic("deallocate_dependencies: Unexpected type %s", 2201 TYPENAME(wk->wk_type)); 2202 /* NOTREACHED */ 2203 2204 default: 2205 FREE_LOCK(&lk); 2206 panic("deallocate_dependencies: Unknown type %s", 2207 TYPENAME(wk->wk_type)); 2208 /* NOTREACHED */ 2209 } 2210 } 2211 } 2212 2213 /* 2214 * Free an allocdirect. Generate a new freefrag work request if appropriate. 2215 * This routine must be called with splbio interrupts blocked. 2216 */ 2217 static void 2218 free_allocdirect(adphead, adp, delay) 2219 struct allocdirectlst *adphead; 2220 struct allocdirect *adp; 2221 int delay; 2222 { 2223 struct newdirblk *newdirblk; 2224 struct worklist *wk; 2225 2226 #ifdef DEBUG 2227 if (lk.lkt_held == NOHOLDER) 2228 panic("free_allocdirect: lock not held"); 2229 #endif 2230 if ((adp->ad_state & DEPCOMPLETE) == 0) 2231 LIST_REMOVE(adp, ad_deps); 2232 TAILQ_REMOVE(adphead, adp, ad_next); 2233 if ((adp->ad_state & COMPLETE) == 0) 2234 WORKLIST_REMOVE(&adp->ad_list); 2235 if (adp->ad_freefrag != NULL) { 2236 if (delay) 2237 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2238 &adp->ad_freefrag->ff_list); 2239 else 2240 add_to_worklist(&adp->ad_freefrag->ff_list); 2241 } 2242 if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) { 2243 newdirblk = WK_NEWDIRBLK(wk); 2244 WORKLIST_REMOVE(&newdirblk->db_list); 2245 if (LIST_FIRST(&adp->ad_newdirblk) != NULL) 2246 panic("free_allocdirect: extra newdirblk"); 2247 if (delay) 2248 WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait, 2249 &newdirblk->db_list); 2250 else 2251 free_newdirblk(newdirblk); 2252 } 2253 WORKITEM_FREE(adp, D_ALLOCDIRECT); 2254 } 2255 2256 /* 2257 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 2258 * This routine must be called with splbio interrupts blocked. 2259 */ 2260 static void 2261 free_newdirblk(newdirblk) 2262 struct newdirblk *newdirblk; 2263 { 2264 struct pagedep *pagedep; 2265 struct diradd *dap; 2266 int i; 2267 2268 #ifdef DEBUG 2269 if (lk.lkt_held == NOHOLDER) 2270 panic("free_newdirblk: lock not held"); 2271 #endif 2272 /* 2273 * If the pagedep is still linked onto the directory buffer 2274 * dependency chain, then some of the entries on the 2275 * pd_pendinghd list may not be committed to disk yet. In 2276 * this case, we will simply clear the NEWBLOCK flag and 2277 * let the pd_pendinghd list be processed when the pagedep 2278 * is next written. If the pagedep is no longer on the buffer 2279 * dependency chain, then all the entries on the pd_pending 2280 * list are committed to disk and we can free them here. 2281 */ 2282 pagedep = newdirblk->db_pagedep; 2283 pagedep->pd_state &= ~NEWBLOCK; 2284 if ((pagedep->pd_state & ONWORKLIST) == 0) 2285 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 2286 free_diradd(dap); 2287 /* 2288 * If no dependencies remain, the pagedep will be freed. 2289 */ 2290 for (i = 0; i < DAHASHSZ; i++) 2291 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL) 2292 break; 2293 if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) { 2294 LIST_REMOVE(pagedep, pd_hash); 2295 WORKITEM_FREE(pagedep, D_PAGEDEP); 2296 } 2297 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2298 } 2299 2300 /* 2301 * Prepare an inode to be freed. The actual free operation is not 2302 * done until the zero'ed inode has been written to disk. 2303 */ 2304 void 2305 softdep_freefile(pvp, ino, mode) 2306 struct vnode *pvp; 2307 ino_t ino; 2308 int mode; 2309 { 2310 struct inode *ip = VTOI(pvp); 2311 struct inodedep *inodedep; 2312 struct freefile *freefile; 2313 2314 /* 2315 * This sets up the inode de-allocation dependency. 2316 */ 2317 MALLOC(freefile, struct freefile *, sizeof(struct freefile), 2318 M_FREEFILE, M_SOFTDEP_FLAGS); 2319 freefile->fx_list.wk_type = D_FREEFILE; 2320 freefile->fx_list.wk_state = 0; 2321 freefile->fx_mode = mode; 2322 freefile->fx_oldinum = ino; 2323 freefile->fx_devvp = ip->i_devvp; 2324 freefile->fx_mnt = ITOV(ip)->v_mount; 2325 if ((ip->i_flag & IN_SPACECOUNTED) == 0) 2326 ip->i_fs->fs_pendinginodes += 1; 2327 2328 /* 2329 * If the inodedep does not exist, then the zero'ed inode has 2330 * been written to disk. If the allocated inode has never been 2331 * written to disk, then the on-disk inode is zero'ed. In either 2332 * case we can free the file immediately. 2333 */ 2334 ACQUIRE_LOCK(&lk); 2335 if (inodedep_lookup(ip->i_fs, ino, 0, &inodedep) == 0 || 2336 check_inode_unwritten(inodedep)) { 2337 FREE_LOCK(&lk); 2338 handle_workitem_freefile(freefile); 2339 return; 2340 } 2341 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list); 2342 FREE_LOCK(&lk); 2343 } 2344 2345 /* 2346 * Check to see if an inode has never been written to disk. If 2347 * so free the inodedep and return success, otherwise return failure. 2348 * This routine must be called with splbio interrupts blocked. 2349 * 2350 * If we still have a bitmap dependency, then the inode has never 2351 * been written to disk. Drop the dependency as it is no longer 2352 * necessary since the inode is being deallocated. We set the 2353 * ALLCOMPLETE flags since the bitmap now properly shows that the 2354 * inode is not allocated. Even if the inode is actively being 2355 * written, it has been rolled back to its zero'ed state, so we 2356 * are ensured that a zero inode is what is on the disk. For short 2357 * lived files, this change will usually result in removing all the 2358 * dependencies from the inode so that it can be freed immediately. 2359 */ 2360 static int 2361 check_inode_unwritten(inodedep) 2362 struct inodedep *inodedep; 2363 { 2364 2365 if ((inodedep->id_state & DEPCOMPLETE) != 0 || 2366 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2367 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2368 LIST_FIRST(&inodedep->id_inowait) != NULL || 2369 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2370 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2371 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 2372 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 2373 inodedep->id_nlinkdelta != 0) 2374 return (0); 2375 inodedep->id_state |= ALLCOMPLETE; 2376 LIST_REMOVE(inodedep, id_deps); 2377 inodedep->id_buf = NULL; 2378 if (inodedep->id_state & ONWORKLIST) 2379 WORKLIST_REMOVE(&inodedep->id_list); 2380 if (inodedep->id_savedino1 != NULL) { 2381 FREE(inodedep->id_savedino1, M_INODEDEP); 2382 inodedep->id_savedino1 = NULL; 2383 } 2384 if (free_inodedep(inodedep) == 0) { 2385 FREE_LOCK(&lk); 2386 panic("check_inode_unwritten: busy inode"); 2387 } 2388 return (1); 2389 } 2390 2391 /* 2392 * Try to free an inodedep structure. Return 1 if it could be freed. 2393 */ 2394 static int 2395 free_inodedep(inodedep) 2396 struct inodedep *inodedep; 2397 { 2398 2399 if ((inodedep->id_state & ONWORKLIST) != 0 || 2400 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 2401 LIST_FIRST(&inodedep->id_pendinghd) != NULL || 2402 LIST_FIRST(&inodedep->id_bufwait) != NULL || 2403 LIST_FIRST(&inodedep->id_inowait) != NULL || 2404 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 2405 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL || 2406 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 2407 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 2408 inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL) 2409 return (0); 2410 LIST_REMOVE(inodedep, id_hash); 2411 WORKITEM_FREE(inodedep, D_INODEDEP); 2412 num_inodedep -= 1; 2413 return (1); 2414 } 2415 2416 /* 2417 * This workitem routine performs the block de-allocation. 2418 * The workitem is added to the pending list after the updated 2419 * inode block has been written to disk. As mentioned above, 2420 * checks regarding the number of blocks de-allocated (compared 2421 * to the number of blocks allocated for the file) are also 2422 * performed in this function. 2423 */ 2424 static void 2425 handle_workitem_freeblocks(freeblks, flags) 2426 struct freeblks *freeblks; 2427 int flags; 2428 { 2429 struct inode *ip; 2430 struct vnode *vp; 2431 struct fs *fs; 2432 int i, nblocks, level, bsize; 2433 ufs2_daddr_t bn, blocksreleased = 0; 2434 int error, allerror = 0; 2435 ufs_lbn_t baselbns[NIADDR], tmpval; 2436 2437 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2438 tmpval = 1; 2439 baselbns[0] = NDADDR; 2440 for (i = 1; i < NIADDR; i++) { 2441 tmpval *= NINDIR(fs); 2442 baselbns[i] = baselbns[i - 1] + tmpval; 2443 } 2444 nblocks = btodb(fs->fs_bsize); 2445 blocksreleased = 0; 2446 /* 2447 * Release all extended attribute blocks or frags. 2448 */ 2449 if (freeblks->fb_oldextsize > 0) { 2450 for (i = (NXADDR - 1); i >= 0; i--) { 2451 if ((bn = freeblks->fb_eblks[i]) == 0) 2452 continue; 2453 bsize = sblksize(fs, freeblks->fb_oldextsize, i); 2454 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize, 2455 freeblks->fb_previousinum); 2456 blocksreleased += btodb(bsize); 2457 } 2458 } 2459 /* 2460 * Release all data blocks or frags. 2461 */ 2462 if (freeblks->fb_oldsize > 0) { 2463 /* 2464 * Indirect blocks first. 2465 */ 2466 for (level = (NIADDR - 1); level >= 0; level--) { 2467 if ((bn = freeblks->fb_iblks[level]) == 0) 2468 continue; 2469 if ((error = indir_trunc(freeblks, fsbtodb(fs, bn), 2470 level, baselbns[level], &blocksreleased)) == 0) 2471 allerror = error; 2472 ffs_blkfree(fs, freeblks->fb_devvp, bn, fs->fs_bsize, 2473 freeblks->fb_previousinum); 2474 fs->fs_pendingblocks -= nblocks; 2475 blocksreleased += nblocks; 2476 } 2477 /* 2478 * All direct blocks or frags. 2479 */ 2480 for (i = (NDADDR - 1); i >= 0; i--) { 2481 if ((bn = freeblks->fb_dblks[i]) == 0) 2482 continue; 2483 bsize = sblksize(fs, freeblks->fb_oldsize, i); 2484 ffs_blkfree(fs, freeblks->fb_devvp, bn, bsize, 2485 freeblks->fb_previousinum); 2486 fs->fs_pendingblocks -= btodb(bsize); 2487 blocksreleased += btodb(bsize); 2488 } 2489 } 2490 /* 2491 * If we still have not finished background cleanup, then check 2492 * to see if the block count needs to be adjusted. 2493 */ 2494 if (freeblks->fb_chkcnt != blocksreleased && 2495 (fs->fs_flags & FS_UNCLEAN) != 0 && 2496 VFS_VGET(freeblks->fb_mnt, freeblks->fb_previousinum, 2497 (flags & LK_NOWAIT) | LK_EXCLUSIVE, &vp) == 0) { 2498 ip = VTOI(vp); 2499 DIP(ip, i_blocks) += freeblks->fb_chkcnt - blocksreleased; 2500 ip->i_flag |= IN_CHANGE; 2501 vput(vp); 2502 } 2503 2504 #ifdef DIAGNOSTIC 2505 if (freeblks->fb_chkcnt != blocksreleased && 2506 ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0)) 2507 printf("handle_workitem_freeblocks: block count\n"); 2508 if (allerror) 2509 softdep_error("handle_workitem_freeblks", allerror); 2510 #endif /* DIAGNOSTIC */ 2511 2512 WORKITEM_FREE(freeblks, D_FREEBLKS); 2513 } 2514 2515 /* 2516 * Release blocks associated with the inode ip and stored in the indirect 2517 * block dbn. If level is greater than SINGLE, the block is an indirect block 2518 * and recursive calls to indirtrunc must be used to cleanse other indirect 2519 * blocks. 2520 */ 2521 static int 2522 indir_trunc(freeblks, dbn, level, lbn, countp) 2523 struct freeblks *freeblks; 2524 ufs2_daddr_t dbn; 2525 int level; 2526 ufs_lbn_t lbn; 2527 ufs2_daddr_t *countp; 2528 { 2529 struct buf *bp; 2530 struct fs *fs; 2531 struct worklist *wk; 2532 struct indirdep *indirdep; 2533 ufs1_daddr_t *bap1 = 0; 2534 ufs2_daddr_t nb, *bap2 = 0; 2535 ufs_lbn_t lbnadd; 2536 int i, nblocks, ufs1fmt; 2537 int error, allerror = 0; 2538 2539 fs = VFSTOUFS(freeblks->fb_mnt)->um_fs; 2540 lbnadd = 1; 2541 for (i = level; i > 0; i--) 2542 lbnadd *= NINDIR(fs); 2543 /* 2544 * Get buffer of block pointers to be freed. This routine is not 2545 * called until the zero'ed inode has been written, so it is safe 2546 * to free blocks as they are encountered. Because the inode has 2547 * been zero'ed, calls to bmap on these blocks will fail. So, we 2548 * have to use the on-disk address and the block device for the 2549 * filesystem to look them up. If the file was deleted before its 2550 * indirect blocks were all written to disk, the routine that set 2551 * us up (deallocate_dependencies) will have arranged to leave 2552 * a complete copy of the indirect block in memory for our use. 2553 * Otherwise we have to read the blocks in from the disk. 2554 */ 2555 ACQUIRE_LOCK(&lk); 2556 if ((bp = incore(freeblks->fb_devvp, dbn)) != NULL && 2557 (wk = LIST_FIRST(&bp->b_dep)) != NULL) { 2558 if (wk->wk_type != D_INDIRDEP || 2559 (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp || 2560 (indirdep->ir_state & GOINGAWAY) == 0) { 2561 FREE_LOCK(&lk); 2562 panic("indir_trunc: lost indirdep"); 2563 } 2564 WORKLIST_REMOVE(wk); 2565 WORKITEM_FREE(indirdep, D_INDIRDEP); 2566 if (LIST_FIRST(&bp->b_dep) != NULL) { 2567 FREE_LOCK(&lk); 2568 panic("indir_trunc: dangling dep"); 2569 } 2570 FREE_LOCK(&lk); 2571 } else { 2572 FREE_LOCK(&lk); 2573 error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 2574 NOCRED, &bp); 2575 if (error) { 2576 brelse(bp); 2577 return (error); 2578 } 2579 } 2580 /* 2581 * Recursively free indirect blocks. 2582 */ 2583 if (VFSTOUFS(freeblks->fb_mnt)->um_fstype == UFS1) { 2584 ufs1fmt = 1; 2585 bap1 = (ufs1_daddr_t *)bp->b_data; 2586 } else { 2587 ufs1fmt = 0; 2588 bap2 = (ufs2_daddr_t *)bp->b_data; 2589 } 2590 nblocks = btodb(fs->fs_bsize); 2591 for (i = NINDIR(fs) - 1; i >= 0; i--) { 2592 if (ufs1fmt) 2593 nb = bap1[i]; 2594 else 2595 nb = bap2[i]; 2596 if (nb == 0) 2597 continue; 2598 if (level != 0) { 2599 if ((error = indir_trunc(freeblks, fsbtodb(fs, nb), 2600 level - 1, lbn + (i * lbnadd), countp)) != 0) 2601 allerror = error; 2602 } 2603 ffs_blkfree(fs, freeblks->fb_devvp, nb, fs->fs_bsize, 2604 freeblks->fb_previousinum); 2605 fs->fs_pendingblocks -= nblocks; 2606 *countp += nblocks; 2607 } 2608 bp->b_flags |= B_INVAL | B_NOCACHE; 2609 brelse(bp); 2610 return (allerror); 2611 } 2612 2613 /* 2614 * Free an allocindir. 2615 * This routine must be called with splbio interrupts blocked. 2616 */ 2617 static void 2618 free_allocindir(aip, inodedep) 2619 struct allocindir *aip; 2620 struct inodedep *inodedep; 2621 { 2622 struct freefrag *freefrag; 2623 2624 #ifdef DEBUG 2625 if (lk.lkt_held == NOHOLDER) 2626 panic("free_allocindir: lock not held"); 2627 #endif 2628 if ((aip->ai_state & DEPCOMPLETE) == 0) 2629 LIST_REMOVE(aip, ai_deps); 2630 if (aip->ai_state & ONWORKLIST) 2631 WORKLIST_REMOVE(&aip->ai_list); 2632 LIST_REMOVE(aip, ai_next); 2633 if ((freefrag = aip->ai_freefrag) != NULL) { 2634 if (inodedep == NULL) 2635 add_to_worklist(&freefrag->ff_list); 2636 else 2637 WORKLIST_INSERT(&inodedep->id_bufwait, 2638 &freefrag->ff_list); 2639 } 2640 WORKITEM_FREE(aip, D_ALLOCINDIR); 2641 } 2642 2643 /* 2644 * Directory entry addition dependencies. 2645 * 2646 * When adding a new directory entry, the inode (with its incremented link 2647 * count) must be written to disk before the directory entry's pointer to it. 2648 * Also, if the inode is newly allocated, the corresponding freemap must be 2649 * updated (on disk) before the directory entry's pointer. These requirements 2650 * are met via undo/redo on the directory entry's pointer, which consists 2651 * simply of the inode number. 2652 * 2653 * As directory entries are added and deleted, the free space within a 2654 * directory block can become fragmented. The ufs filesystem will compact 2655 * a fragmented directory block to make space for a new entry. When this 2656 * occurs, the offsets of previously added entries change. Any "diradd" 2657 * dependency structures corresponding to these entries must be updated with 2658 * the new offsets. 2659 */ 2660 2661 /* 2662 * This routine is called after the in-memory inode's link 2663 * count has been incremented, but before the directory entry's 2664 * pointer to the inode has been set. 2665 */ 2666 int 2667 softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 2668 struct buf *bp; /* buffer containing directory block */ 2669 struct inode *dp; /* inode for directory */ 2670 off_t diroffset; /* offset of new entry in directory */ 2671 ino_t newinum; /* inode referenced by new directory entry */ 2672 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 2673 int isnewblk; /* entry is in a newly allocated block */ 2674 { 2675 int offset; /* offset of new entry within directory block */ 2676 ufs_lbn_t lbn; /* block in directory containing new entry */ 2677 struct fs *fs; 2678 struct diradd *dap; 2679 struct allocdirect *adp; 2680 struct pagedep *pagedep; 2681 struct inodedep *inodedep; 2682 struct newdirblk *newdirblk = 0; 2683 struct mkdir *mkdir1, *mkdir2; 2684 2685 /* 2686 * Whiteouts have no dependencies. 2687 */ 2688 if (newinum == WINO) { 2689 if (newdirbp != NULL) 2690 bdwrite(newdirbp); 2691 return (0); 2692 } 2693 2694 fs = dp->i_fs; 2695 lbn = lblkno(fs, diroffset); 2696 offset = blkoff(fs, diroffset); 2697 MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD, 2698 M_SOFTDEP_FLAGS|M_ZERO); 2699 dap->da_list.wk_type = D_DIRADD; 2700 dap->da_offset = offset; 2701 dap->da_newinum = newinum; 2702 dap->da_state = ATTACHED; 2703 if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) { 2704 MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk), 2705 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 2706 newdirblk->db_list.wk_type = D_NEWDIRBLK; 2707 newdirblk->db_state = 0; 2708 } 2709 if (newdirbp == NULL) { 2710 dap->da_state |= DEPCOMPLETE; 2711 ACQUIRE_LOCK(&lk); 2712 } else { 2713 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 2714 MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2715 M_SOFTDEP_FLAGS); 2716 mkdir1->md_list.wk_type = D_MKDIR; 2717 mkdir1->md_state = MKDIR_BODY; 2718 mkdir1->md_diradd = dap; 2719 MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR, 2720 M_SOFTDEP_FLAGS); 2721 mkdir2->md_list.wk_type = D_MKDIR; 2722 mkdir2->md_state = MKDIR_PARENT; 2723 mkdir2->md_diradd = dap; 2724 /* 2725 * Dependency on "." and ".." being written to disk. 2726 */ 2727 mkdir1->md_buf = newdirbp; 2728 ACQUIRE_LOCK(&lk); 2729 LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs); 2730 WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list); 2731 FREE_LOCK(&lk); 2732 bdwrite(newdirbp); 2733 /* 2734 * Dependency on link count increase for parent directory 2735 */ 2736 ACQUIRE_LOCK(&lk); 2737 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0 2738 || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 2739 dap->da_state &= ~MKDIR_PARENT; 2740 WORKITEM_FREE(mkdir2, D_MKDIR); 2741 } else { 2742 LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs); 2743 WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list); 2744 } 2745 } 2746 /* 2747 * Link into parent directory pagedep to await its being written. 2748 */ 2749 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 2750 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2751 dap->da_pagedep = pagedep; 2752 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 2753 da_pdlist); 2754 /* 2755 * Link into its inodedep. Put it on the id_bufwait list if the inode 2756 * is not yet written. If it is written, do the post-inode write 2757 * processing to put it on the id_pendinghd list. 2758 */ 2759 (void) inodedep_lookup(fs, newinum, DEPALLOC, &inodedep); 2760 if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 2761 diradd_inode_written(dap, inodedep); 2762 else 2763 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 2764 if (isnewblk) { 2765 /* 2766 * Directories growing into indirect blocks are rare 2767 * enough and the frequency of new block allocation 2768 * in those cases even more rare, that we choose not 2769 * to bother tracking them. Rather we simply force the 2770 * new directory entry to disk. 2771 */ 2772 if (lbn >= NDADDR) { 2773 FREE_LOCK(&lk); 2774 /* 2775 * We only have a new allocation when at the 2776 * beginning of a new block, not when we are 2777 * expanding into an existing block. 2778 */ 2779 if (blkoff(fs, diroffset) == 0) 2780 return (1); 2781 return (0); 2782 } 2783 /* 2784 * We only have a new allocation when at the beginning 2785 * of a new fragment, not when we are expanding into an 2786 * existing fragment. Also, there is nothing to do if we 2787 * are already tracking this block. 2788 */ 2789 if (fragoff(fs, diroffset) != 0) { 2790 FREE_LOCK(&lk); 2791 return (0); 2792 } 2793 if ((pagedep->pd_state & NEWBLOCK) != 0) { 2794 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 2795 FREE_LOCK(&lk); 2796 return (0); 2797 } 2798 /* 2799 * Find our associated allocdirect and have it track us. 2800 */ 2801 if (inodedep_lookup(fs, dp->i_number, 0, &inodedep) == 0) 2802 panic("softdep_setup_directory_add: lost inodedep"); 2803 adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst); 2804 if (adp == NULL || adp->ad_lbn != lbn) { 2805 FREE_LOCK(&lk); 2806 panic("softdep_setup_directory_add: lost entry"); 2807 } 2808 pagedep->pd_state |= NEWBLOCK; 2809 newdirblk->db_pagedep = pagedep; 2810 WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list); 2811 } 2812 FREE_LOCK(&lk); 2813 return (0); 2814 } 2815 2816 /* 2817 * This procedure is called to change the offset of a directory 2818 * entry when compacting a directory block which must be owned 2819 * exclusively by the caller. Note that the actual entry movement 2820 * must be done in this procedure to ensure that no I/O completions 2821 * occur while the move is in progress. 2822 */ 2823 void 2824 softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize) 2825 struct inode *dp; /* inode for directory */ 2826 caddr_t base; /* address of dp->i_offset */ 2827 caddr_t oldloc; /* address of old directory location */ 2828 caddr_t newloc; /* address of new directory location */ 2829 int entrysize; /* size of directory entry */ 2830 { 2831 int offset, oldoffset, newoffset; 2832 struct pagedep *pagedep; 2833 struct diradd *dap; 2834 ufs_lbn_t lbn; 2835 2836 ACQUIRE_LOCK(&lk); 2837 lbn = lblkno(dp->i_fs, dp->i_offset); 2838 offset = blkoff(dp->i_fs, dp->i_offset); 2839 if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0) 2840 goto done; 2841 oldoffset = offset + (oldloc - base); 2842 newoffset = offset + (newloc - base); 2843 2844 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) { 2845 if (dap->da_offset != oldoffset) 2846 continue; 2847 dap->da_offset = newoffset; 2848 if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset)) 2849 break; 2850 LIST_REMOVE(dap, da_pdlist); 2851 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)], 2852 dap, da_pdlist); 2853 break; 2854 } 2855 if (dap == NULL) { 2856 2857 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) { 2858 if (dap->da_offset == oldoffset) { 2859 dap->da_offset = newoffset; 2860 break; 2861 } 2862 } 2863 } 2864 done: 2865 bcopy(oldloc, newloc, entrysize); 2866 FREE_LOCK(&lk); 2867 } 2868 2869 /* 2870 * Free a diradd dependency structure. This routine must be called 2871 * with splbio interrupts blocked. 2872 */ 2873 static void 2874 free_diradd(dap) 2875 struct diradd *dap; 2876 { 2877 struct dirrem *dirrem; 2878 struct pagedep *pagedep; 2879 struct inodedep *inodedep; 2880 struct mkdir *mkdir, *nextmd; 2881 2882 #ifdef DEBUG 2883 if (lk.lkt_held == NOHOLDER) 2884 panic("free_diradd: lock not held"); 2885 #endif 2886 WORKLIST_REMOVE(&dap->da_list); 2887 LIST_REMOVE(dap, da_pdlist); 2888 if ((dap->da_state & DIRCHG) == 0) { 2889 pagedep = dap->da_pagedep; 2890 } else { 2891 dirrem = dap->da_previous; 2892 pagedep = dirrem->dm_pagedep; 2893 dirrem->dm_dirinum = pagedep->pd_ino; 2894 add_to_worklist(&dirrem->dm_list); 2895 } 2896 if (inodedep_lookup(VFSTOUFS(pagedep->pd_mnt)->um_fs, dap->da_newinum, 2897 0, &inodedep) != 0) 2898 (void) free_inodedep(inodedep); 2899 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2900 for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) { 2901 nextmd = LIST_NEXT(mkdir, md_mkdirs); 2902 if (mkdir->md_diradd != dap) 2903 continue; 2904 dap->da_state &= ~mkdir->md_state; 2905 WORKLIST_REMOVE(&mkdir->md_list); 2906 LIST_REMOVE(mkdir, md_mkdirs); 2907 WORKITEM_FREE(mkdir, D_MKDIR); 2908 } 2909 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 2910 FREE_LOCK(&lk); 2911 panic("free_diradd: unfound ref"); 2912 } 2913 } 2914 WORKITEM_FREE(dap, D_DIRADD); 2915 } 2916 2917 /* 2918 * Directory entry removal dependencies. 2919 * 2920 * When removing a directory entry, the entry's inode pointer must be 2921 * zero'ed on disk before the corresponding inode's link count is decremented 2922 * (possibly freeing the inode for re-use). This dependency is handled by 2923 * updating the directory entry but delaying the inode count reduction until 2924 * after the directory block has been written to disk. After this point, the 2925 * inode count can be decremented whenever it is convenient. 2926 */ 2927 2928 /* 2929 * This routine should be called immediately after removing 2930 * a directory entry. The inode's link count should not be 2931 * decremented by the calling procedure -- the soft updates 2932 * code will do this task when it is safe. 2933 */ 2934 void 2935 softdep_setup_remove(bp, dp, ip, isrmdir) 2936 struct buf *bp; /* buffer containing directory block */ 2937 struct inode *dp; /* inode for the directory being modified */ 2938 struct inode *ip; /* inode for directory entry being removed */ 2939 int isrmdir; /* indicates if doing RMDIR */ 2940 { 2941 struct dirrem *dirrem, *prevdirrem; 2942 2943 /* 2944 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. 2945 */ 2946 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 2947 2948 /* 2949 * If the COMPLETE flag is clear, then there were no active 2950 * entries and we want to roll back to a zeroed entry until 2951 * the new inode is committed to disk. If the COMPLETE flag is 2952 * set then we have deleted an entry that never made it to 2953 * disk. If the entry we deleted resulted from a name change, 2954 * then the old name still resides on disk. We cannot delete 2955 * its inode (returned to us in prevdirrem) until the zeroed 2956 * directory entry gets to disk. The new inode has never been 2957 * referenced on the disk, so can be deleted immediately. 2958 */ 2959 if ((dirrem->dm_state & COMPLETE) == 0) { 2960 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 2961 dm_next); 2962 FREE_LOCK(&lk); 2963 } else { 2964 if (prevdirrem != NULL) 2965 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 2966 prevdirrem, dm_next); 2967 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 2968 FREE_LOCK(&lk); 2969 handle_workitem_remove(dirrem, NULL); 2970 } 2971 } 2972 2973 /* 2974 * Allocate a new dirrem if appropriate and return it along with 2975 * its associated pagedep. Called without a lock, returns with lock. 2976 */ 2977 static long num_dirrem; /* number of dirrem allocated */ 2978 static struct dirrem * 2979 newdirrem(bp, dp, ip, isrmdir, prevdirremp) 2980 struct buf *bp; /* buffer containing directory block */ 2981 struct inode *dp; /* inode for the directory being modified */ 2982 struct inode *ip; /* inode for directory entry being removed */ 2983 int isrmdir; /* indicates if doing RMDIR */ 2984 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 2985 { 2986 int offset; 2987 ufs_lbn_t lbn; 2988 struct diradd *dap; 2989 struct dirrem *dirrem; 2990 struct pagedep *pagedep; 2991 2992 /* 2993 * Whiteouts have no deletion dependencies. 2994 */ 2995 if (ip == NULL) 2996 panic("newdirrem: whiteout"); 2997 /* 2998 * If we are over our limit, try to improve the situation. 2999 * Limiting the number of dirrem structures will also limit 3000 * the number of freefile and freeblks structures. 3001 */ 3002 if (num_dirrem > max_softdeps / 2) 3003 (void) request_cleanup(FLUSH_REMOVE, 0); 3004 num_dirrem += 1; 3005 MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem), 3006 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 3007 dirrem->dm_list.wk_type = D_DIRREM; 3008 dirrem->dm_state = isrmdir ? RMDIR : 0; 3009 dirrem->dm_mnt = ITOV(ip)->v_mount; 3010 dirrem->dm_oldinum = ip->i_number; 3011 *prevdirremp = NULL; 3012 3013 ACQUIRE_LOCK(&lk); 3014 lbn = lblkno(dp->i_fs, dp->i_offset); 3015 offset = blkoff(dp->i_fs, dp->i_offset); 3016 if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0) 3017 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 3018 dirrem->dm_pagedep = pagedep; 3019 /* 3020 * Check for a diradd dependency for the same directory entry. 3021 * If present, then both dependencies become obsolete and can 3022 * be de-allocated. Check for an entry on both the pd_dirraddhd 3023 * list and the pd_pendinghd list. 3024 */ 3025 3026 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 3027 if (dap->da_offset == offset) 3028 break; 3029 if (dap == NULL) { 3030 3031 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 3032 if (dap->da_offset == offset) 3033 break; 3034 if (dap == NULL) 3035 return (dirrem); 3036 } 3037 /* 3038 * Must be ATTACHED at this point. 3039 */ 3040 if ((dap->da_state & ATTACHED) == 0) { 3041 FREE_LOCK(&lk); 3042 panic("newdirrem: not ATTACHED"); 3043 } 3044 if (dap->da_newinum != ip->i_number) { 3045 FREE_LOCK(&lk); 3046 panic("newdirrem: inum %d should be %d", 3047 ip->i_number, dap->da_newinum); 3048 } 3049 /* 3050 * If we are deleting a changed name that never made it to disk, 3051 * then return the dirrem describing the previous inode (which 3052 * represents the inode currently referenced from this entry on disk). 3053 */ 3054 if ((dap->da_state & DIRCHG) != 0) { 3055 *prevdirremp = dap->da_previous; 3056 dap->da_state &= ~DIRCHG; 3057 dap->da_pagedep = pagedep; 3058 } 3059 /* 3060 * We are deleting an entry that never made it to disk. 3061 * Mark it COMPLETE so we can delete its inode immediately. 3062 */ 3063 dirrem->dm_state |= COMPLETE; 3064 free_diradd(dap); 3065 return (dirrem); 3066 } 3067 3068 /* 3069 * Directory entry change dependencies. 3070 * 3071 * Changing an existing directory entry requires that an add operation 3072 * be completed first followed by a deletion. The semantics for the addition 3073 * are identical to the description of adding a new entry above except 3074 * that the rollback is to the old inode number rather than zero. Once 3075 * the addition dependency is completed, the removal is done as described 3076 * in the removal routine above. 3077 */ 3078 3079 /* 3080 * This routine should be called immediately after changing 3081 * a directory entry. The inode's link count should not be 3082 * decremented by the calling procedure -- the soft updates 3083 * code will perform this task when it is safe. 3084 */ 3085 void 3086 softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 3087 struct buf *bp; /* buffer containing directory block */ 3088 struct inode *dp; /* inode for the directory being modified */ 3089 struct inode *ip; /* inode for directory entry being removed */ 3090 ino_t newinum; /* new inode number for changed entry */ 3091 int isrmdir; /* indicates if doing RMDIR */ 3092 { 3093 int offset; 3094 struct diradd *dap = NULL; 3095 struct dirrem *dirrem, *prevdirrem; 3096 struct pagedep *pagedep; 3097 struct inodedep *inodedep; 3098 3099 offset = blkoff(dp->i_fs, dp->i_offset); 3100 3101 /* 3102 * Whiteouts do not need diradd dependencies. 3103 */ 3104 if (newinum != WINO) { 3105 MALLOC(dap, struct diradd *, sizeof(struct diradd), 3106 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 3107 dap->da_list.wk_type = D_DIRADD; 3108 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 3109 dap->da_offset = offset; 3110 dap->da_newinum = newinum; 3111 } 3112 3113 /* 3114 * Allocate a new dirrem and ACQUIRE_LOCK. 3115 */ 3116 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 3117 pagedep = dirrem->dm_pagedep; 3118 /* 3119 * The possible values for isrmdir: 3120 * 0 - non-directory file rename 3121 * 1 - directory rename within same directory 3122 * inum - directory rename to new directory of given inode number 3123 * When renaming to a new directory, we are both deleting and 3124 * creating a new directory entry, so the link count on the new 3125 * directory should not change. Thus we do not need the followup 3126 * dirrem which is usually done in handle_workitem_remove. We set 3127 * the DIRCHG flag to tell handle_workitem_remove to skip the 3128 * followup dirrem. 3129 */ 3130 if (isrmdir > 1) 3131 dirrem->dm_state |= DIRCHG; 3132 3133 /* 3134 * Whiteouts have no additional dependencies, 3135 * so just put the dirrem on the correct list. 3136 */ 3137 if (newinum == WINO) { 3138 if ((dirrem->dm_state & COMPLETE) == 0) { 3139 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 3140 dm_next); 3141 } else { 3142 dirrem->dm_dirinum = pagedep->pd_ino; 3143 add_to_worklist(&dirrem->dm_list); 3144 } 3145 FREE_LOCK(&lk); 3146 return; 3147 } 3148 3149 /* 3150 * If the COMPLETE flag is clear, then there were no active 3151 * entries and we want to roll back to the previous inode until 3152 * the new inode is committed to disk. If the COMPLETE flag is 3153 * set, then we have deleted an entry that never made it to disk. 3154 * If the entry we deleted resulted from a name change, then the old 3155 * inode reference still resides on disk. Any rollback that we do 3156 * needs to be to that old inode (returned to us in prevdirrem). If 3157 * the entry we deleted resulted from a create, then there is 3158 * no entry on the disk, so we want to roll back to zero rather 3159 * than the uncommitted inode. In either of the COMPLETE cases we 3160 * want to immediately free the unwritten and unreferenced inode. 3161 */ 3162 if ((dirrem->dm_state & COMPLETE) == 0) { 3163 dap->da_previous = dirrem; 3164 } else { 3165 if (prevdirrem != NULL) { 3166 dap->da_previous = prevdirrem; 3167 } else { 3168 dap->da_state &= ~DIRCHG; 3169 dap->da_pagedep = pagedep; 3170 } 3171 dirrem->dm_dirinum = pagedep->pd_ino; 3172 add_to_worklist(&dirrem->dm_list); 3173 } 3174 /* 3175 * Link into its inodedep. Put it on the id_bufwait list if the inode 3176 * is not yet written. If it is written, do the post-inode write 3177 * processing to put it on the id_pendinghd list. 3178 */ 3179 if (inodedep_lookup(dp->i_fs, newinum, DEPALLOC, &inodedep) == 0 || 3180 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 3181 dap->da_state |= COMPLETE; 3182 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 3183 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 3184 } else { 3185 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], 3186 dap, da_pdlist); 3187 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 3188 } 3189 FREE_LOCK(&lk); 3190 } 3191 3192 /* 3193 * Called whenever the link count on an inode is changed. 3194 * It creates an inode dependency so that the new reference(s) 3195 * to the inode cannot be committed to disk until the updated 3196 * inode has been written. 3197 */ 3198 void 3199 softdep_change_linkcnt(ip) 3200 struct inode *ip; /* the inode with the increased link count */ 3201 { 3202 struct inodedep *inodedep; 3203 3204 ACQUIRE_LOCK(&lk); 3205 (void) inodedep_lookup(ip->i_fs, ip->i_number, DEPALLOC, &inodedep); 3206 if (ip->i_nlink < ip->i_effnlink) { 3207 FREE_LOCK(&lk); 3208 panic("softdep_change_linkcnt: bad delta"); 3209 } 3210 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3211 FREE_LOCK(&lk); 3212 } 3213 3214 /* 3215 * Called when the effective link count and the reference count 3216 * on an inode drops to zero. At this point there are no names 3217 * referencing the file in the filesystem and no active file 3218 * references. The space associated with the file will be freed 3219 * as soon as the necessary soft dependencies are cleared. 3220 */ 3221 void 3222 softdep_releasefile(ip) 3223 struct inode *ip; /* inode with the zero effective link count */ 3224 { 3225 struct inodedep *inodedep; 3226 struct fs *fs; 3227 int extblocks; 3228 3229 if (ip->i_effnlink > 0) 3230 panic("softdep_filerelease: file still referenced"); 3231 /* 3232 * We may be called several times as the real reference count 3233 * drops to zero. We only want to account for the space once. 3234 */ 3235 if (ip->i_flag & IN_SPACECOUNTED) 3236 return; 3237 /* 3238 * We have to deactivate a snapshot otherwise copyonwrites may 3239 * add blocks and the cleanup may remove blocks after we have 3240 * tried to account for them. 3241 */ 3242 if ((ip->i_flags & SF_SNAPSHOT) != 0) 3243 ffs_snapremove(ITOV(ip)); 3244 /* 3245 * If we are tracking an nlinkdelta, we have to also remember 3246 * whether we accounted for the freed space yet. 3247 */ 3248 ACQUIRE_LOCK(&lk); 3249 if ((inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep))) 3250 inodedep->id_state |= SPACECOUNTED; 3251 FREE_LOCK(&lk); 3252 fs = ip->i_fs; 3253 extblocks = 0; 3254 if (fs->fs_magic == FS_UFS2_MAGIC) 3255 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 3256 ip->i_fs->fs_pendingblocks += DIP(ip, i_blocks) - extblocks; 3257 ip->i_fs->fs_pendinginodes += 1; 3258 ip->i_flag |= IN_SPACECOUNTED; 3259 } 3260 3261 /* 3262 * This workitem decrements the inode's link count. 3263 * If the link count reaches zero, the file is removed. 3264 */ 3265 static void 3266 handle_workitem_remove(dirrem, xp) 3267 struct dirrem *dirrem; 3268 struct vnode *xp; 3269 { 3270 struct thread *td = curthread; 3271 struct inodedep *inodedep; 3272 struct vnode *vp; 3273 struct inode *ip; 3274 ino_t oldinum; 3275 int error; 3276 3277 if ((vp = xp) == NULL && 3278 (error = VFS_VGET(dirrem->dm_mnt, dirrem->dm_oldinum, LK_EXCLUSIVE, 3279 &vp)) != 0) { 3280 softdep_error("handle_workitem_remove: vget", error); 3281 return; 3282 } 3283 ip = VTOI(vp); 3284 ACQUIRE_LOCK(&lk); 3285 if ((inodedep_lookup(ip->i_fs, dirrem->dm_oldinum, 0, &inodedep)) == 0){ 3286 FREE_LOCK(&lk); 3287 panic("handle_workitem_remove: lost inodedep"); 3288 } 3289 /* 3290 * Normal file deletion. 3291 */ 3292 if ((dirrem->dm_state & RMDIR) == 0) { 3293 ip->i_nlink--; 3294 DIP(ip, i_nlink) = ip->i_nlink; 3295 ip->i_flag |= IN_CHANGE; 3296 if (ip->i_nlink < ip->i_effnlink) { 3297 FREE_LOCK(&lk); 3298 panic("handle_workitem_remove: bad file delta"); 3299 } 3300 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3301 FREE_LOCK(&lk); 3302 vput(vp); 3303 num_dirrem -= 1; 3304 WORKITEM_FREE(dirrem, D_DIRREM); 3305 return; 3306 } 3307 /* 3308 * Directory deletion. Decrement reference count for both the 3309 * just deleted parent directory entry and the reference for ".". 3310 * Next truncate the directory to length zero. When the 3311 * truncation completes, arrange to have the reference count on 3312 * the parent decremented to account for the loss of "..". 3313 */ 3314 ip->i_nlink -= 2; 3315 DIP(ip, i_nlink) = ip->i_nlink; 3316 ip->i_flag |= IN_CHANGE; 3317 if (ip->i_nlink < ip->i_effnlink) { 3318 FREE_LOCK(&lk); 3319 panic("handle_workitem_remove: bad dir delta"); 3320 } 3321 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 3322 FREE_LOCK(&lk); 3323 if ((error = UFS_TRUNCATE(vp, (off_t)0, 0, td->td_ucred, td)) != 0) 3324 softdep_error("handle_workitem_remove: truncate", error); 3325 /* 3326 * Rename a directory to a new parent. Since, we are both deleting 3327 * and creating a new directory entry, the link count on the new 3328 * directory should not change. Thus we skip the followup dirrem. 3329 */ 3330 if (dirrem->dm_state & DIRCHG) { 3331 vput(vp); 3332 num_dirrem -= 1; 3333 WORKITEM_FREE(dirrem, D_DIRREM); 3334 return; 3335 } 3336 /* 3337 * If the inodedep does not exist, then the zero'ed inode has 3338 * been written to disk. If the allocated inode has never been 3339 * written to disk, then the on-disk inode is zero'ed. In either 3340 * case we can remove the file immediately. 3341 */ 3342 ACQUIRE_LOCK(&lk); 3343 dirrem->dm_state = 0; 3344 oldinum = dirrem->dm_oldinum; 3345 dirrem->dm_oldinum = dirrem->dm_dirinum; 3346 if (inodedep_lookup(ip->i_fs, oldinum, 0, &inodedep) == 0 || 3347 check_inode_unwritten(inodedep)) { 3348 FREE_LOCK(&lk); 3349 vput(vp); 3350 handle_workitem_remove(dirrem, NULL); 3351 return; 3352 } 3353 WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list); 3354 FREE_LOCK(&lk); 3355 vput(vp); 3356 } 3357 3358 /* 3359 * Inode de-allocation dependencies. 3360 * 3361 * When an inode's link count is reduced to zero, it can be de-allocated. We 3362 * found it convenient to postpone de-allocation until after the inode is 3363 * written to disk with its new link count (zero). At this point, all of the 3364 * on-disk inode's block pointers are nullified and, with careful dependency 3365 * list ordering, all dependencies related to the inode will be satisfied and 3366 * the corresponding dependency structures de-allocated. So, if/when the 3367 * inode is reused, there will be no mixing of old dependencies with new 3368 * ones. This artificial dependency is set up by the block de-allocation 3369 * procedure above (softdep_setup_freeblocks) and completed by the 3370 * following procedure. 3371 */ 3372 static void 3373 handle_workitem_freefile(freefile) 3374 struct freefile *freefile; 3375 { 3376 struct fs *fs; 3377 struct inodedep *idp; 3378 int error; 3379 3380 fs = VFSTOUFS(freefile->fx_mnt)->um_fs; 3381 #ifdef DEBUG 3382 ACQUIRE_LOCK(&lk); 3383 error = inodedep_lookup(fs, freefile->fx_oldinum, 0, &idp); 3384 FREE_LOCK(&lk); 3385 if (error) 3386 panic("handle_workitem_freefile: inodedep survived"); 3387 #endif 3388 fs->fs_pendinginodes -= 1; 3389 if ((error = ffs_freefile(fs, freefile->fx_devvp, freefile->fx_oldinum, 3390 freefile->fx_mode)) != 0) 3391 softdep_error("handle_workitem_freefile", error); 3392 WORKITEM_FREE(freefile, D_FREEFILE); 3393 } 3394 3395 /* 3396 * Disk writes. 3397 * 3398 * The dependency structures constructed above are most actively used when file 3399 * system blocks are written to disk. No constraints are placed on when a 3400 * block can be written, but unsatisfied update dependencies are made safe by 3401 * modifying (or replacing) the source memory for the duration of the disk 3402 * write. When the disk write completes, the memory block is again brought 3403 * up-to-date. 3404 * 3405 * In-core inode structure reclamation. 3406 * 3407 * Because there are a finite number of "in-core" inode structures, they are 3408 * reused regularly. By transferring all inode-related dependencies to the 3409 * in-memory inode block and indexing them separately (via "inodedep"s), we 3410 * can allow "in-core" inode structures to be reused at any time and avoid 3411 * any increase in contention. 3412 * 3413 * Called just before entering the device driver to initiate a new disk I/O. 3414 * The buffer must be locked, thus, no I/O completion operations can occur 3415 * while we are manipulating its associated dependencies. 3416 */ 3417 static void 3418 softdep_disk_io_initiation(bp) 3419 struct buf *bp; /* structure describing disk write to occur */ 3420 { 3421 struct worklist *wk, *nextwk; 3422 struct indirdep *indirdep; 3423 struct inodedep *inodedep; 3424 3425 /* 3426 * We only care about write operations. There should never 3427 * be dependencies for reads. 3428 */ 3429 if (bp->b_iocmd == BIO_READ) 3430 panic("softdep_disk_io_initiation: read"); 3431 /* 3432 * Do any necessary pre-I/O processing. 3433 */ 3434 for (wk = LIST_FIRST(&bp->b_dep); wk; wk = nextwk) { 3435 nextwk = LIST_NEXT(wk, wk_list); 3436 switch (wk->wk_type) { 3437 3438 case D_PAGEDEP: 3439 initiate_write_filepage(WK_PAGEDEP(wk), bp); 3440 continue; 3441 3442 case D_INODEDEP: 3443 inodedep = WK_INODEDEP(wk); 3444 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) 3445 initiate_write_inodeblock_ufs1(inodedep, bp); 3446 else 3447 initiate_write_inodeblock_ufs2(inodedep, bp); 3448 continue; 3449 3450 case D_INDIRDEP: 3451 indirdep = WK_INDIRDEP(wk); 3452 if (indirdep->ir_state & GOINGAWAY) 3453 panic("disk_io_initiation: indirdep gone"); 3454 /* 3455 * If there are no remaining dependencies, this 3456 * will be writing the real pointers, so the 3457 * dependency can be freed. 3458 */ 3459 if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) { 3460 indirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE; 3461 brelse(indirdep->ir_savebp); 3462 /* inline expand WORKLIST_REMOVE(wk); */ 3463 wk->wk_state &= ~ONWORKLIST; 3464 LIST_REMOVE(wk, wk_list); 3465 WORKITEM_FREE(indirdep, D_INDIRDEP); 3466 continue; 3467 } 3468 /* 3469 * Replace up-to-date version with safe version. 3470 */ 3471 MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount, 3472 M_INDIRDEP, M_SOFTDEP_FLAGS); 3473 ACQUIRE_LOCK(&lk); 3474 indirdep->ir_state &= ~ATTACHED; 3475 indirdep->ir_state |= UNDONE; 3476 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 3477 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 3478 bp->b_bcount); 3479 FREE_LOCK(&lk); 3480 continue; 3481 3482 case D_MKDIR: 3483 case D_BMSAFEMAP: 3484 case D_ALLOCDIRECT: 3485 case D_ALLOCINDIR: 3486 continue; 3487 3488 default: 3489 panic("handle_disk_io_initiation: Unexpected type %s", 3490 TYPENAME(wk->wk_type)); 3491 /* NOTREACHED */ 3492 } 3493 } 3494 } 3495 3496 /* 3497 * Called from within the procedure above to deal with unsatisfied 3498 * allocation dependencies in a directory. The buffer must be locked, 3499 * thus, no I/O completion operations can occur while we are 3500 * manipulating its associated dependencies. 3501 */ 3502 static void 3503 initiate_write_filepage(pagedep, bp) 3504 struct pagedep *pagedep; 3505 struct buf *bp; 3506 { 3507 struct diradd *dap; 3508 struct direct *ep; 3509 int i; 3510 3511 if (pagedep->pd_state & IOSTARTED) { 3512 /* 3513 * This can only happen if there is a driver that does not 3514 * understand chaining. Here biodone will reissue the call 3515 * to strategy for the incomplete buffers. 3516 */ 3517 printf("initiate_write_filepage: already started\n"); 3518 return; 3519 } 3520 pagedep->pd_state |= IOSTARTED; 3521 ACQUIRE_LOCK(&lk); 3522 for (i = 0; i < DAHASHSZ; i++) { 3523 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 3524 ep = (struct direct *) 3525 ((char *)bp->b_data + dap->da_offset); 3526 if (ep->d_ino != dap->da_newinum) { 3527 FREE_LOCK(&lk); 3528 panic("%s: dir inum %d != new %d", 3529 "initiate_write_filepage", 3530 ep->d_ino, dap->da_newinum); 3531 } 3532 if (dap->da_state & DIRCHG) 3533 ep->d_ino = dap->da_previous->dm_oldinum; 3534 else 3535 ep->d_ino = 0; 3536 dap->da_state &= ~ATTACHED; 3537 dap->da_state |= UNDONE; 3538 } 3539 } 3540 FREE_LOCK(&lk); 3541 } 3542 3543 /* 3544 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 3545 * Note that any bug fixes made to this routine must be done in the 3546 * version found below. 3547 * 3548 * Called from within the procedure above to deal with unsatisfied 3549 * allocation dependencies in an inodeblock. The buffer must be 3550 * locked, thus, no I/O completion operations can occur while we 3551 * are manipulating its associated dependencies. 3552 */ 3553 static void 3554 initiate_write_inodeblock_ufs1(inodedep, bp) 3555 struct inodedep *inodedep; 3556 struct buf *bp; /* The inode block */ 3557 { 3558 struct allocdirect *adp, *lastadp; 3559 struct ufs1_dinode *dp; 3560 struct fs *fs; 3561 ufs_lbn_t i, prevlbn = 0; 3562 int deplist; 3563 3564 if (inodedep->id_state & IOSTARTED) 3565 panic("initiate_write_inodeblock_ufs1: already started"); 3566 inodedep->id_state |= IOSTARTED; 3567 fs = inodedep->id_fs; 3568 dp = (struct ufs1_dinode *)bp->b_data + 3569 ino_to_fsbo(fs, inodedep->id_ino); 3570 /* 3571 * If the bitmap is not yet written, then the allocated 3572 * inode cannot be written to disk. 3573 */ 3574 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3575 if (inodedep->id_savedino1 != NULL) 3576 panic("initiate_write_inodeblock_ufs1: I/O underway"); 3577 MALLOC(inodedep->id_savedino1, struct ufs1_dinode *, 3578 sizeof(struct ufs1_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3579 *inodedep->id_savedino1 = *dp; 3580 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 3581 return; 3582 } 3583 /* 3584 * If no dependencies, then there is nothing to roll back. 3585 */ 3586 inodedep->id_savedsize = dp->di_size; 3587 inodedep->id_savedextsize = 0; 3588 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL) 3589 return; 3590 /* 3591 * Set the dependencies to busy. 3592 */ 3593 ACQUIRE_LOCK(&lk); 3594 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3595 adp = TAILQ_NEXT(adp, ad_next)) { 3596 #ifdef DIAGNOSTIC 3597 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3598 FREE_LOCK(&lk); 3599 panic("softdep_write_inodeblock: lbn order"); 3600 } 3601 prevlbn = adp->ad_lbn; 3602 if (adp->ad_lbn < NDADDR && 3603 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3604 FREE_LOCK(&lk); 3605 panic("%s: direct pointer #%jd mismatch %d != %jd", 3606 "softdep_write_inodeblock", 3607 (intmax_t)adp->ad_lbn, 3608 dp->di_db[adp->ad_lbn], 3609 (intmax_t)adp->ad_newblkno); 3610 } 3611 if (adp->ad_lbn >= NDADDR && 3612 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3613 FREE_LOCK(&lk); 3614 panic("%s: indirect pointer #%jd mismatch %d != %jd", 3615 "softdep_write_inodeblock", 3616 (intmax_t)adp->ad_lbn - NDADDR, 3617 dp->di_ib[adp->ad_lbn - NDADDR], 3618 (intmax_t)adp->ad_newblkno); 3619 } 3620 deplist |= 1 << adp->ad_lbn; 3621 if ((adp->ad_state & ATTACHED) == 0) { 3622 FREE_LOCK(&lk); 3623 panic("softdep_write_inodeblock: Unknown state 0x%x", 3624 adp->ad_state); 3625 } 3626 #endif /* DIAGNOSTIC */ 3627 adp->ad_state &= ~ATTACHED; 3628 adp->ad_state |= UNDONE; 3629 } 3630 /* 3631 * The on-disk inode cannot claim to be any larger than the last 3632 * fragment that has been written. Otherwise, the on-disk inode 3633 * might have fragments that were not the last block in the file 3634 * which would corrupt the filesystem. 3635 */ 3636 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3637 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3638 if (adp->ad_lbn >= NDADDR) 3639 break; 3640 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3641 /* keep going until hitting a rollback to a frag */ 3642 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3643 continue; 3644 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3645 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3646 #ifdef DIAGNOSTIC 3647 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3648 FREE_LOCK(&lk); 3649 panic("softdep_write_inodeblock: lost dep1"); 3650 } 3651 #endif /* DIAGNOSTIC */ 3652 dp->di_db[i] = 0; 3653 } 3654 for (i = 0; i < NIADDR; i++) { 3655 #ifdef DIAGNOSTIC 3656 if (dp->di_ib[i] != 0 && 3657 (deplist & ((1 << NDADDR) << i)) == 0) { 3658 FREE_LOCK(&lk); 3659 panic("softdep_write_inodeblock: lost dep2"); 3660 } 3661 #endif /* DIAGNOSTIC */ 3662 dp->di_ib[i] = 0; 3663 } 3664 FREE_LOCK(&lk); 3665 return; 3666 } 3667 /* 3668 * If we have zero'ed out the last allocated block of the file, 3669 * roll back the size to the last currently allocated block. 3670 * We know that this last allocated block is a full-sized as 3671 * we already checked for fragments in the loop above. 3672 */ 3673 if (lastadp != NULL && 3674 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3675 for (i = lastadp->ad_lbn; i >= 0; i--) 3676 if (dp->di_db[i] != 0) 3677 break; 3678 dp->di_size = (i + 1) * fs->fs_bsize; 3679 } 3680 /* 3681 * The only dependencies are for indirect blocks. 3682 * 3683 * The file size for indirect block additions is not guaranteed. 3684 * Such a guarantee would be non-trivial to achieve. The conventional 3685 * synchronous write implementation also does not make this guarantee. 3686 * Fsck should catch and fix discrepancies. Arguably, the file size 3687 * can be over-estimated without destroying integrity when the file 3688 * moves into the indirect blocks (i.e., is large). If we want to 3689 * postpone fsck, we are stuck with this argument. 3690 */ 3691 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3692 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3693 FREE_LOCK(&lk); 3694 } 3695 3696 /* 3697 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 3698 * Note that any bug fixes made to this routine must be done in the 3699 * version found above. 3700 * 3701 * Called from within the procedure above to deal with unsatisfied 3702 * allocation dependencies in an inodeblock. The buffer must be 3703 * locked, thus, no I/O completion operations can occur while we 3704 * are manipulating its associated dependencies. 3705 */ 3706 static void 3707 initiate_write_inodeblock_ufs2(inodedep, bp) 3708 struct inodedep *inodedep; 3709 struct buf *bp; /* The inode block */ 3710 { 3711 struct allocdirect *adp, *lastadp; 3712 struct ufs2_dinode *dp; 3713 struct fs *fs; 3714 ufs_lbn_t i, prevlbn = 0; 3715 int deplist; 3716 3717 if (inodedep->id_state & IOSTARTED) 3718 panic("initiate_write_inodeblock_ufs2: already started"); 3719 inodedep->id_state |= IOSTARTED; 3720 fs = inodedep->id_fs; 3721 dp = (struct ufs2_dinode *)bp->b_data + 3722 ino_to_fsbo(fs, inodedep->id_ino); 3723 /* 3724 * If the bitmap is not yet written, then the allocated 3725 * inode cannot be written to disk. 3726 */ 3727 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 3728 if (inodedep->id_savedino2 != NULL) 3729 panic("initiate_write_inodeblock_ufs2: I/O underway"); 3730 MALLOC(inodedep->id_savedino2, struct ufs2_dinode *, 3731 sizeof(struct ufs2_dinode), M_INODEDEP, M_SOFTDEP_FLAGS); 3732 *inodedep->id_savedino2 = *dp; 3733 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 3734 return; 3735 } 3736 /* 3737 * If no dependencies, then there is nothing to roll back. 3738 */ 3739 inodedep->id_savedsize = dp->di_size; 3740 inodedep->id_savedextsize = dp->di_extsize; 3741 if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL && 3742 TAILQ_FIRST(&inodedep->id_extupdt) == NULL) 3743 return; 3744 /* 3745 * Set the ext data dependencies to busy. 3746 */ 3747 ACQUIRE_LOCK(&lk); 3748 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3749 adp = TAILQ_NEXT(adp, ad_next)) { 3750 #ifdef DIAGNOSTIC 3751 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3752 FREE_LOCK(&lk); 3753 panic("softdep_write_inodeblock: lbn order"); 3754 } 3755 prevlbn = adp->ad_lbn; 3756 if (dp->di_extb[adp->ad_lbn] != adp->ad_newblkno) { 3757 FREE_LOCK(&lk); 3758 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3759 "softdep_write_inodeblock", 3760 (intmax_t)adp->ad_lbn, 3761 (intmax_t)dp->di_extb[adp->ad_lbn], 3762 (intmax_t)adp->ad_newblkno); 3763 } 3764 deplist |= 1 << adp->ad_lbn; 3765 if ((adp->ad_state & ATTACHED) == 0) { 3766 FREE_LOCK(&lk); 3767 panic("softdep_write_inodeblock: Unknown state 0x%x", 3768 adp->ad_state); 3769 } 3770 #endif /* DIAGNOSTIC */ 3771 adp->ad_state &= ~ATTACHED; 3772 adp->ad_state |= UNDONE; 3773 } 3774 /* 3775 * The on-disk inode cannot claim to be any larger than the last 3776 * fragment that has been written. Otherwise, the on-disk inode 3777 * might have fragments that were not the last block in the ext 3778 * data which would corrupt the filesystem. 3779 */ 3780 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 3781 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3782 dp->di_extb[adp->ad_lbn] = adp->ad_oldblkno; 3783 /* keep going until hitting a rollback to a frag */ 3784 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3785 continue; 3786 dp->di_extsize = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3787 for (i = adp->ad_lbn + 1; i < NXADDR; i++) { 3788 #ifdef DIAGNOSTIC 3789 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) { 3790 FREE_LOCK(&lk); 3791 panic("softdep_write_inodeblock: lost dep1"); 3792 } 3793 #endif /* DIAGNOSTIC */ 3794 dp->di_extb[i] = 0; 3795 } 3796 lastadp = NULL; 3797 break; 3798 } 3799 /* 3800 * If we have zero'ed out the last allocated block of the ext 3801 * data, roll back the size to the last currently allocated block. 3802 * We know that this last allocated block is a full-sized as 3803 * we already checked for fragments in the loop above. 3804 */ 3805 if (lastadp != NULL && 3806 dp->di_extsize <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3807 for (i = lastadp->ad_lbn; i >= 0; i--) 3808 if (dp->di_extb[i] != 0) 3809 break; 3810 dp->di_extsize = (i + 1) * fs->fs_bsize; 3811 } 3812 /* 3813 * Set the file data dependencies to busy. 3814 */ 3815 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3816 adp = TAILQ_NEXT(adp, ad_next)) { 3817 #ifdef DIAGNOSTIC 3818 if (deplist != 0 && prevlbn >= adp->ad_lbn) { 3819 FREE_LOCK(&lk); 3820 panic("softdep_write_inodeblock: lbn order"); 3821 } 3822 prevlbn = adp->ad_lbn; 3823 if (adp->ad_lbn < NDADDR && 3824 dp->di_db[adp->ad_lbn] != adp->ad_newblkno) { 3825 FREE_LOCK(&lk); 3826 panic("%s: direct pointer #%jd mismatch %jd != %jd", 3827 "softdep_write_inodeblock", 3828 (intmax_t)adp->ad_lbn, 3829 (intmax_t)dp->di_db[adp->ad_lbn], 3830 (intmax_t)adp->ad_newblkno); 3831 } 3832 if (adp->ad_lbn >= NDADDR && 3833 dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno) { 3834 FREE_LOCK(&lk); 3835 panic("%s indirect pointer #%jd mismatch %jd != %jd", 3836 "softdep_write_inodeblock:", 3837 (intmax_t)adp->ad_lbn - NDADDR, 3838 (intmax_t)dp->di_ib[adp->ad_lbn - NDADDR], 3839 (intmax_t)adp->ad_newblkno); 3840 } 3841 deplist |= 1 << adp->ad_lbn; 3842 if ((adp->ad_state & ATTACHED) == 0) { 3843 FREE_LOCK(&lk); 3844 panic("softdep_write_inodeblock: Unknown state 0x%x", 3845 adp->ad_state); 3846 } 3847 #endif /* DIAGNOSTIC */ 3848 adp->ad_state &= ~ATTACHED; 3849 adp->ad_state |= UNDONE; 3850 } 3851 /* 3852 * The on-disk inode cannot claim to be any larger than the last 3853 * fragment that has been written. Otherwise, the on-disk inode 3854 * might have fragments that were not the last block in the file 3855 * which would corrupt the filesystem. 3856 */ 3857 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 3858 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 3859 if (adp->ad_lbn >= NDADDR) 3860 break; 3861 dp->di_db[adp->ad_lbn] = adp->ad_oldblkno; 3862 /* keep going until hitting a rollback to a frag */ 3863 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 3864 continue; 3865 dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize; 3866 for (i = adp->ad_lbn + 1; i < NDADDR; i++) { 3867 #ifdef DIAGNOSTIC 3868 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) { 3869 FREE_LOCK(&lk); 3870 panic("softdep_write_inodeblock: lost dep2"); 3871 } 3872 #endif /* DIAGNOSTIC */ 3873 dp->di_db[i] = 0; 3874 } 3875 for (i = 0; i < NIADDR; i++) { 3876 #ifdef DIAGNOSTIC 3877 if (dp->di_ib[i] != 0 && 3878 (deplist & ((1 << NDADDR) << i)) == 0) { 3879 FREE_LOCK(&lk); 3880 panic("softdep_write_inodeblock: lost dep3"); 3881 } 3882 #endif /* DIAGNOSTIC */ 3883 dp->di_ib[i] = 0; 3884 } 3885 FREE_LOCK(&lk); 3886 return; 3887 } 3888 /* 3889 * If we have zero'ed out the last allocated block of the file, 3890 * roll back the size to the last currently allocated block. 3891 * We know that this last allocated block is a full-sized as 3892 * we already checked for fragments in the loop above. 3893 */ 3894 if (lastadp != NULL && 3895 dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) { 3896 for (i = lastadp->ad_lbn; i >= 0; i--) 3897 if (dp->di_db[i] != 0) 3898 break; 3899 dp->di_size = (i + 1) * fs->fs_bsize; 3900 } 3901 /* 3902 * The only dependencies are for indirect blocks. 3903 * 3904 * The file size for indirect block additions is not guaranteed. 3905 * Such a guarantee would be non-trivial to achieve. The conventional 3906 * synchronous write implementation also does not make this guarantee. 3907 * Fsck should catch and fix discrepancies. Arguably, the file size 3908 * can be over-estimated without destroying integrity when the file 3909 * moves into the indirect blocks (i.e., is large). If we want to 3910 * postpone fsck, we are stuck with this argument. 3911 */ 3912 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 3913 dp->di_ib[adp->ad_lbn - NDADDR] = 0; 3914 FREE_LOCK(&lk); 3915 } 3916 3917 /* 3918 * This routine is called during the completion interrupt 3919 * service routine for a disk write (from the procedure called 3920 * by the device driver to inform the filesystem caches of 3921 * a request completion). It should be called early in this 3922 * procedure, before the block is made available to other 3923 * processes or other routines are called. 3924 */ 3925 static void 3926 softdep_disk_write_complete(bp) 3927 struct buf *bp; /* describes the completed disk write */ 3928 { 3929 struct worklist *wk; 3930 struct workhead reattach; 3931 struct newblk *newblk; 3932 struct allocindir *aip; 3933 struct allocdirect *adp; 3934 struct indirdep *indirdep; 3935 struct inodedep *inodedep; 3936 struct bmsafemap *bmsafemap; 3937 3938 #ifdef DEBUG 3939 if (lk.lkt_held != NOHOLDER) 3940 panic("softdep_disk_write_complete: lock is held"); 3941 lk.lkt_held = SPECIAL_FLAG; 3942 #endif 3943 LIST_INIT(&reattach); 3944 while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) { 3945 WORKLIST_REMOVE(wk); 3946 switch (wk->wk_type) { 3947 3948 case D_PAGEDEP: 3949 if (handle_written_filepage(WK_PAGEDEP(wk), bp)) 3950 WORKLIST_INSERT(&reattach, wk); 3951 continue; 3952 3953 case D_INODEDEP: 3954 if (handle_written_inodeblock(WK_INODEDEP(wk), bp)) 3955 WORKLIST_INSERT(&reattach, wk); 3956 continue; 3957 3958 case D_BMSAFEMAP: 3959 bmsafemap = WK_BMSAFEMAP(wk); 3960 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) { 3961 newblk->nb_state |= DEPCOMPLETE; 3962 newblk->nb_bmsafemap = NULL; 3963 LIST_REMOVE(newblk, nb_deps); 3964 } 3965 while ((adp = 3966 LIST_FIRST(&bmsafemap->sm_allocdirecthd))) { 3967 adp->ad_state |= DEPCOMPLETE; 3968 adp->ad_buf = NULL; 3969 LIST_REMOVE(adp, ad_deps); 3970 handle_allocdirect_partdone(adp); 3971 } 3972 while ((aip = 3973 LIST_FIRST(&bmsafemap->sm_allocindirhd))) { 3974 aip->ai_state |= DEPCOMPLETE; 3975 aip->ai_buf = NULL; 3976 LIST_REMOVE(aip, ai_deps); 3977 handle_allocindir_partdone(aip); 3978 } 3979 while ((inodedep = 3980 LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) { 3981 inodedep->id_state |= DEPCOMPLETE; 3982 LIST_REMOVE(inodedep, id_deps); 3983 inodedep->id_buf = NULL; 3984 } 3985 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 3986 continue; 3987 3988 case D_MKDIR: 3989 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY); 3990 continue; 3991 3992 case D_ALLOCDIRECT: 3993 adp = WK_ALLOCDIRECT(wk); 3994 adp->ad_state |= COMPLETE; 3995 handle_allocdirect_partdone(adp); 3996 continue; 3997 3998 case D_ALLOCINDIR: 3999 aip = WK_ALLOCINDIR(wk); 4000 aip->ai_state |= COMPLETE; 4001 handle_allocindir_partdone(aip); 4002 continue; 4003 4004 case D_INDIRDEP: 4005 indirdep = WK_INDIRDEP(wk); 4006 if (indirdep->ir_state & GOINGAWAY) { 4007 lk.lkt_held = NOHOLDER; 4008 panic("disk_write_complete: indirdep gone"); 4009 } 4010 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 4011 FREE(indirdep->ir_saveddata, M_INDIRDEP); 4012 indirdep->ir_saveddata = 0; 4013 indirdep->ir_state &= ~UNDONE; 4014 indirdep->ir_state |= ATTACHED; 4015 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 4016 handle_allocindir_partdone(aip); 4017 if (aip == LIST_FIRST(&indirdep->ir_donehd)) { 4018 lk.lkt_held = NOHOLDER; 4019 panic("disk_write_complete: not gone"); 4020 } 4021 } 4022 WORKLIST_INSERT(&reattach, wk); 4023 if ((bp->b_flags & B_DELWRI) == 0) 4024 stat_indir_blk_ptrs++; 4025 bdirty(bp); 4026 continue; 4027 4028 default: 4029 lk.lkt_held = NOHOLDER; 4030 panic("handle_disk_write_complete: Unknown type %s", 4031 TYPENAME(wk->wk_type)); 4032 /* NOTREACHED */ 4033 } 4034 } 4035 /* 4036 * Reattach any requests that must be redone. 4037 */ 4038 while ((wk = LIST_FIRST(&reattach)) != NULL) { 4039 WORKLIST_REMOVE(wk); 4040 WORKLIST_INSERT(&bp->b_dep, wk); 4041 } 4042 #ifdef DEBUG 4043 if (lk.lkt_held != SPECIAL_FLAG) 4044 panic("softdep_disk_write_complete: lock lost"); 4045 lk.lkt_held = NOHOLDER; 4046 #endif 4047 } 4048 4049 /* 4050 * Called from within softdep_disk_write_complete above. Note that 4051 * this routine is always called from interrupt level with further 4052 * splbio interrupts blocked. 4053 */ 4054 static void 4055 handle_allocdirect_partdone(adp) 4056 struct allocdirect *adp; /* the completed allocdirect */ 4057 { 4058 struct allocdirectlst *listhead; 4059 struct allocdirect *listadp; 4060 struct inodedep *inodedep; 4061 long bsize, delay; 4062 4063 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4064 return; 4065 if (adp->ad_buf != NULL) { 4066 lk.lkt_held = NOHOLDER; 4067 panic("handle_allocdirect_partdone: dangling dep"); 4068 } 4069 /* 4070 * The on-disk inode cannot claim to be any larger than the last 4071 * fragment that has been written. Otherwise, the on-disk inode 4072 * might have fragments that were not the last block in the file 4073 * which would corrupt the filesystem. Thus, we cannot free any 4074 * allocdirects after one whose ad_oldblkno claims a fragment as 4075 * these blocks must be rolled back to zero before writing the inode. 4076 * We check the currently active set of allocdirects in id_inoupdt 4077 * or id_extupdt as appropriate. 4078 */ 4079 inodedep = adp->ad_inodedep; 4080 bsize = inodedep->id_fs->fs_bsize; 4081 if (adp->ad_state & EXTDATA) 4082 listhead = &inodedep->id_extupdt; 4083 else 4084 listhead = &inodedep->id_inoupdt; 4085 TAILQ_FOREACH(listadp, listhead, ad_next) { 4086 /* found our block */ 4087 if (listadp == adp) 4088 break; 4089 /* continue if ad_oldlbn is not a fragment */ 4090 if (listadp->ad_oldsize == 0 || 4091 listadp->ad_oldsize == bsize) 4092 continue; 4093 /* hit a fragment */ 4094 return; 4095 } 4096 /* 4097 * If we have reached the end of the current list without 4098 * finding the just finished dependency, then it must be 4099 * on the future dependency list. Future dependencies cannot 4100 * be freed until they are moved to the current list. 4101 */ 4102 if (listadp == NULL) { 4103 #ifdef DEBUG 4104 if (adp->ad_state & EXTDATA) 4105 listhead = &inodedep->id_newextupdt; 4106 else 4107 listhead = &inodedep->id_newinoupdt; 4108 TAILQ_FOREACH(listadp, listhead, ad_next) 4109 /* found our block */ 4110 if (listadp == adp) 4111 break; 4112 if (listadp == NULL) { 4113 lk.lkt_held = NOHOLDER; 4114 panic("handle_allocdirect_partdone: lost dep"); 4115 } 4116 #endif /* DEBUG */ 4117 return; 4118 } 4119 /* 4120 * If we have found the just finished dependency, then free 4121 * it along with anything that follows it that is complete. 4122 * If the inode still has a bitmap dependency, then it has 4123 * never been written to disk, hence the on-disk inode cannot 4124 * reference the old fragment so we can free it without delay. 4125 */ 4126 delay = (inodedep->id_state & DEPCOMPLETE); 4127 for (; adp; adp = listadp) { 4128 listadp = TAILQ_NEXT(adp, ad_next); 4129 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 4130 return; 4131 free_allocdirect(listhead, adp, delay); 4132 } 4133 } 4134 4135 /* 4136 * Called from within softdep_disk_write_complete above. Note that 4137 * this routine is always called from interrupt level with further 4138 * splbio interrupts blocked. 4139 */ 4140 static void 4141 handle_allocindir_partdone(aip) 4142 struct allocindir *aip; /* the completed allocindir */ 4143 { 4144 struct indirdep *indirdep; 4145 4146 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 4147 return; 4148 if (aip->ai_buf != NULL) { 4149 lk.lkt_held = NOHOLDER; 4150 panic("handle_allocindir_partdone: dangling dependency"); 4151 } 4152 indirdep = aip->ai_indirdep; 4153 if (indirdep->ir_state & UNDONE) { 4154 LIST_REMOVE(aip, ai_next); 4155 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 4156 return; 4157 } 4158 if (indirdep->ir_state & UFS1FMT) 4159 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4160 aip->ai_newblkno; 4161 else 4162 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 4163 aip->ai_newblkno; 4164 LIST_REMOVE(aip, ai_next); 4165 if (aip->ai_freefrag != NULL) 4166 add_to_worklist(&aip->ai_freefrag->ff_list); 4167 WORKITEM_FREE(aip, D_ALLOCINDIR); 4168 } 4169 4170 /* 4171 * Called from within softdep_disk_write_complete above to restore 4172 * in-memory inode block contents to their most up-to-date state. Note 4173 * that this routine is always called from interrupt level with further 4174 * splbio interrupts blocked. 4175 */ 4176 static int 4177 handle_written_inodeblock(inodedep, bp) 4178 struct inodedep *inodedep; 4179 struct buf *bp; /* buffer containing the inode block */ 4180 { 4181 struct worklist *wk, *filefree; 4182 struct allocdirect *adp, *nextadp; 4183 struct ufs1_dinode *dp1 = NULL; 4184 struct ufs2_dinode *dp2 = NULL; 4185 int hadchanges, fstype; 4186 4187 if ((inodedep->id_state & IOSTARTED) == 0) { 4188 lk.lkt_held = NOHOLDER; 4189 panic("handle_written_inodeblock: not started"); 4190 } 4191 inodedep->id_state &= ~IOSTARTED; 4192 inodedep->id_state |= COMPLETE; 4193 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 4194 fstype = UFS1; 4195 dp1 = (struct ufs1_dinode *)bp->b_data + 4196 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4197 } else { 4198 fstype = UFS2; 4199 dp2 = (struct ufs2_dinode *)bp->b_data + 4200 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 4201 } 4202 /* 4203 * If we had to rollback the inode allocation because of 4204 * bitmaps being incomplete, then simply restore it. 4205 * Keep the block dirty so that it will not be reclaimed until 4206 * all associated dependencies have been cleared and the 4207 * corresponding updates written to disk. 4208 */ 4209 if (inodedep->id_savedino1 != NULL) { 4210 if (fstype == UFS1) 4211 *dp1 = *inodedep->id_savedino1; 4212 else 4213 *dp2 = *inodedep->id_savedino2; 4214 FREE(inodedep->id_savedino1, M_INODEDEP); 4215 inodedep->id_savedino1 = NULL; 4216 if ((bp->b_flags & B_DELWRI) == 0) 4217 stat_inode_bitmap++; 4218 bdirty(bp); 4219 return (1); 4220 } 4221 /* 4222 * Roll forward anything that had to be rolled back before 4223 * the inode could be updated. 4224 */ 4225 hadchanges = 0; 4226 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 4227 nextadp = TAILQ_NEXT(adp, ad_next); 4228 if (adp->ad_state & ATTACHED) { 4229 lk.lkt_held = NOHOLDER; 4230 panic("handle_written_inodeblock: new entry"); 4231 } 4232 if (fstype == UFS1) { 4233 if (adp->ad_lbn < NDADDR) { 4234 if (dp1->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4235 lk.lkt_held = NOHOLDER; 4236 panic("%s %s #%jd mismatch %d != %jd", 4237 "handle_written_inodeblock:", 4238 "direct pointer", 4239 (intmax_t)adp->ad_lbn, 4240 dp1->di_db[adp->ad_lbn], 4241 (intmax_t)adp->ad_oldblkno); 4242 } 4243 dp1->di_db[adp->ad_lbn] = adp->ad_newblkno; 4244 } else { 4245 if (dp1->di_ib[adp->ad_lbn - NDADDR] != 0) { 4246 lk.lkt_held = NOHOLDER; 4247 panic("%s: %s #%jd allocated as %d", 4248 "handle_written_inodeblock", 4249 "indirect pointer", 4250 (intmax_t)adp->ad_lbn - NDADDR, 4251 dp1->di_ib[adp->ad_lbn - NDADDR]); 4252 } 4253 dp1->di_ib[adp->ad_lbn - NDADDR] = 4254 adp->ad_newblkno; 4255 } 4256 } else { 4257 if (adp->ad_lbn < NDADDR) { 4258 if (dp2->di_db[adp->ad_lbn]!=adp->ad_oldblkno) { 4259 lk.lkt_held = NOHOLDER; 4260 panic("%s: %s #%jd %s %jd != %jd", 4261 "handle_written_inodeblock", 4262 "direct pointer", 4263 (intmax_t)adp->ad_lbn, "mismatch", 4264 (intmax_t)dp2->di_db[adp->ad_lbn], 4265 (intmax_t)adp->ad_oldblkno); 4266 } 4267 dp2->di_db[adp->ad_lbn] = adp->ad_newblkno; 4268 } else { 4269 if (dp2->di_ib[adp->ad_lbn - NDADDR] != 0) { 4270 lk.lkt_held = NOHOLDER; 4271 panic("%s: %s #%jd allocated as %jd", 4272 "handle_written_inodeblock", 4273 "indirect pointer", 4274 (intmax_t)adp->ad_lbn - NDADDR, 4275 (intmax_t) 4276 dp2->di_ib[adp->ad_lbn - NDADDR]); 4277 } 4278 dp2->di_ib[adp->ad_lbn - NDADDR] = 4279 adp->ad_newblkno; 4280 } 4281 } 4282 adp->ad_state &= ~UNDONE; 4283 adp->ad_state |= ATTACHED; 4284 hadchanges = 1; 4285 } 4286 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 4287 nextadp = TAILQ_NEXT(adp, ad_next); 4288 if (adp->ad_state & ATTACHED) { 4289 lk.lkt_held = NOHOLDER; 4290 panic("handle_written_inodeblock: new entry"); 4291 } 4292 if (dp2->di_extb[adp->ad_lbn] != adp->ad_oldblkno) { 4293 lk.lkt_held = NOHOLDER; 4294 panic("%s: direct pointers #%jd %s %jd != %jd", 4295 "handle_written_inodeblock", 4296 (intmax_t)adp->ad_lbn, "mismatch", 4297 (intmax_t)dp2->di_extb[adp->ad_lbn], 4298 (intmax_t)adp->ad_oldblkno); 4299 } 4300 dp2->di_extb[adp->ad_lbn] = adp->ad_newblkno; 4301 adp->ad_state &= ~UNDONE; 4302 adp->ad_state |= ATTACHED; 4303 hadchanges = 1; 4304 } 4305 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 4306 stat_direct_blk_ptrs++; 4307 /* 4308 * Reset the file size to its most up-to-date value. 4309 */ 4310 if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1) { 4311 lk.lkt_held = NOHOLDER; 4312 panic("handle_written_inodeblock: bad size"); 4313 } 4314 if (fstype == UFS1) { 4315 if (dp1->di_size != inodedep->id_savedsize) { 4316 dp1->di_size = inodedep->id_savedsize; 4317 hadchanges = 1; 4318 } 4319 } else { 4320 if (dp2->di_size != inodedep->id_savedsize) { 4321 dp2->di_size = inodedep->id_savedsize; 4322 hadchanges = 1; 4323 } 4324 if (dp2->di_extsize != inodedep->id_savedextsize) { 4325 dp2->di_extsize = inodedep->id_savedextsize; 4326 hadchanges = 1; 4327 } 4328 } 4329 inodedep->id_savedsize = -1; 4330 inodedep->id_savedextsize = -1; 4331 /* 4332 * If there were any rollbacks in the inode block, then it must be 4333 * marked dirty so that its will eventually get written back in 4334 * its correct form. 4335 */ 4336 if (hadchanges) 4337 bdirty(bp); 4338 /* 4339 * Process any allocdirects that completed during the update. 4340 */ 4341 if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL) 4342 handle_allocdirect_partdone(adp); 4343 if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL) 4344 handle_allocdirect_partdone(adp); 4345 /* 4346 * Process deallocations that were held pending until the 4347 * inode had been written to disk. Freeing of the inode 4348 * is delayed until after all blocks have been freed to 4349 * avoid creation of new <vfsid, inum, lbn> triples 4350 * before the old ones have been deleted. 4351 */ 4352 filefree = NULL; 4353 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 4354 WORKLIST_REMOVE(wk); 4355 switch (wk->wk_type) { 4356 4357 case D_FREEFILE: 4358 /* 4359 * We defer adding filefree to the worklist until 4360 * all other additions have been made to ensure 4361 * that it will be done after all the old blocks 4362 * have been freed. 4363 */ 4364 if (filefree != NULL) { 4365 lk.lkt_held = NOHOLDER; 4366 panic("handle_written_inodeblock: filefree"); 4367 } 4368 filefree = wk; 4369 continue; 4370 4371 case D_MKDIR: 4372 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 4373 continue; 4374 4375 case D_DIRADD: 4376 diradd_inode_written(WK_DIRADD(wk), inodedep); 4377 continue; 4378 4379 case D_FREEBLKS: 4380 case D_FREEFRAG: 4381 case D_DIRREM: 4382 add_to_worklist(wk); 4383 continue; 4384 4385 case D_NEWDIRBLK: 4386 free_newdirblk(WK_NEWDIRBLK(wk)); 4387 continue; 4388 4389 default: 4390 lk.lkt_held = NOHOLDER; 4391 panic("handle_written_inodeblock: Unknown type %s", 4392 TYPENAME(wk->wk_type)); 4393 /* NOTREACHED */ 4394 } 4395 } 4396 if (filefree != NULL) { 4397 if (free_inodedep(inodedep) == 0) { 4398 lk.lkt_held = NOHOLDER; 4399 panic("handle_written_inodeblock: live inodedep"); 4400 } 4401 add_to_worklist(filefree); 4402 return (0); 4403 } 4404 4405 /* 4406 * If no outstanding dependencies, free it. 4407 */ 4408 if (free_inodedep(inodedep) || 4409 (TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 4410 TAILQ_FIRST(&inodedep->id_extupdt) == 0)) 4411 return (0); 4412 return (hadchanges); 4413 } 4414 4415 /* 4416 * Process a diradd entry after its dependent inode has been written. 4417 * This routine must be called with splbio interrupts blocked. 4418 */ 4419 static void 4420 diradd_inode_written(dap, inodedep) 4421 struct diradd *dap; 4422 struct inodedep *inodedep; 4423 { 4424 struct pagedep *pagedep; 4425 4426 dap->da_state |= COMPLETE; 4427 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4428 if (dap->da_state & DIRCHG) 4429 pagedep = dap->da_previous->dm_pagedep; 4430 else 4431 pagedep = dap->da_pagedep; 4432 LIST_REMOVE(dap, da_pdlist); 4433 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4434 } 4435 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 4436 } 4437 4438 /* 4439 * Handle the completion of a mkdir dependency. 4440 */ 4441 static void 4442 handle_written_mkdir(mkdir, type) 4443 struct mkdir *mkdir; 4444 int type; 4445 { 4446 struct diradd *dap; 4447 struct pagedep *pagedep; 4448 4449 if (mkdir->md_state != type) { 4450 lk.lkt_held = NOHOLDER; 4451 panic("handle_written_mkdir: bad type"); 4452 } 4453 dap = mkdir->md_diradd; 4454 dap->da_state &= ~type; 4455 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 4456 dap->da_state |= DEPCOMPLETE; 4457 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4458 if (dap->da_state & DIRCHG) 4459 pagedep = dap->da_previous->dm_pagedep; 4460 else 4461 pagedep = dap->da_pagedep; 4462 LIST_REMOVE(dap, da_pdlist); 4463 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 4464 } 4465 LIST_REMOVE(mkdir, md_mkdirs); 4466 WORKITEM_FREE(mkdir, D_MKDIR); 4467 } 4468 4469 /* 4470 * Called from within softdep_disk_write_complete above. 4471 * A write operation was just completed. Removed inodes can 4472 * now be freed and associated block pointers may be committed. 4473 * Note that this routine is always called from interrupt level 4474 * with further splbio interrupts blocked. 4475 */ 4476 static int 4477 handle_written_filepage(pagedep, bp) 4478 struct pagedep *pagedep; 4479 struct buf *bp; /* buffer containing the written page */ 4480 { 4481 struct dirrem *dirrem; 4482 struct diradd *dap, *nextdap; 4483 struct direct *ep; 4484 int i, chgs; 4485 4486 if ((pagedep->pd_state & IOSTARTED) == 0) { 4487 lk.lkt_held = NOHOLDER; 4488 panic("handle_written_filepage: not started"); 4489 } 4490 pagedep->pd_state &= ~IOSTARTED; 4491 /* 4492 * Process any directory removals that have been committed. 4493 */ 4494 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 4495 LIST_REMOVE(dirrem, dm_next); 4496 dirrem->dm_dirinum = pagedep->pd_ino; 4497 add_to_worklist(&dirrem->dm_list); 4498 } 4499 /* 4500 * Free any directory additions that have been committed. 4501 * If it is a newly allocated block, we have to wait until 4502 * the on-disk directory inode claims the new block. 4503 */ 4504 if ((pagedep->pd_state & NEWBLOCK) == 0) 4505 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 4506 free_diradd(dap); 4507 /* 4508 * Uncommitted directory entries must be restored. 4509 */ 4510 for (chgs = 0, i = 0; i < DAHASHSZ; i++) { 4511 for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap; 4512 dap = nextdap) { 4513 nextdap = LIST_NEXT(dap, da_pdlist); 4514 if (dap->da_state & ATTACHED) { 4515 lk.lkt_held = NOHOLDER; 4516 panic("handle_written_filepage: attached"); 4517 } 4518 ep = (struct direct *) 4519 ((char *)bp->b_data + dap->da_offset); 4520 ep->d_ino = dap->da_newinum; 4521 dap->da_state &= ~UNDONE; 4522 dap->da_state |= ATTACHED; 4523 chgs = 1; 4524 /* 4525 * If the inode referenced by the directory has 4526 * been written out, then the dependency can be 4527 * moved to the pending list. 4528 */ 4529 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 4530 LIST_REMOVE(dap, da_pdlist); 4531 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, 4532 da_pdlist); 4533 } 4534 } 4535 } 4536 /* 4537 * If there were any rollbacks in the directory, then it must be 4538 * marked dirty so that its will eventually get written back in 4539 * its correct form. 4540 */ 4541 if (chgs) { 4542 if ((bp->b_flags & B_DELWRI) == 0) 4543 stat_dir_entry++; 4544 bdirty(bp); 4545 return (1); 4546 } 4547 /* 4548 * If we are not waiting for a new directory block to be 4549 * claimed by its inode, then the pagedep will be freed. 4550 * Otherwise it will remain to track any new entries on 4551 * the page in case they are fsync'ed. 4552 */ 4553 if ((pagedep->pd_state & NEWBLOCK) == 0) { 4554 LIST_REMOVE(pagedep, pd_hash); 4555 WORKITEM_FREE(pagedep, D_PAGEDEP); 4556 } 4557 return (0); 4558 } 4559 4560 /* 4561 * Writing back in-core inode structures. 4562 * 4563 * The filesystem only accesses an inode's contents when it occupies an 4564 * "in-core" inode structure. These "in-core" structures are separate from 4565 * the page frames used to cache inode blocks. Only the latter are 4566 * transferred to/from the disk. So, when the updated contents of the 4567 * "in-core" inode structure are copied to the corresponding in-memory inode 4568 * block, the dependencies are also transferred. The following procedure is 4569 * called when copying a dirty "in-core" inode to a cached inode block. 4570 */ 4571 4572 /* 4573 * Called when an inode is loaded from disk. If the effective link count 4574 * differed from the actual link count when it was last flushed, then we 4575 * need to ensure that the correct effective link count is put back. 4576 */ 4577 void 4578 softdep_load_inodeblock(ip) 4579 struct inode *ip; /* the "in_core" copy of the inode */ 4580 { 4581 struct inodedep *inodedep; 4582 4583 /* 4584 * Check for alternate nlink count. 4585 */ 4586 ip->i_effnlink = ip->i_nlink; 4587 ACQUIRE_LOCK(&lk); 4588 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4589 FREE_LOCK(&lk); 4590 return; 4591 } 4592 ip->i_effnlink -= inodedep->id_nlinkdelta; 4593 if (inodedep->id_state & SPACECOUNTED) 4594 ip->i_flag |= IN_SPACECOUNTED; 4595 FREE_LOCK(&lk); 4596 } 4597 4598 /* 4599 * This routine is called just before the "in-core" inode 4600 * information is to be copied to the in-memory inode block. 4601 * Recall that an inode block contains several inodes. If 4602 * the force flag is set, then the dependencies will be 4603 * cleared so that the update can always be made. Note that 4604 * the buffer is locked when this routine is called, so we 4605 * will never be in the middle of writing the inode block 4606 * to disk. 4607 */ 4608 void 4609 softdep_update_inodeblock(ip, bp, waitfor) 4610 struct inode *ip; /* the "in_core" copy of the inode */ 4611 struct buf *bp; /* the buffer containing the inode block */ 4612 int waitfor; /* nonzero => update must be allowed */ 4613 { 4614 struct inodedep *inodedep; 4615 struct worklist *wk; 4616 int error, gotit; 4617 4618 /* 4619 * If the effective link count is not equal to the actual link 4620 * count, then we must track the difference in an inodedep while 4621 * the inode is (potentially) tossed out of the cache. Otherwise, 4622 * if there is no existing inodedep, then there are no dependencies 4623 * to track. 4624 */ 4625 ACQUIRE_LOCK(&lk); 4626 if (inodedep_lookup(ip->i_fs, ip->i_number, 0, &inodedep) == 0) { 4627 FREE_LOCK(&lk); 4628 if (ip->i_effnlink != ip->i_nlink) 4629 panic("softdep_update_inodeblock: bad link count"); 4630 return; 4631 } 4632 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) { 4633 FREE_LOCK(&lk); 4634 panic("softdep_update_inodeblock: bad delta"); 4635 } 4636 /* 4637 * Changes have been initiated. Anything depending on these 4638 * changes cannot occur until this inode has been written. 4639 */ 4640 inodedep->id_state &= ~COMPLETE; 4641 if ((inodedep->id_state & ONWORKLIST) == 0) 4642 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 4643 /* 4644 * Any new dependencies associated with the incore inode must 4645 * now be moved to the list associated with the buffer holding 4646 * the in-memory copy of the inode. Once merged process any 4647 * allocdirects that are completed by the merger. 4648 */ 4649 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 4650 if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL) 4651 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt)); 4652 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 4653 if (TAILQ_FIRST(&inodedep->id_extupdt) != NULL) 4654 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt)); 4655 /* 4656 * Now that the inode has been pushed into the buffer, the 4657 * operations dependent on the inode being written to disk 4658 * can be moved to the id_bufwait so that they will be 4659 * processed when the buffer I/O completes. 4660 */ 4661 while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) { 4662 WORKLIST_REMOVE(wk); 4663 WORKLIST_INSERT(&inodedep->id_bufwait, wk); 4664 } 4665 /* 4666 * Newly allocated inodes cannot be written until the bitmap 4667 * that allocates them have been written (indicated by 4668 * DEPCOMPLETE being set in id_state). If we are doing a 4669 * forced sync (e.g., an fsync on a file), we force the bitmap 4670 * to be written so that the update can be done. 4671 */ 4672 if ((inodedep->id_state & DEPCOMPLETE) != 0 || waitfor == 0) { 4673 FREE_LOCK(&lk); 4674 return; 4675 } 4676 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 4677 FREE_LOCK(&lk); 4678 if (gotit && 4679 (error = BUF_WRITE(inodedep->id_buf)) != 0) 4680 softdep_error("softdep_update_inodeblock: bwrite", error); 4681 if ((inodedep->id_state & DEPCOMPLETE) == 0) 4682 panic("softdep_update_inodeblock: update failed"); 4683 } 4684 4685 /* 4686 * Merge the a new inode dependency list (such as id_newinoupdt) into an 4687 * old inode dependency list (such as id_inoupdt). This routine must be 4688 * called with splbio interrupts blocked. 4689 */ 4690 static void 4691 merge_inode_lists(newlisthead, oldlisthead) 4692 struct allocdirectlst *newlisthead; 4693 struct allocdirectlst *oldlisthead; 4694 { 4695 struct allocdirect *listadp, *newadp; 4696 4697 newadp = TAILQ_FIRST(newlisthead); 4698 for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) { 4699 if (listadp->ad_lbn < newadp->ad_lbn) { 4700 listadp = TAILQ_NEXT(listadp, ad_next); 4701 continue; 4702 } 4703 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4704 TAILQ_INSERT_BEFORE(listadp, newadp, ad_next); 4705 if (listadp->ad_lbn == newadp->ad_lbn) { 4706 allocdirect_merge(oldlisthead, newadp, 4707 listadp); 4708 listadp = newadp; 4709 } 4710 newadp = TAILQ_FIRST(newlisthead); 4711 } 4712 while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) { 4713 TAILQ_REMOVE(newlisthead, newadp, ad_next); 4714 TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next); 4715 } 4716 } 4717 4718 /* 4719 * If we are doing an fsync, then we must ensure that any directory 4720 * entries for the inode have been written after the inode gets to disk. 4721 */ 4722 int 4723 softdep_fsync(vp) 4724 struct vnode *vp; /* the "in_core" copy of the inode */ 4725 { 4726 struct inodedep *inodedep; 4727 struct pagedep *pagedep; 4728 struct worklist *wk; 4729 struct diradd *dap; 4730 struct mount *mnt; 4731 struct vnode *pvp; 4732 struct inode *ip; 4733 struct buf *bp; 4734 struct fs *fs; 4735 struct thread *td = curthread; 4736 int error, flushparent; 4737 ino_t parentino; 4738 ufs_lbn_t lbn; 4739 4740 ip = VTOI(vp); 4741 fs = ip->i_fs; 4742 ACQUIRE_LOCK(&lk); 4743 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) { 4744 FREE_LOCK(&lk); 4745 return (0); 4746 } 4747 if (LIST_FIRST(&inodedep->id_inowait) != NULL || 4748 LIST_FIRST(&inodedep->id_bufwait) != NULL || 4749 TAILQ_FIRST(&inodedep->id_extupdt) != NULL || 4750 TAILQ_FIRST(&inodedep->id_newextupdt) != NULL || 4751 TAILQ_FIRST(&inodedep->id_inoupdt) != NULL || 4752 TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL) { 4753 FREE_LOCK(&lk); 4754 panic("softdep_fsync: pending ops"); 4755 } 4756 for (error = 0, flushparent = 0; ; ) { 4757 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 4758 break; 4759 if (wk->wk_type != D_DIRADD) { 4760 FREE_LOCK(&lk); 4761 panic("softdep_fsync: Unexpected type %s", 4762 TYPENAME(wk->wk_type)); 4763 } 4764 dap = WK_DIRADD(wk); 4765 /* 4766 * Flush our parent if this directory entry has a MKDIR_PARENT 4767 * dependency or is contained in a newly allocated block. 4768 */ 4769 if (dap->da_state & DIRCHG) 4770 pagedep = dap->da_previous->dm_pagedep; 4771 else 4772 pagedep = dap->da_pagedep; 4773 mnt = pagedep->pd_mnt; 4774 parentino = pagedep->pd_ino; 4775 lbn = pagedep->pd_lbn; 4776 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) { 4777 FREE_LOCK(&lk); 4778 panic("softdep_fsync: dirty"); 4779 } 4780 if ((dap->da_state & MKDIR_PARENT) || 4781 (pagedep->pd_state & NEWBLOCK)) 4782 flushparent = 1; 4783 else 4784 flushparent = 0; 4785 /* 4786 * If we are being fsync'ed as part of vgone'ing this vnode, 4787 * then we will not be able to release and recover the 4788 * vnode below, so we just have to give up on writing its 4789 * directory entry out. It will eventually be written, just 4790 * not now, but then the user was not asking to have it 4791 * written, so we are not breaking any promises. 4792 */ 4793 mp_fixme("This operation is not atomic wrt the rest of the code"); 4794 VI_LOCK(vp); 4795 if (vp->v_iflag & VI_XLOCK) { 4796 VI_UNLOCK(vp); 4797 break; 4798 } else 4799 VI_UNLOCK(vp); 4800 /* 4801 * We prevent deadlock by always fetching inodes from the 4802 * root, moving down the directory tree. Thus, when fetching 4803 * our parent directory, we first try to get the lock. If 4804 * that fails, we must unlock ourselves before requesting 4805 * the lock on our parent. See the comment in ufs_lookup 4806 * for details on possible races. 4807 */ 4808 FREE_LOCK(&lk); 4809 if (VFS_VGET(mnt, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp)) { 4810 VOP_UNLOCK(vp, 0, td); 4811 error = VFS_VGET(mnt, parentino, LK_EXCLUSIVE, &pvp); 4812 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 4813 if (error != 0) 4814 return (error); 4815 } 4816 /* 4817 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 4818 * that are contained in direct blocks will be resolved by 4819 * doing a UFS_UPDATE. Pagedeps contained in indirect blocks 4820 * may require a complete sync'ing of the directory. So, we 4821 * try the cheap and fast UFS_UPDATE first, and if that fails, 4822 * then we do the slower VOP_FSYNC of the directory. 4823 */ 4824 if (flushparent) { 4825 if ((error = UFS_UPDATE(pvp, 1)) != 0) { 4826 vput(pvp); 4827 return (error); 4828 } 4829 if ((pagedep->pd_state & NEWBLOCK) && 4830 (error = VOP_FSYNC(pvp, td->td_ucred, MNT_WAIT, td))) { 4831 vput(pvp); 4832 return (error); 4833 } 4834 } 4835 /* 4836 * Flush directory page containing the inode's name. 4837 */ 4838 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 4839 &bp); 4840 if (error == 0) 4841 error = BUF_WRITE(bp); 4842 else 4843 brelse(bp); 4844 vput(pvp); 4845 if (error != 0) 4846 return (error); 4847 ACQUIRE_LOCK(&lk); 4848 if (inodedep_lookup(fs, ip->i_number, 0, &inodedep) == 0) 4849 break; 4850 } 4851 FREE_LOCK(&lk); 4852 return (0); 4853 } 4854 4855 /* 4856 * Flush all the dirty bitmaps associated with the block device 4857 * before flushing the rest of the dirty blocks so as to reduce 4858 * the number of dependencies that will have to be rolled back. 4859 */ 4860 void 4861 softdep_fsync_mountdev(vp) 4862 struct vnode *vp; 4863 { 4864 struct buf *bp, *nbp; 4865 struct worklist *wk; 4866 4867 if (!vn_isdisk(vp, NULL)) 4868 panic("softdep_fsync_mountdev: vnode not a disk"); 4869 ACQUIRE_LOCK(&lk); 4870 VI_LOCK(vp); 4871 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 4872 nbp = TAILQ_NEXT(bp, b_vnbufs); 4873 VI_UNLOCK(vp); 4874 /* 4875 * If it is already scheduled, skip to the next buffer. 4876 */ 4877 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) { 4878 VI_LOCK(vp); 4879 continue; 4880 } 4881 if ((bp->b_flags & B_DELWRI) == 0) { 4882 FREE_LOCK(&lk); 4883 panic("softdep_fsync_mountdev: not dirty"); 4884 } 4885 /* 4886 * We are only interested in bitmaps with outstanding 4887 * dependencies. 4888 */ 4889 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL || 4890 wk->wk_type != D_BMSAFEMAP || 4891 (bp->b_xflags & BX_BKGRDINPROG)) { 4892 BUF_UNLOCK(bp); 4893 VI_LOCK(vp); 4894 continue; 4895 } 4896 bremfree(bp); 4897 FREE_LOCK(&lk); 4898 (void) bawrite(bp); 4899 ACQUIRE_LOCK(&lk); 4900 /* 4901 * Since we may have slept during the I/O, we need 4902 * to start from a known point. 4903 */ 4904 VI_LOCK(vp); 4905 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4906 } 4907 VI_UNLOCK(vp); 4908 drain_output(vp, 1); 4909 FREE_LOCK(&lk); 4910 } 4911 4912 /* 4913 * This routine is called when we are trying to synchronously flush a 4914 * file. This routine must eliminate any filesystem metadata dependencies 4915 * so that the syncing routine can succeed by pushing the dirty blocks 4916 * associated with the file. If any I/O errors occur, they are returned. 4917 */ 4918 int 4919 softdep_sync_metadata(ap) 4920 struct vop_fsync_args /* { 4921 struct vnode *a_vp; 4922 struct ucred *a_cred; 4923 int a_waitfor; 4924 struct thread *a_td; 4925 } */ *ap; 4926 { 4927 struct vnode *vp = ap->a_vp; 4928 struct pagedep *pagedep; 4929 struct allocdirect *adp; 4930 struct allocindir *aip; 4931 struct buf *bp, *nbp; 4932 struct worklist *wk; 4933 int i, error, waitfor; 4934 4935 /* 4936 * Check whether this vnode is involved in a filesystem 4937 * that is doing soft dependency processing. 4938 */ 4939 if (!vn_isdisk(vp, NULL)) { 4940 if (!DOINGSOFTDEP(vp)) 4941 return (0); 4942 } else 4943 if (vp->v_rdev->si_mountpoint == NULL || 4944 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP) == 0) 4945 return (0); 4946 /* 4947 * Ensure that any direct block dependencies have been cleared. 4948 */ 4949 ACQUIRE_LOCK(&lk); 4950 if ((error = flush_inodedep_deps(VTOI(vp)->i_fs, VTOI(vp)->i_number))) { 4951 FREE_LOCK(&lk); 4952 return (error); 4953 } 4954 /* 4955 * For most files, the only metadata dependencies are the 4956 * cylinder group maps that allocate their inode or blocks. 4957 * The block allocation dependencies can be found by traversing 4958 * the dependency lists for any buffers that remain on their 4959 * dirty buffer list. The inode allocation dependency will 4960 * be resolved when the inode is updated with MNT_WAIT. 4961 * This work is done in two passes. The first pass grabs most 4962 * of the buffers and begins asynchronously writing them. The 4963 * only way to wait for these asynchronous writes is to sleep 4964 * on the filesystem vnode which may stay busy for a long time 4965 * if the filesystem is active. So, instead, we make a second 4966 * pass over the dependencies blocking on each write. In the 4967 * usual case we will be blocking against a write that we 4968 * initiated, so when it is done the dependency will have been 4969 * resolved. Thus the second pass is expected to end quickly. 4970 */ 4971 waitfor = MNT_NOWAIT; 4972 top: 4973 /* 4974 * We must wait for any I/O in progress to finish so that 4975 * all potential buffers on the dirty list will be visible. 4976 */ 4977 drain_output(vp, 1); 4978 if (getdirtybuf(&TAILQ_FIRST(&vp->v_dirtyblkhd), MNT_WAIT) == 0) { 4979 FREE_LOCK(&lk); 4980 return (0); 4981 } 4982 mp_fixme("The locking is somewhat complicated nonexistant here."); 4983 bp = TAILQ_FIRST(&vp->v_dirtyblkhd); 4984 /* While syncing snapshots, we must allow recursive lookups */ 4985 bp->b_lock.lk_flags |= LK_CANRECURSE; 4986 loop: 4987 /* 4988 * As we hold the buffer locked, none of its dependencies 4989 * will disappear. 4990 */ 4991 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 4992 switch (wk->wk_type) { 4993 4994 case D_ALLOCDIRECT: 4995 adp = WK_ALLOCDIRECT(wk); 4996 if (adp->ad_state & DEPCOMPLETE) 4997 continue; 4998 nbp = adp->ad_buf; 4999 if (getdirtybuf(&nbp, waitfor) == 0) 5000 continue; 5001 FREE_LOCK(&lk); 5002 if (waitfor == MNT_NOWAIT) { 5003 bawrite(nbp); 5004 } else if ((error = BUF_WRITE(nbp)) != 0) { 5005 break; 5006 } 5007 ACQUIRE_LOCK(&lk); 5008 continue; 5009 5010 case D_ALLOCINDIR: 5011 aip = WK_ALLOCINDIR(wk); 5012 if (aip->ai_state & DEPCOMPLETE) 5013 continue; 5014 nbp = aip->ai_buf; 5015 if (getdirtybuf(&nbp, waitfor) == 0) 5016 continue; 5017 FREE_LOCK(&lk); 5018 if (waitfor == MNT_NOWAIT) { 5019 bawrite(nbp); 5020 } else if ((error = BUF_WRITE(nbp)) != 0) { 5021 break; 5022 } 5023 ACQUIRE_LOCK(&lk); 5024 continue; 5025 5026 case D_INDIRDEP: 5027 restart: 5028 5029 LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) { 5030 if (aip->ai_state & DEPCOMPLETE) 5031 continue; 5032 nbp = aip->ai_buf; 5033 if (getdirtybuf(&nbp, MNT_WAIT) == 0) 5034 goto restart; 5035 FREE_LOCK(&lk); 5036 if ((error = BUF_WRITE(nbp)) != 0) { 5037 break; 5038 } 5039 ACQUIRE_LOCK(&lk); 5040 goto restart; 5041 } 5042 continue; 5043 5044 case D_INODEDEP: 5045 if ((error = flush_inodedep_deps(WK_INODEDEP(wk)->id_fs, 5046 WK_INODEDEP(wk)->id_ino)) != 0) { 5047 FREE_LOCK(&lk); 5048 break; 5049 } 5050 continue; 5051 5052 case D_PAGEDEP: 5053 /* 5054 * We are trying to sync a directory that may 5055 * have dependencies on both its own metadata 5056 * and/or dependencies on the inodes of any 5057 * recently allocated files. We walk its diradd 5058 * lists pushing out the associated inode. 5059 */ 5060 pagedep = WK_PAGEDEP(wk); 5061 for (i = 0; i < DAHASHSZ; i++) { 5062 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0) 5063 continue; 5064 if ((error = 5065 flush_pagedep_deps(vp, pagedep->pd_mnt, 5066 &pagedep->pd_diraddhd[i]))) { 5067 FREE_LOCK(&lk); 5068 break; 5069 } 5070 } 5071 continue; 5072 5073 case D_MKDIR: 5074 /* 5075 * This case should never happen if the vnode has 5076 * been properly sync'ed. However, if this function 5077 * is used at a place where the vnode has not yet 5078 * been sync'ed, this dependency can show up. So, 5079 * rather than panic, just flush it. 5080 */ 5081 nbp = WK_MKDIR(wk)->md_buf; 5082 if (getdirtybuf(&nbp, waitfor) == 0) 5083 continue; 5084 FREE_LOCK(&lk); 5085 if (waitfor == MNT_NOWAIT) { 5086 bawrite(nbp); 5087 } else if ((error = BUF_WRITE(nbp)) != 0) { 5088 break; 5089 } 5090 ACQUIRE_LOCK(&lk); 5091 continue; 5092 5093 case D_BMSAFEMAP: 5094 /* 5095 * This case should never happen if the vnode has 5096 * been properly sync'ed. However, if this function 5097 * is used at a place where the vnode has not yet 5098 * been sync'ed, this dependency can show up. So, 5099 * rather than panic, just flush it. 5100 */ 5101 nbp = WK_BMSAFEMAP(wk)->sm_buf; 5102 if (getdirtybuf(&nbp, waitfor) == 0) 5103 continue; 5104 FREE_LOCK(&lk); 5105 if (waitfor == MNT_NOWAIT) { 5106 bawrite(nbp); 5107 } else if ((error = BUF_WRITE(nbp)) != 0) { 5108 break; 5109 } 5110 ACQUIRE_LOCK(&lk); 5111 continue; 5112 5113 default: 5114 FREE_LOCK(&lk); 5115 panic("softdep_sync_metadata: Unknown type %s", 5116 TYPENAME(wk->wk_type)); 5117 /* NOTREACHED */ 5118 } 5119 /* We reach here only in error and unlocked */ 5120 if (error == 0) 5121 panic("softdep_sync_metadata: zero error"); 5122 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5123 bawrite(bp); 5124 return (error); 5125 } 5126 (void) getdirtybuf(&TAILQ_NEXT(bp, b_vnbufs), MNT_WAIT); 5127 nbp = TAILQ_NEXT(bp, b_vnbufs); 5128 FREE_LOCK(&lk); 5129 bp->b_lock.lk_flags &= ~LK_CANRECURSE; 5130 bawrite(bp); 5131 ACQUIRE_LOCK(&lk); 5132 if (nbp != NULL) { 5133 bp = nbp; 5134 goto loop; 5135 } 5136 /* 5137 * The brief unlock is to allow any pent up dependency 5138 * processing to be done. Then proceed with the second pass. 5139 */ 5140 if (waitfor == MNT_NOWAIT) { 5141 waitfor = MNT_WAIT; 5142 FREE_LOCK(&lk); 5143 ACQUIRE_LOCK(&lk); 5144 goto top; 5145 } 5146 5147 /* 5148 * If we have managed to get rid of all the dirty buffers, 5149 * then we are done. For certain directories and block 5150 * devices, we may need to do further work. 5151 * 5152 * We must wait for any I/O in progress to finish so that 5153 * all potential buffers on the dirty list will be visible. 5154 */ 5155 drain_output(vp, 1); 5156 if (TAILQ_FIRST(&vp->v_dirtyblkhd) == NULL) { 5157 FREE_LOCK(&lk); 5158 return (0); 5159 } 5160 5161 FREE_LOCK(&lk); 5162 /* 5163 * If we are trying to sync a block device, some of its buffers may 5164 * contain metadata that cannot be written until the contents of some 5165 * partially written files have been written to disk. The only easy 5166 * way to accomplish this is to sync the entire filesystem (luckily 5167 * this happens rarely). 5168 */ 5169 if (vn_isdisk(vp, NULL) && 5170 vp->v_rdev->si_mountpoint && !VOP_ISLOCKED(vp, NULL) && 5171 (error = VFS_SYNC(vp->v_rdev->si_mountpoint, MNT_WAIT, ap->a_cred, 5172 ap->a_td)) != 0) 5173 return (error); 5174 return (0); 5175 } 5176 5177 /* 5178 * Flush the dependencies associated with an inodedep. 5179 * Called with splbio blocked. 5180 */ 5181 static int 5182 flush_inodedep_deps(fs, ino) 5183 struct fs *fs; 5184 ino_t ino; 5185 { 5186 struct inodedep *inodedep; 5187 int error, waitfor; 5188 5189 /* 5190 * This work is done in two passes. The first pass grabs most 5191 * of the buffers and begins asynchronously writing them. The 5192 * only way to wait for these asynchronous writes is to sleep 5193 * on the filesystem vnode which may stay busy for a long time 5194 * if the filesystem is active. So, instead, we make a second 5195 * pass over the dependencies blocking on each write. In the 5196 * usual case we will be blocking against a write that we 5197 * initiated, so when it is done the dependency will have been 5198 * resolved. Thus the second pass is expected to end quickly. 5199 * We give a brief window at the top of the loop to allow 5200 * any pending I/O to complete. 5201 */ 5202 for (error = 0, waitfor = MNT_NOWAIT; ; ) { 5203 if (error) 5204 return (error); 5205 FREE_LOCK(&lk); 5206 ACQUIRE_LOCK(&lk); 5207 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5208 return (0); 5209 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) || 5210 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) || 5211 flush_deplist(&inodedep->id_extupdt, waitfor, &error) || 5212 flush_deplist(&inodedep->id_newextupdt, waitfor, &error)) 5213 continue; 5214 /* 5215 * If pass2, we are done, otherwise do pass 2. 5216 */ 5217 if (waitfor == MNT_WAIT) 5218 break; 5219 waitfor = MNT_WAIT; 5220 } 5221 /* 5222 * Try freeing inodedep in case all dependencies have been removed. 5223 */ 5224 if (inodedep_lookup(fs, ino, 0, &inodedep) != 0) 5225 (void) free_inodedep(inodedep); 5226 return (0); 5227 } 5228 5229 /* 5230 * Flush an inode dependency list. 5231 * Called with splbio blocked. 5232 */ 5233 static int 5234 flush_deplist(listhead, waitfor, errorp) 5235 struct allocdirectlst *listhead; 5236 int waitfor; 5237 int *errorp; 5238 { 5239 struct allocdirect *adp; 5240 struct buf *bp; 5241 5242 TAILQ_FOREACH(adp, listhead, ad_next) { 5243 if (adp->ad_state & DEPCOMPLETE) 5244 continue; 5245 bp = adp->ad_buf; 5246 if (getdirtybuf(&bp, waitfor) == 0) { 5247 if (waitfor == MNT_NOWAIT) 5248 continue; 5249 return (1); 5250 } 5251 FREE_LOCK(&lk); 5252 if (waitfor == MNT_NOWAIT) { 5253 bawrite(bp); 5254 } else if ((*errorp = BUF_WRITE(bp)) != 0) { 5255 ACQUIRE_LOCK(&lk); 5256 return (1); 5257 } 5258 ACQUIRE_LOCK(&lk); 5259 return (1); 5260 } 5261 return (0); 5262 } 5263 5264 /* 5265 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 5266 * Called with splbio blocked. 5267 */ 5268 static int 5269 flush_pagedep_deps(pvp, mp, diraddhdp) 5270 struct vnode *pvp; 5271 struct mount *mp; 5272 struct diraddhd *diraddhdp; 5273 { 5274 struct thread *td = curthread; 5275 struct inodedep *inodedep; 5276 struct ufsmount *ump; 5277 struct diradd *dap; 5278 struct vnode *vp; 5279 int gotit, error = 0; 5280 struct buf *bp; 5281 ino_t inum; 5282 5283 ump = VFSTOUFS(mp); 5284 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 5285 /* 5286 * Flush ourselves if this directory entry 5287 * has a MKDIR_PARENT dependency. 5288 */ 5289 if (dap->da_state & MKDIR_PARENT) { 5290 FREE_LOCK(&lk); 5291 if ((error = UFS_UPDATE(pvp, 1)) != 0) 5292 break; 5293 ACQUIRE_LOCK(&lk); 5294 /* 5295 * If that cleared dependencies, go on to next. 5296 */ 5297 if (dap != LIST_FIRST(diraddhdp)) 5298 continue; 5299 if (dap->da_state & MKDIR_PARENT) { 5300 FREE_LOCK(&lk); 5301 panic("flush_pagedep_deps: MKDIR_PARENT"); 5302 } 5303 } 5304 /* 5305 * A newly allocated directory must have its "." and 5306 * ".." entries written out before its name can be 5307 * committed in its parent. We do not want or need 5308 * the full semantics of a synchronous VOP_FSYNC as 5309 * that may end up here again, once for each directory 5310 * level in the filesystem. Instead, we push the blocks 5311 * and wait for them to clear. We have to fsync twice 5312 * because the first call may choose to defer blocks 5313 * that still have dependencies, but deferral will 5314 * happen at most once. 5315 */ 5316 inum = dap->da_newinum; 5317 if (dap->da_state & MKDIR_BODY) { 5318 FREE_LOCK(&lk); 5319 if ((error = VFS_VGET(mp, inum, LK_EXCLUSIVE, &vp))) 5320 break; 5321 if ((error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td)) || 5322 (error=VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) { 5323 vput(vp); 5324 break; 5325 } 5326 drain_output(vp, 0); 5327 vput(vp); 5328 ACQUIRE_LOCK(&lk); 5329 /* 5330 * If that cleared dependencies, go on to next. 5331 */ 5332 if (dap != LIST_FIRST(diraddhdp)) 5333 continue; 5334 if (dap->da_state & MKDIR_BODY) { 5335 FREE_LOCK(&lk); 5336 panic("flush_pagedep_deps: MKDIR_BODY"); 5337 } 5338 } 5339 /* 5340 * Flush the inode on which the directory entry depends. 5341 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 5342 * the only remaining dependency is that the updated inode 5343 * count must get pushed to disk. The inode has already 5344 * been pushed into its inode buffer (via VOP_UPDATE) at 5345 * the time of the reference count change. So we need only 5346 * locate that buffer, ensure that there will be no rollback 5347 * caused by a bitmap dependency, then write the inode buffer. 5348 */ 5349 if (inodedep_lookup(ump->um_fs, inum, 0, &inodedep) == 0) { 5350 FREE_LOCK(&lk); 5351 panic("flush_pagedep_deps: lost inode"); 5352 } 5353 /* 5354 * If the inode still has bitmap dependencies, 5355 * push them to disk. 5356 */ 5357 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5358 gotit = getdirtybuf(&inodedep->id_buf, MNT_WAIT); 5359 FREE_LOCK(&lk); 5360 if (gotit && 5361 (error = BUF_WRITE(inodedep->id_buf)) != 0) 5362 break; 5363 ACQUIRE_LOCK(&lk); 5364 if (dap != LIST_FIRST(diraddhdp)) 5365 continue; 5366 } 5367 /* 5368 * If the inode is still sitting in a buffer waiting 5369 * to be written, push it to disk. 5370 */ 5371 FREE_LOCK(&lk); 5372 if ((error = bread(ump->um_devvp, 5373 fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)), 5374 (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) { 5375 brelse(bp); 5376 break; 5377 } 5378 if ((error = BUF_WRITE(bp)) != 0) 5379 break; 5380 ACQUIRE_LOCK(&lk); 5381 /* 5382 * If we have failed to get rid of all the dependencies 5383 * then something is seriously wrong. 5384 */ 5385 if (dap == LIST_FIRST(diraddhdp)) { 5386 FREE_LOCK(&lk); 5387 panic("flush_pagedep_deps: flush failed"); 5388 } 5389 } 5390 if (error) 5391 ACQUIRE_LOCK(&lk); 5392 return (error); 5393 } 5394 5395 /* 5396 * A large burst of file addition or deletion activity can drive the 5397 * memory load excessively high. First attempt to slow things down 5398 * using the techniques below. If that fails, this routine requests 5399 * the offending operations to fall back to running synchronously 5400 * until the memory load returns to a reasonable level. 5401 */ 5402 int 5403 softdep_slowdown(vp) 5404 struct vnode *vp; 5405 { 5406 int max_softdeps_hard; 5407 5408 max_softdeps_hard = max_softdeps * 11 / 10; 5409 if (num_dirrem < max_softdeps_hard / 2 && 5410 num_inodedep < max_softdeps_hard) 5411 return (0); 5412 stat_sync_limit_hit += 1; 5413 return (1); 5414 } 5415 5416 /* 5417 * Called by the allocation routines when they are about to fail 5418 * in the hope that we can free up some disk space. 5419 * 5420 * First check to see if the work list has anything on it. If it has, 5421 * clean up entries until we successfully free some space. Because this 5422 * process holds inodes locked, we cannot handle any remove requests 5423 * that might block on a locked inode as that could lead to deadlock. 5424 * If the worklist yields no free space, encourage the syncer daemon 5425 * to help us. In no event will we try for longer than tickdelay seconds. 5426 */ 5427 int 5428 softdep_request_cleanup(fs, vp) 5429 struct fs *fs; 5430 struct vnode *vp; 5431 { 5432 long starttime; 5433 ufs2_daddr_t needed; 5434 5435 needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize; 5436 starttime = time_second + tickdelay; 5437 /* 5438 * If we are being called because of a process doing a 5439 * copy-on-write, then it is not safe to update the vnode 5440 * as we may recurse into the copy-on-write routine. 5441 */ 5442 if ((curthread->td_proc->p_flag & P_COWINPROGRESS) == 0 && 5443 UFS_UPDATE(vp, 1) != 0) 5444 return (0); 5445 while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) { 5446 if (time_second > starttime) 5447 return (0); 5448 if (num_on_worklist > 0 && 5449 process_worklist_item(NULL, LK_NOWAIT) != -1) { 5450 stat_worklist_push += 1; 5451 continue; 5452 } 5453 request_cleanup(FLUSH_REMOVE_WAIT, 0); 5454 } 5455 return (1); 5456 } 5457 5458 /* 5459 * If memory utilization has gotten too high, deliberately slow things 5460 * down and speed up the I/O processing. 5461 */ 5462 static int 5463 request_cleanup(resource, islocked) 5464 int resource; 5465 int islocked; 5466 { 5467 struct thread *td = curthread; 5468 5469 /* 5470 * We never hold up the filesystem syncer process. 5471 */ 5472 if (td == filesys_syncer) 5473 return (0); 5474 /* 5475 * First check to see if the work list has gotten backlogged. 5476 * If it has, co-opt this process to help clean up two entries. 5477 * Because this process may hold inodes locked, we cannot 5478 * handle any remove requests that might block on a locked 5479 * inode as that could lead to deadlock. 5480 */ 5481 if (num_on_worklist > max_softdeps / 10) { 5482 if (islocked) 5483 FREE_LOCK(&lk); 5484 process_worklist_item(NULL, LK_NOWAIT); 5485 process_worklist_item(NULL, LK_NOWAIT); 5486 stat_worklist_push += 2; 5487 if (islocked) 5488 ACQUIRE_LOCK(&lk); 5489 return(1); 5490 } 5491 /* 5492 * Next, we attempt to speed up the syncer process. If that 5493 * is successful, then we allow the process to continue. 5494 */ 5495 if (speedup_syncer() && resource != FLUSH_REMOVE_WAIT) 5496 return(0); 5497 /* 5498 * If we are resource constrained on inode dependencies, try 5499 * flushing some dirty inodes. Otherwise, we are constrained 5500 * by file deletions, so try accelerating flushes of directories 5501 * with removal dependencies. We would like to do the cleanup 5502 * here, but we probably hold an inode locked at this point and 5503 * that might deadlock against one that we try to clean. So, 5504 * the best that we can do is request the syncer daemon to do 5505 * the cleanup for us. 5506 */ 5507 switch (resource) { 5508 5509 case FLUSH_INODES: 5510 stat_ino_limit_push += 1; 5511 req_clear_inodedeps += 1; 5512 stat_countp = &stat_ino_limit_hit; 5513 break; 5514 5515 case FLUSH_REMOVE: 5516 case FLUSH_REMOVE_WAIT: 5517 stat_blk_limit_push += 1; 5518 req_clear_remove += 1; 5519 stat_countp = &stat_blk_limit_hit; 5520 break; 5521 5522 default: 5523 if (islocked) 5524 FREE_LOCK(&lk); 5525 panic("request_cleanup: unknown type"); 5526 } 5527 /* 5528 * Hopefully the syncer daemon will catch up and awaken us. 5529 * We wait at most tickdelay before proceeding in any case. 5530 */ 5531 if (islocked == 0) 5532 ACQUIRE_LOCK(&lk); 5533 proc_waiting += 1; 5534 if (handle.callout == NULL) 5535 handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2); 5536 interlocked_sleep(&lk, SLEEP, (caddr_t)&proc_waiting, NULL, PPAUSE, 5537 "softupdate", 0); 5538 proc_waiting -= 1; 5539 if (islocked == 0) 5540 FREE_LOCK(&lk); 5541 return (1); 5542 } 5543 5544 /* 5545 * Awaken processes pausing in request_cleanup and clear proc_waiting 5546 * to indicate that there is no longer a timer running. 5547 */ 5548 static void 5549 pause_timer(arg) 5550 void *arg; 5551 { 5552 5553 *stat_countp += 1; 5554 wakeup_one(&proc_waiting); 5555 if (proc_waiting > 0) 5556 handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2); 5557 else 5558 handle.callout = NULL; 5559 } 5560 5561 /* 5562 * Flush out a directory with at least one removal dependency in an effort to 5563 * reduce the number of dirrem, freefile, and freeblks dependency structures. 5564 */ 5565 static void 5566 clear_remove(td) 5567 struct thread *td; 5568 { 5569 struct pagedep_hashhead *pagedephd; 5570 struct pagedep *pagedep; 5571 static int next = 0; 5572 struct mount *mp; 5573 struct vnode *vp; 5574 int error, cnt; 5575 ino_t ino; 5576 5577 ACQUIRE_LOCK(&lk); 5578 for (cnt = 0; cnt < pagedep_hash; cnt++) { 5579 pagedephd = &pagedep_hashtbl[next++]; 5580 if (next >= pagedep_hash) 5581 next = 0; 5582 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 5583 if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL) 5584 continue; 5585 mp = pagedep->pd_mnt; 5586 ino = pagedep->pd_ino; 5587 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5588 continue; 5589 FREE_LOCK(&lk); 5590 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp))) { 5591 softdep_error("clear_remove: vget", error); 5592 vn_finished_write(mp); 5593 return; 5594 } 5595 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5596 softdep_error("clear_remove: fsync", error); 5597 drain_output(vp, 0); 5598 vput(vp); 5599 vn_finished_write(mp); 5600 return; 5601 } 5602 } 5603 FREE_LOCK(&lk); 5604 } 5605 5606 /* 5607 * Clear out a block of dirty inodes in an effort to reduce 5608 * the number of inodedep dependency structures. 5609 */ 5610 static void 5611 clear_inodedeps(td) 5612 struct thread *td; 5613 { 5614 struct inodedep_hashhead *inodedephd; 5615 struct inodedep *inodedep; 5616 static int next = 0; 5617 struct mount *mp; 5618 struct vnode *vp; 5619 struct fs *fs; 5620 int error, cnt; 5621 ino_t firstino, lastino, ino; 5622 5623 ACQUIRE_LOCK(&lk); 5624 /* 5625 * Pick a random inode dependency to be cleared. 5626 * We will then gather up all the inodes in its block 5627 * that have dependencies and flush them out. 5628 */ 5629 for (cnt = 0; cnt < inodedep_hash; cnt++) { 5630 inodedephd = &inodedep_hashtbl[next++]; 5631 if (next >= inodedep_hash) 5632 next = 0; 5633 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 5634 break; 5635 } 5636 if (inodedep == NULL) 5637 return; 5638 /* 5639 * Ugly code to find mount point given pointer to superblock. 5640 */ 5641 fs = inodedep->id_fs; 5642 TAILQ_FOREACH(mp, &mountlist, mnt_list) 5643 if ((mp->mnt_flag & MNT_SOFTDEP) && fs == VFSTOUFS(mp)->um_fs) 5644 break; 5645 /* 5646 * Find the last inode in the block with dependencies. 5647 */ 5648 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 5649 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 5650 if (inodedep_lookup(fs, lastino, 0, &inodedep) != 0) 5651 break; 5652 /* 5653 * Asynchronously push all but the last inode with dependencies. 5654 * Synchronously push the last inode with dependencies to ensure 5655 * that the inode block gets written to free up the inodedeps. 5656 */ 5657 for (ino = firstino; ino <= lastino; ino++) { 5658 if (inodedep_lookup(fs, ino, 0, &inodedep) == 0) 5659 continue; 5660 FREE_LOCK(&lk); 5661 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 5662 continue; 5663 if ((error = VFS_VGET(mp, ino, LK_EXCLUSIVE, &vp)) != 0) { 5664 softdep_error("clear_inodedeps: vget", error); 5665 vn_finished_write(mp); 5666 return; 5667 } 5668 if (ino == lastino) { 5669 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_WAIT, td))) 5670 softdep_error("clear_inodedeps: fsync1", error); 5671 } else { 5672 if ((error = VOP_FSYNC(vp, td->td_ucred, MNT_NOWAIT, td))) 5673 softdep_error("clear_inodedeps: fsync2", error); 5674 drain_output(vp, 0); 5675 } 5676 vput(vp); 5677 vn_finished_write(mp); 5678 ACQUIRE_LOCK(&lk); 5679 } 5680 FREE_LOCK(&lk); 5681 } 5682 5683 /* 5684 * Function to determine if the buffer has outstanding dependencies 5685 * that will cause a roll-back if the buffer is written. If wantcount 5686 * is set, return number of dependencies, otherwise just yes or no. 5687 */ 5688 static int 5689 softdep_count_dependencies(bp, wantcount) 5690 struct buf *bp; 5691 int wantcount; 5692 { 5693 struct worklist *wk; 5694 struct inodedep *inodedep; 5695 struct indirdep *indirdep; 5696 struct allocindir *aip; 5697 struct pagedep *pagedep; 5698 struct diradd *dap; 5699 int i, retval; 5700 5701 retval = 0; 5702 ACQUIRE_LOCK(&lk); 5703 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5704 switch (wk->wk_type) { 5705 5706 case D_INODEDEP: 5707 inodedep = WK_INODEDEP(wk); 5708 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 5709 /* bitmap allocation dependency */ 5710 retval += 1; 5711 if (!wantcount) 5712 goto out; 5713 } 5714 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 5715 /* direct block pointer dependency */ 5716 retval += 1; 5717 if (!wantcount) 5718 goto out; 5719 } 5720 if (TAILQ_FIRST(&inodedep->id_extupdt)) { 5721 /* direct block pointer dependency */ 5722 retval += 1; 5723 if (!wantcount) 5724 goto out; 5725 } 5726 continue; 5727 5728 case D_INDIRDEP: 5729 indirdep = WK_INDIRDEP(wk); 5730 5731 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 5732 /* indirect block pointer dependency */ 5733 retval += 1; 5734 if (!wantcount) 5735 goto out; 5736 } 5737 continue; 5738 5739 case D_PAGEDEP: 5740 pagedep = WK_PAGEDEP(wk); 5741 for (i = 0; i < DAHASHSZ; i++) { 5742 5743 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 5744 /* directory entry dependency */ 5745 retval += 1; 5746 if (!wantcount) 5747 goto out; 5748 } 5749 } 5750 continue; 5751 5752 case D_BMSAFEMAP: 5753 case D_ALLOCDIRECT: 5754 case D_ALLOCINDIR: 5755 case D_MKDIR: 5756 /* never a dependency on these blocks */ 5757 continue; 5758 5759 default: 5760 FREE_LOCK(&lk); 5761 panic("softdep_check_for_rollback: Unexpected type %s", 5762 TYPENAME(wk->wk_type)); 5763 /* NOTREACHED */ 5764 } 5765 } 5766 out: 5767 FREE_LOCK(&lk); 5768 return retval; 5769 } 5770 5771 /* 5772 * Acquire exclusive access to a buffer. 5773 * Must be called with splbio blocked. 5774 * Return 1 if buffer was acquired. 5775 */ 5776 static int 5777 getdirtybuf(bpp, waitfor) 5778 struct buf **bpp; 5779 int waitfor; 5780 { 5781 struct buf *bp; 5782 int error; 5783 5784 for (;;) { 5785 if ((bp = *bpp) == NULL) 5786 return (0); 5787 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) == 0) { 5788 if ((bp->b_xflags & BX_BKGRDINPROG) == 0) 5789 break; 5790 BUF_UNLOCK(bp); 5791 if (waitfor != MNT_WAIT) 5792 return (0); 5793 bp->b_xflags |= BX_BKGRDWAIT; 5794 interlocked_sleep(&lk, SLEEP, &bp->b_xflags, NULL, 5795 PRIBIO, "getbuf", 0); 5796 continue; 5797 } 5798 if (waitfor != MNT_WAIT) 5799 return (0); 5800 error = interlocked_sleep(&lk, LOCKBUF, bp, NULL, 5801 LK_EXCLUSIVE | LK_SLEEPFAIL, 0, 0); 5802 if (error != ENOLCK) { 5803 FREE_LOCK(&lk); 5804 panic("getdirtybuf: inconsistent lock"); 5805 } 5806 } 5807 if ((bp->b_flags & B_DELWRI) == 0) { 5808 BUF_UNLOCK(bp); 5809 return (0); 5810 } 5811 bremfree(bp); 5812 return (1); 5813 } 5814 5815 /* 5816 * Wait for pending output on a vnode to complete. 5817 * Must be called with vnode locked. 5818 */ 5819 static void 5820 drain_output(vp, islocked) 5821 struct vnode *vp; 5822 int islocked; 5823 { 5824 5825 if (!islocked) 5826 ACQUIRE_LOCK(&lk); 5827 VI_LOCK(vp); 5828 while (vp->v_numoutput) { 5829 vp->v_iflag |= VI_BWAIT; 5830 interlocked_sleep(&lk, SLEEP, (caddr_t)&vp->v_numoutput, 5831 VI_MTX(vp), PRIBIO + 1, "drainvp", 0); 5832 } 5833 VI_UNLOCK(vp); 5834 if (!islocked) 5835 FREE_LOCK(&lk); 5836 } 5837 5838 /* 5839 * Called whenever a buffer that is being invalidated or reallocated 5840 * contains dependencies. This should only happen if an I/O error has 5841 * occurred. The routine is called with the buffer locked. 5842 */ 5843 static void 5844 softdep_deallocate_dependencies(bp) 5845 struct buf *bp; 5846 { 5847 5848 if ((bp->b_ioflags & BIO_ERROR) == 0) 5849 panic("softdep_deallocate_dependencies: dangling deps"); 5850 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 5851 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 5852 } 5853 5854 /* 5855 * Function to handle asynchronous write errors in the filesystem. 5856 */ 5857 static void 5858 softdep_error(func, error) 5859 char *func; 5860 int error; 5861 { 5862 5863 /* XXX should do something better! */ 5864 printf("%s: got error %d while accessing filesystem\n", func, error); 5865 } 5866