/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_zfs.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as hitting a time or size threshold, or the execution of an
 * administrative action that must be completed in the syncing state.
 *
 * Quiescing
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them;
 * those allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the syncing state, ZFS iterates, writing
 * out data until it converges and all in-memory changes have been written
 * out. The first such pass is the largest as it encompasses all the modified
 * user data (as opposed to filesystem metadata). Subsequent passes typically
 * have far less data to write as they consist exclusively of filesystem
 * metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, contiguous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are
 * allocated, since each allocation requires a modification of persistent
 * metadata. Further, to hasten convergence, after a prescribed number of
 * passes, ZFS also defers frees and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work, such as creating and destroying snapshots
 * or datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */
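
/*
 * For orientation, a hedged sketch (illustrative only; the authoritative
 * interface is in dmu_tx.c) of how a DMU consumer's modification enters
 * these states:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);	-- joins the open txg
 *	if (error != 0)
 *		dmu_tx_abort(tx);		-- e.g. pool out of space
 *	...update in-memory structures...
 *	dmu_tx_commit(tx);
 *
 * Between assign and commit the transaction holds the txg open (see
 * txg_hold_open() and txg_rele_to_sync() below), so the quiesce thread
 * cannot finish moving that txg into the quiescing state until all such
 * transactions have committed.
 */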

static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT0(tx->tx_threads);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT0(tx->tx_threads);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86. This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}
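
/*
 * Lifecycle sketch (illustrative; the actual call sites live in the
 * dsl_pool and spa code): the subsystem must be initialized before the
 * threads are started, and the threads must be stopped before teardown,
 * as txg_fini() asserts that no threads remain:
 *
 *	txg_init(dp, initial_txg);
 *	txg_sync_start(dp);	-- spawns the quiesce and sync threads
 *	...pool is live; txgs cycle through open/quiescing/syncing...
 *	txg_sync_stop(dp);	-- syncs outstanding work, reaps both threads
 *	txg_fini(dp);
 */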

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	/*
	 * cv_wait_sig() is used instead of cv_wait() in order to prevent
	 * this process from incorrectly contributing to the system load
	 * average when idle.
	 */
	if (time) {
		(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	} else {
		cv_wait_sig(cv, &tx->tx_sync_lock);
	}

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT3U(tx->tx_threads, ==, 2);

	/*
	 * We need to ensure that we've vacated the deferred metaslab trees.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT3U(tx->tx_threads, ==, 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * The processor id is simply used as a "random" index into the
	 * tx_cpu array; there is no other significance to the chosen
	 * tx_cpu. The current cpu is a cheap choice that also spreads
	 * contention across the per-cpu structures.
	 */
	kpreempt_disable();
	tc = &tx->tx_cpu[CPU_SEQID];
	kpreempt_enable();

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}
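
/*
 * Hedged usage sketch for the hold/release pairing above (the real callers
 * are the assign and commit paths in dmu_tx.c):
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	-- join the open txg
 *	txg_rele_to_quiesce(&th);		-- drop tc_open_lock so the
 *						   txg may begin quiescing
 *	...perform in-memory updates belonging to txg...
 *	txg_rele_to_sync(&th);			-- quiescing may now finish
 *
 * txg_quiesce() below cannot complete until every hold taken against the
 * txg has been released via txg_rele_to_sync().
 */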

/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    boot_ncpus, defclsyspri, boot_ncpus,
			    boot_ncpus * 2, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}
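
/*
 * Illustrative registration of a commit callback (sketch; my_done and
 * my_state are hypothetical, while dmu_tx_callback_register() is the real
 * interface from dmu_tx.c):
 *
 *	static void
 *	my_done(void *my_state, int error)
 *	{
 *		-- error is nonzero if the txg was aborted rather than synced
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done, my_state);
 *	dmu_tx_commit(tx);
 *
 * The callback list travels through txg_register_callbacks() above and is
 * executed by the tx_commit_cb taskq once the txg has synced.
 */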

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

static boolean_t
txg_is_syncing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_syncing_txg != 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}

static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		uint64_t dirty_min_bytes =
		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

		/*
		 * We sync when we're scanning, when there's someone waiting
		 * on us, when the quiesce thread has handed off a txg to
		 * us, or when we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp) &&
		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg + 1)
				tx->tx_quiesce_txg_waiting =
				    tx->tx_open_txg + 1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us. This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}
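
/*
 * For a sense of scale (illustrative numbers only, not guaranteed defaults
 * on every platform): with zfs_dirty_data_max = 4 GiB and
 * zfs_dirty_data_sync_percent = 20, the sync thread computes
 *
 *	dirty_min_bytes = 4 GiB * 20 / 100 = ~819 MiB
 *
 * so it stops waiting and starts a sync once roughly 819 MiB of dirty data
 * have accumulated, even if the zfs_txg_timeout interval (5 seconds by
 * default) has not yet elapsed.
 */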

static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state. So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by 'delay' nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg - 1 || tx->tx_synced_txg == txg - 1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg - 1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg - 1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}
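
/*
 * Hedged usage sketch: a writer wishing to throttle itself while its txg
 * ('tx_txg' here, a hypothetical variable) is still open might do
 *
 *	txg_delay(dp, tx_txg, MSEC2NSEC(10), MSEC2NSEC(1));
 *
 * which blocks for up to 10 ms at 1 ms resolution, returning early if the
 * txg can already transition toward quiescing or the pipeline stalls.
 */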

static boolean_t
txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%px\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		if (wait_sig) {
			/*
			 * Condition wait here but stop if the thread receives
			 * a signal. The caller may call txg_wait_synced*()
			 * again to resume waiting for this txg.
			 */
			if (cv_wait_io_sig(&tx->tx_sync_done_cv,
			    &tx->tx_sync_lock) == 0) {
				mutex_exit(&tx->tx_sync_lock);
				return (B_TRUE);
			}
		} else {
			cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
	return (B_FALSE);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
}

/*
 * Similar to txg_wait_synced(), but it can be interrupted by a signal.
 * Returns B_TRUE if the thread was signaled while waiting.
 */
boolean_t
txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
{
	return (txg_wait_synced_impl(dp, txg, B_TRUE));
}
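
/*
 * Usage sketch (illustrative): a caller that must provide synchronous
 * semantics commits its transaction and then blocks until the txg it was
 * assigned to is on stable storage:
 *
 *	uint64_t my_txg = dmu_tx_get_txg(tx);	-- capture before commit
 *	dmu_tx_commit(tx);
 *	txg_wait_synced(dp, my_txg);
 *
 * Passing txg == 0 waits for the currently open txg plus TXG_DEFER_SIZE;
 * txg_sync_stop() above relies on this to vacate the deferred metaslab
 * trees before teardown.
 */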

/*
 * Wait for the specified open transaction group. Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time. Otherwise, the caller is
		 * understood to be idle and cv_wait_sig() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_sig(&tx->tx_quiesce_done_cv,
			    &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (!txg_is_syncing(dp) &&
	    !txg_is_quiescing(dp) &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing). Non-active
 * txgs should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp __maybe_unused = spa_get_dsl(spa);
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif

/*
 * Per-txg object lists: an object embeds a txg_node_t so it can be linked
 * onto one list for each in-flight txg.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty_impl(tl, i)) {
			mutex_exit(&tl->tl_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&tl->tl_lock);
	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL;
		    tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}
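
/*
 * Illustrative pattern (sketch; my_obj_t and mo_node are hypothetical):
 * an object dirtied in a txg carries an embedded txg_node_t so it can sit
 * on one list per in-flight txg, and syncing context drains the list for
 * the txg being synced:
 *
 *	typedef struct my_obj {
 *		txg_node_t	mo_node;	-- embedded list linkage
 *		...
 *	} my_obj_t;
 *
 *	txg_list_create(&tl, spa, offsetof(my_obj_t, mo_node));
 *	(void) txg_list_add(&tl, obj, txg);	-- dirty in open context
 *
 *	while ((obj = txg_list_remove(&tl, txg)) != NULL)
 *		...write obj out in syncing context...
 *
 * This loosely mirrors how dsl_pool's dirty-dataset txg_list_t is consumed
 * during sync.
 */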

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
	"Max seconds worth of delta per txg");
/* END CSTYLED */