/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;        /* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;
        int c;
        bzero(tx, sizeof (tx_state_t));

        tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

        for (c = 0; c < max_ncpus; c++) {
                int i;

                mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
                for (i = 0; i < TXG_SIZE; i++) {
                        cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
                            NULL);
                        list_create(&tx->tx_cpu[c].tc_callbacks[i],
                            sizeof (dmu_tx_callback_t),
                            offsetof(dmu_tx_callback_t, dcb_node));
                }
        }

        mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

        tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        int c;

        ASSERT(tx->tx_threads == 0);

        mutex_destroy(&tx->tx_sync_lock);

        cv_destroy(&tx->tx_sync_more_cv);
        cv_destroy(&tx->tx_sync_done_cv);
        cv_destroy(&tx->tx_quiesce_more_cv);
        cv_destroy(&tx->tx_quiesce_done_cv);
        cv_destroy(&tx->tx_exit_cv);

        for (c = 0; c < max_ncpus; c++) {
                int i;

                mutex_destroy(&tx->tx_cpu[c].tc_lock);
                for (i = 0; i < TXG_SIZE; i++) {
                        cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
                        list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
                }
        }

        if (tx->tx_commit_cb_taskq != NULL)
                taskq_destroy(tx->tx_commit_cb_taskq);

        kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

        bzero(tx, sizeof (tx_state_t));
}
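
/*
 * Illustrative bring-up/tear-down ordering (a sketch of the expected
 * caller, typically the dsl_pool code, not something defined in this
 * file; 'dp' and 'txg' are placeholders):
 *
 *      txg_init(dp, txg);      allocate per-CPU txg state; no threads yet
 *      txg_sync_start(dp);     create the quiesce and sync threads
 *      ...                     pool is active; txgs open, quiesce and
 *                              sync continuously
 *      txg_sync_stop(dp);      sync out remaining dirty data, stop threads
 *      txg_fini(dp);           destroy locks, cvs and callback lists
 */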

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);

        dprintf("pool %p\n", dp);

        ASSERT(tx->tx_threads == 0);

        tx->tx_threads = 2;

        tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
            dp, 0, &p0, TS_RUN, minclsyspri);

        /*
         * The sync thread can need a larger-than-default stack size on
         * 32-bit x86.  This is due in part to nested pools and
         * scrub_visitbp() recursion.
         */
        tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
            dp, 0, &p0, TS_RUN, minclsyspri);

        mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
        CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
        mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
        ASSERT(*tpp != NULL);
        *tpp = NULL;
        tx->tx_threads--;
        cv_broadcast(&tx->tx_exit_cv);
        CALLB_CPR_EXIT(cpr);            /* drops &tx->tx_sync_lock */
        thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
        CALLB_CPR_SAFE_BEGIN(cpr);

        if (time)
                (void) cv_timedwait(cv, &tx->tx_sync_lock,
                    ddi_get_lbolt() + time);
        else
                cv_wait(cv, &tx->tx_sync_lock);

        CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        dprintf("pool %p\n", dp);
        /*
         * Finish off any work in progress.
         */
        ASSERT(tx->tx_threads == 2);

        /*
         * We need to ensure that we've vacated the deferred space_maps.
         */
        txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

        /*
         * Wake all sync threads and wait for them to die.
         */
        mutex_enter(&tx->tx_sync_lock);

        ASSERT(tx->tx_threads == 2);

        tx->tx_exiting = 1;

        cv_broadcast(&tx->tx_quiesce_more_cv);
        cv_broadcast(&tx->tx_quiesce_done_cv);
        cv_broadcast(&tx->tx_sync_more_cv);

        while (tx->tx_threads != 0)
                cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

        tx->tx_exiting = 0;

        mutex_exit(&tx->tx_sync_lock);
}
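
/*
 * Sketch of the writer-side handle protocol provided by the functions
 * below (the dmu_tx assign/commit path is assumed to be the typical
 * caller; it is not part of this file):
 *
 *      txg_handle_t th;
 *      uint64_t txg;
 *
 *      txg = txg_hold_open(dp, &th);   join the open txg; the per-CPU
 *                                      tc_lock is held on return
 *      txg_rele_to_quiesce(&th);       drop tc_lock; the hold on 'txg'
 *                                      remains and prevents it quiescing
 *      ...                             dirty state is associated with 'txg'
 *      txg_rele_to_sync(&th);          drop the hold so 'txg' can quiesce
 */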

uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
        tx_state_t *tx = &dp->dp_tx;
        tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
        uint64_t txg;

        mutex_enter(&tc->tc_lock);

        txg = tx->tx_open_txg;
        tc->tc_count[txg & TXG_MASK]++;

        th->th_cpu = tc;
        th->th_txg = txg;

        return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
        tx_cpu_t *tc = th->th_cpu;

        mutex_exit(&tc->tc_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
        tx_cpu_t *tc = th->th_cpu;
        int g = th->th_txg & TXG_MASK;

        mutex_enter(&tc->tc_lock);
        list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
        mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
        tx_cpu_t *tc = th->th_cpu;
        int g = th->th_txg & TXG_MASK;

        mutex_enter(&tc->tc_lock);
        ASSERT(tc->tc_count[g] != 0);
        if (--tc->tc_count[g] == 0)
                cv_broadcast(&tc->tc_cv[g]);
        mutex_exit(&tc->tc_lock);

        th->th_cpu = NULL;      /* defensive */
}

static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;
        int g = txg & TXG_MASK;
        int c;

        /*
         * Grab all tx_cpu locks so nobody else can get into this txg.
         */
        for (c = 0; c < max_ncpus; c++)
                mutex_enter(&tx->tx_cpu[c].tc_lock);

        ASSERT(txg == tx->tx_open_txg);
        tx->tx_open_txg++;

        /*
         * Now that we've incremented tx_open_txg, we can let threads
         * enter the next transaction group.
         */
        for (c = 0; c < max_ncpus; c++)
                mutex_exit(&tx->tx_cpu[c].tc_lock);

        /*
         * Quiesce the transaction group by waiting for everyone to txg_exit().
         */
        for (c = 0; c < max_ncpus; c++) {
                tx_cpu_t *tc = &tx->tx_cpu[c];
                mutex_enter(&tc->tc_lock);
                while (tc->tc_count[g] != 0)
                        cv_wait(&tc->tc_cv[g], &tc->tc_lock);
                mutex_exit(&tc->tc_lock);
        }
}

static void
txg_do_callbacks(list_t *cb_list)
{
        dmu_tx_do_callbacks(cb_list, 0);

        list_destroy(cb_list);

        kmem_free(cb_list, sizeof (list_t));
}
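
/*
 * How commit callbacks flow through this file (a summary; the DMU-side
 * registration call is an assumption about the typical caller and is
 * defined elsewhere):
 *
 *      dmu_tx_callback_register(tx, func, data)
 *              the caller attaches a callback to its open transaction
 *      txg_register_callbacks(&th, cb_list)
 *              at commit time the DMU moves those callbacks onto the
 *              per-CPU tc_callbacks list for the handle's txg
 *      txg_dispatch_callbacks(dp, txg)
 *              after spa_sync() completes, the sync thread hands each
 *              CPU's list to the "tx_commit_cb" taskq, which runs
 *              txg_do_callbacks() and frees the list
 */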

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
        int c;
        tx_state_t *tx = &dp->dp_tx;
        list_t *cb_list;

        for (c = 0; c < max_ncpus; c++) {
                tx_cpu_t *tc = &tx->tx_cpu[c];
                /* No need to lock tx_cpu_t at this point */

                int g = txg & TXG_MASK;

                if (list_is_empty(&tc->tc_callbacks[g]))
                        continue;

                if (tx->tx_commit_cb_taskq == NULL) {
                        /*
                         * Commit callback taskq hasn't been created yet.
                         */
                        tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
                            max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
                            TASKQ_PREPOPULATE);
                }

                cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
                list_create(cb_list, sizeof (dmu_tx_callback_t),
                    offsetof(dmu_tx_callback_t, dcb_node));

                list_move_tail(&tc->tc_callbacks[g], cb_list);

                (void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
                    txg_do_callbacks, cb_list, TQ_SLEEP);
        }
}

static void
txg_sync_thread(dsl_pool_t *dp)
{
        spa_t *spa = dp->dp_spa;
        tx_state_t *tx = &dp->dp_tx;
        callb_cpr_t cpr;
        uint64_t start, delta;

        txg_thread_enter(tx, &cpr);

        start = delta = 0;
        for (;;) {
                uint64_t timer, timeout = zfs_txg_timeout * hz;
                uint64_t txg;

                /*
                 * We sync when we're scanning, when there's someone waiting
                 * on us, when the quiesce thread has handed off a txg to us,
                 * or when we have reached our timeout.
                 */
                timer = (delta >= timeout ? 0 : timeout - delta);
                while (!dsl_scan_active(dp->dp_scan) &&
                    !tx->tx_exiting && timer > 0 &&
                    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
                    tx->tx_quiesced_txg == 0) {
                        dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
                            tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
                        txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
                        delta = ddi_get_lbolt() - start;
                        timer = (delta > timeout ? 0 : timeout - delta);
                }

                /*
                 * Wait until the quiesce thread hands off a txg to us,
                 * prompting it to do so if necessary.
                 */
                while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
                        if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
                                tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
                        cv_broadcast(&tx->tx_quiesce_more_cv);
                        txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
                }

                if (tx->tx_exiting)
                        txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

                /*
                 * Consume the quiesced txg which has been handed off to
                 * us.  This may cause the quiescing thread to now be
                 * able to quiesce another txg, so we must signal it.
                 */
                txg = tx->tx_quiesced_txg;
                tx->tx_quiesced_txg = 0;
                tx->tx_syncing_txg = txg;
                cv_broadcast(&tx->tx_quiesce_more_cv);

                dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
                mutex_exit(&tx->tx_sync_lock);

                start = ddi_get_lbolt();
                spa_sync(spa, txg);
                delta = ddi_get_lbolt() - start;

                mutex_enter(&tx->tx_sync_lock);
                tx->tx_synced_txg = txg;
                tx->tx_syncing_txg = 0;
                cv_broadcast(&tx->tx_sync_done_cv);

                /*
                 * Dispatch commit callbacks to worker threads.
                 */
                txg_dispatch_callbacks(dp, txg);
        }
}
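
/*
 * Handoff between the two worker threads, summarized from the code
 * above and below:
 *
 *      quiesce thread                          sync thread
 *      --------------                          -----------
 *      waits on tx_quiesce_more_cv             waits on tx_sync_more_cv or
 *                                              the zfs_txg_timeout timer
 *      quiesces tx_open_txg via txg_quiesce()  consumes tx_quiesced_txg and
 *                                              sets tx_syncing_txg
 *      sets tx_quiesced_txg, then signals      signals tx_quiesce_more_cv,
 *      tx_sync_more_cv and tx_quiesce_done_cv  calls spa_sync(), then signals
 *                                              tx_sync_done_cv
 */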

static void
txg_quiesce_thread(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        callb_cpr_t cpr;

        txg_thread_enter(tx, &cpr);

        for (;;) {
                uint64_t txg;

                /*
                 * We quiesce when there's someone waiting on us.
                 * However, we can only have one txg in "quiescing" or
                 * "quiesced, waiting to sync" state.  So we wait until
                 * the "quiesced, waiting to sync" txg has been consumed
                 * by the sync thread.
                 */
                while (!tx->tx_exiting &&
                    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
                    tx->tx_quiesced_txg != 0))
                        txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

                if (tx->tx_exiting)
                        txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

                txg = tx->tx_open_txg;
                dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                    txg, tx->tx_quiesce_txg_waiting,
                    tx->tx_sync_txg_waiting);
                mutex_exit(&tx->tx_sync_lock);
                txg_quiesce(dp, txg);
                mutex_enter(&tx->tx_sync_lock);

                /*
                 * Hand this txg off to the sync thread.
                 */
                dprintf("quiesce done, handing off txg %llu\n", txg);
                tx->tx_quiesced_txg = txg;
                cv_broadcast(&tx->tx_sync_more_cv);
                cv_broadcast(&tx->tx_quiesce_done_cv);
        }
}

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
        tx_state_t *tx = &dp->dp_tx;
        clock_t timeout = ddi_get_lbolt() + ticks;

        /* don't delay if this txg could transition to quiescing immediately */
        if (tx->tx_open_txg > txg ||
            tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
                return;

        mutex_enter(&tx->tx_sync_lock);
        if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
                mutex_exit(&tx->tx_sync_lock);
                return;
        }

        while (ddi_get_lbolt() < timeout &&
            tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
                (void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
                    timeout);

        mutex_exit(&tx->tx_sync_lock);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);
        ASSERT(tx->tx_threads == 2);
        if (txg == 0)
                txg = tx->tx_open_txg + TXG_DEFER_SIZE;
        if (tx->tx_sync_txg_waiting < txg)
                tx->tx_sync_txg_waiting = txg;
        dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
            txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
        while (tx->tx_synced_txg < txg) {
                dprintf("broadcasting sync more "
                    "tx_synced=%llu waiting=%llu dp=%p\n",
                    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
                cv_broadcast(&tx->tx_sync_more_cv);
                cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
        }
        mutex_exit(&tx->tx_sync_lock);
}

void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
        tx_state_t *tx = &dp->dp_tx;

        mutex_enter(&tx->tx_sync_lock);
        ASSERT(tx->tx_threads == 2);
        if (txg == 0)
                txg = tx->tx_open_txg + 1;
        if (tx->tx_quiesce_txg_waiting < txg)
                tx->tx_quiesce_txg_waiting = txg;
        dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
            txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
        while (tx->tx_open_txg < txg) {
                cv_broadcast(&tx->tx_quiesce_more_cv);
                cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
        }
        mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;
        return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
        tx_state_t *tx = &dp->dp_tx;

        return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
            tx->tx_quiesced_txg != 0);
}
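
/*
 * Common waiting patterns (the callers are assumptions; the semantics
 * follow directly from txg_wait_synced() and txg_wait_open() above):
 *
 *      txg_wait_synced(dp, txg);       block until 'txg' is on stable
 *                                      storage
 *      txg_wait_synced(dp, 0);         block until the currently open txg
 *                                      plus TXG_DEFER_SIZE more have synced
 *      txg_wait_open(dp, 0);           block until a new txg opens, i.e.
 *                                      force the current open txg to quiesce
 */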

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
        int t;

        mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

        tl->tl_offset = offset;

        for (t = 0; t < TXG_SIZE; t++)
                tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
        int t;

        for (t = 0; t < TXG_SIZE; t++)
                ASSERT(txg_list_empty(tl, t));

        mutex_destroy(&tl->tl_lock);
}

int
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
        return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
        int already_on_list;

        mutex_enter(&tl->tl_lock);
        already_on_list = tn->tn_member[t];
        if (!already_on_list) {
                tn->tn_member[t] = 1;
                tn->tn_next[t] = tl->tl_head[t];
                tl->tl_head[t] = tn;
        }
        mutex_exit(&tl->tl_lock);

        return (already_on_list);
}

/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
        int already_on_list;

        mutex_enter(&tl->tl_lock);
        already_on_list = tn->tn_member[t];
        if (!already_on_list) {
                txg_node_t **tp;

                for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
                        continue;

                tn->tn_member[t] = 1;
                tn->tn_next[t] = NULL;
                *tp = tn;
        }
        mutex_exit(&tl->tl_lock);

        return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn;
        void *p = NULL;

        mutex_enter(&tl->tl_lock);
        if ((tn = tl->tl_head[t]) != NULL) {
                p = (char *)tn - tl->tl_offset;
                tl->tl_head[t] = tn->tn_next[t];
                tn->tn_next[t] = NULL;
                tn->tn_member[t] = 0;
        }
        mutex_exit(&tl->tl_lock);

        return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn, **tp;

        mutex_enter(&tl->tl_lock);

        for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
                if ((char *)tn - tl->tl_offset == p) {
                        *tp = tn->tn_next[t];
                        tn->tn_next[t] = NULL;
                        tn->tn_member[t] = 0;
                        mutex_exit(&tl->tl_lock);
                        return (p);
                }
        }

        mutex_exit(&tl->tl_lock);

        return (NULL);
}

int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

        return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = tl->tl_head[t];

        return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
        int t = txg & TXG_MASK;
        txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

        tn = tn->tn_next[t];

        return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
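
/*
 * Illustrative use of the per-txg lists above ('foo_t', 'fl' and
 * process() are placeholders, not names defined by this subsystem):
 *
 *      typedef struct foo {
 *              txg_node_t      foo_node;       embedded per-txg linkage
 *              ...
 *      } foo_t;
 *
 *      txg_list_create(&fl, offsetof(foo_t, foo_node));
 *      (void) txg_list_add(&fl, foo, txg);     returns 1 if 'foo' was
 *                                              already on the list for 'txg'
 *      ...
 *      while ((foo = txg_list_remove(&fl, txg)) != NULL)
 *              process(foo);                   drain work queued for 'txg'
 *      txg_list_destroy(&fl);
 */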