// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2024, Klara, Inc.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
        { "dmu_tx_assigned",            KSTAT_DATA_UINT64 },
        { "dmu_tx_delay",               KSTAT_DATA_UINT64 },
        { "dmu_tx_error",               KSTAT_DATA_UINT64 },
        { "dmu_tx_suspended",           KSTAT_DATA_UINT64 },
        { "dmu_tx_group",               KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reserve",      KSTAT_DATA_UINT64 },
        { "dmu_tx_memory_reclaim",      KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_throttle",      KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_delay",         KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_over_max",      KSTAT_DATA_UINT64 },
        { "dmu_tx_dirty_frees_delay",   KSTAT_DATA_UINT64 },
        { "dmu_tx_wrlog_delay",         KSTAT_DATA_UINT64 },
        { "dmu_tx_quota",               KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
        dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
        tx->tx_dir = dd;
        if (dd != NULL)
                tx->tx_pool = dd->dd_pool;
        list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
            offsetof(dmu_tx_hold_t, txh_node));
        list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
            offsetof(dmu_tx_callback_t, dcb_node));
        tx->tx_start = gethrtime();
        return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
        dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
        tx->tx_objset = os;
        return (tx);
}
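
/*
 * A minimal sketch of the typical consumer pattern (os, object, off, len,
 * and buf are placeholders; see dmu.h for the authoritative interface
 * description):
 *
 *      dmu_tx_t *tx = dmu_tx_create(os);
 *      dmu_tx_hold_write(tx, object, off, len);
 *      int err = dmu_tx_assign(tx, DMU_TX_WAIT);
 *      if (err != 0) {
 *              dmu_tx_abort(tx);
 *              return (err);
 *      }
 *      dmu_write(os, object, off, len, buf, tx);
 *      dmu_tx_commit(tx);
 */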

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
        dmu_tx_t *tx = dmu_tx_create_dd(NULL);

        TXG_VERIFY(dp->dp_spa, txg);
        tx->tx_pool = dp;
        tx->tx_txg = txg;
        tx->tx_anyobj = TRUE;

        return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
        return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
        dmu_tx_hold_t *txh;

        if (dn != NULL) {
                (void) zfs_refcount_add(&dn->dn_holds, tx);
                if (tx->tx_txg != 0) {
                        mutex_enter(&dn->dn_mtx);
                        /*
                         * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
                         * problem, but there's no way for it to happen (for
                         * now, at least).
                         */
                        ASSERT(dn->dn_assigned_txg == 0);
                        dn->dn_assigned_txg = tx->tx_txg;
                        (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
        }

        txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
        txh->txh_tx = tx;
        txh->txh_dnode = dn;
        zfs_refcount_create(&txh->txh_space_towrite);
        zfs_refcount_create(&txh->txh_memory_tohold);
        txh->txh_type = type;
        txh->txh_arg1 = arg1;
        txh->txh_arg2 = arg2;
        list_insert_tail(&tx->tx_holds, txh);

        return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
        dnode_t *dn = NULL;
        dmu_tx_hold_t *txh;
        int err;

        if (object != DMU_NEW_OBJECT) {
                err = dnode_hold(os, object, FTAG, &dn);
                if (err != 0) {
                        tx->tx_err = err;
                        return (NULL);
                }
        }
        txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
        if (dn != NULL)
                dnode_rele(dn, FTAG);
        return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
        /*
         * If we're syncing, they can manipulate any object anyhow, and
         * the hold on the dnode_t can cause problems.
         */
        if (!dmu_tx_is_syncing(tx))
                (void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically holds fewer locks when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
        int err;
        dmu_buf_impl_t *db;

        rw_enter(&dn->dn_struct_rwlock, RW_READER);
        err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
        rw_exit(&dn->dn_struct_rwlock);
        if (err == ENOENT)
                return (0);
        if (err != 0)
                return (err);
        /*
         * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
         * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
         */
        err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
            (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
        dbuf_rele(db, FTAG);
        return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        int err = 0;

        if (len == 0)
                return;

        (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

        if (dn == NULL)
                return;

        /*
         * For i/o error checking, read the blocks that will be needed
         * to perform the write: the first and last level-0 blocks (if
         * they are not aligned, i.e. if they are partial-block writes),
         * and all the level-1 blocks.
         */
        if (dn->dn_maxblkid == 0) {
                if (off < dn->dn_datablksz &&
                    (off > 0 || len < dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }
        } else {
                zio_t *zio = zio_root(dn->dn_objset->os_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);

                /* first level-0 block */
                uint64_t start = off >> dn->dn_datablkshift;
                if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, start);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                /* last level-0 block */
                uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
                if (end != start && end <= dn->dn_maxblkid &&
                    P2PHASE(off + len, dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, end);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                /* level-1 blocks */
                if (dn->dn_nlevels > 1) {
                        int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        for (uint64_t i = (start >> shft) + 1;
                            i < end >> shft; i++) {
                                err = dmu_tx_check_ioerr(zio, dn, 1, i);
                                if (err != 0) {
                                        txh->txh_tx->tx_err = err;
                                }
                        }
                }

                err = zio_wait(zio);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                }
        }
}
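
/*
 * A worked illustration of the checks above (assuming a 128K block size
 * and dn_maxblkid >= 1): a write with off=96K, len=64K partially covers
 * level-0 blocks 0 and 1, so both are read; a block-aligned write of
 * whole blocks reads no level-0 blocks at all, only any interior
 * level-1 blocks spanned by the range.
 */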

static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dnode_t *dn = txh->txh_dnode;
        int err = 0;

        if (len == 0)
                return;

        (void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

        if (dn == NULL)
                return;

        /*
         * For i/o error checking, read the blocks that will be needed
         * to perform the append: the first level-0 block (if it is not
         * aligned, i.e. if it is a partial-block write).  No additional
         * blocks are read.
         */
        if (dn->dn_maxblkid == 0) {
                if (off < dn->dn_datablksz &&
                    (off > 0 || len < dn->dn_datablksz)) {
                        err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }
        } else {
                zio_t *zio = zio_root(dn->dn_objset->os_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);

                /* first level-0 block */
                uint64_t start = off >> dn->dn_datablkshift;
                if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
                        err = dmu_tx_check_ioerr(zio, dn, 0, start);
                        if (err != 0) {
                                txh->txh_tx->tx_err = err;
                        }
                }

                err = zio_wait(zio);
                if (err != 0) {
                        txh->txh_tx->tx_err = err;
                }
        }
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
        (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_WRITE, off, len);
        if (txh != NULL) {
                dmu_tx_count_write(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
        if (txh != NULL) {
                dmu_tx_count_write(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

/*
 * Should be used when appending to an object and the exact offset is unknown.
 * The write must occur at or beyond the specified offset.  Only the L0 block
 * at the provided offset will be prefetched.
 */
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_APPEND, off, DMU_OBJECT_END);
        if (txh != NULL) {
                dmu_tx_count_append(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT3U(len, <=, DMU_MAX_ACCESS);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
        if (txh != NULL) {
                dmu_tx_count_append(txh, off, len);
                dmu_tx_count_dnode(txh);
        }
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
        tx->tx_netfree = B_TRUE;
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        dmu_tx_t *tx = txh->txh_tx;
        dnode_t *dn = txh->txh_dnode;
        int err;

        ASSERT(tx->tx_txg == 0);

        if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
                return;
        if (len == DMU_OBJECT_END)
                len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

        /*
         * For i/o error checking, we read the first and last level-0
         * blocks if they are not aligned, and all the level-1 blocks.
         *
         * Note: dbuf_free_range() assumes that we have not instantiated
         * any level-0 dbufs that will be completely freed.  Therefore we must
         * exercise care to not read or count the first and last blocks
         * if they are blocksize-aligned.
         */
        if (dn->dn_datablkshift == 0) {
                if (off != 0 || len < dn->dn_datablksz)
                        dmu_tx_count_write(txh, 0, dn->dn_datablksz);
        } else {
                /* first block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off, 1);
                /* last block will be modified if it is not aligned */
                if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
                        dmu_tx_count_write(txh, off + len, 1);
        }

        /*
         * Check level-1 blocks.
         */
        if (dn->dn_nlevels > 1) {
                int shift = dn->dn_datablkshift + dn->dn_indblkshift -
                    SPA_BLKPTRSHIFT;
                uint64_t start = off >> shift;
                uint64_t end = (off + len) >> shift;

                ASSERT(dn->dn_indblkshift != 0);

                /*
                 * dnode_reallocate() can result in an object with indirect
                 * blocks having an odd data block size.  In this case,
                 * just check the single block.
                 */
                if (dn->dn_datablkshift == 0)
                        start = end = 0;

                zio_t *zio = zio_root(tx->tx_pool->dp_spa,
                    NULL, NULL, ZIO_FLAG_CANFAIL);
                for (uint64_t i = start; i <= end; i++) {
                        uint64_t ibyte = i << shift;
                        err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
                        i = ibyte >> shift;
                        if (err == ESRCH || i > end)
                                break;
                        if (err != 0) {
                                tx->tx_err = err;
                                (void) zio_wait(zio);
                                return;
                        }

                        (void) zfs_refcount_add_many(&txh->txh_memory_tohold,
                            1 << dn->dn_indblkshift, FTAG);

                        err = dmu_tx_check_ioerr(zio, dn, 1, i);
                        if (err != 0) {
                                tx->tx_err = err;
                                (void) zio_wait(zio);
                                return;
                        }
                }
                err = zio_wait(zio);
                if (err != 0) {
                        tx->tx_err = err;
                        return;
                }
        }
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_FREE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_free(txh, off, len);
        }
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    uint64_t len)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_free(txh, off, len);
        }
}

static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
        /*
         * Reuse dmu_tx_count_free(), it does exactly what we need for clone.
         */
        dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
        if (txh != NULL) {
                dmu_tx_count_dnode(txh);
                dmu_tx_count_clone(txh, off, len);
        }
}
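
/*
 * A hedged sketch of a common caller pattern: freeing an object's entire
 * contents holds the whole range and marks the tx as a net free (os and
 * object are placeholders):
 *
 *      dmu_tx_t *tx = dmu_tx_create(os);
 *      dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *      dmu_tx_mark_netfree(tx);
 */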

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
        dmu_tx_t *tx = txh->txh_tx;
        dnode_t *dn = txh->txh_dnode;
        int err;

        ASSERT(tx->tx_txg == 0);

        dmu_tx_count_dnode(txh);

        /*
         * Modifying an almost-full microzap is around the worst case (128KB).
         *
         * If it is a fat zap, the worst case would be 7*16KB=112KB:
         * - 3 blocks overwritten: target leaf, ptrtbl block, header block
         * - 4 new blocks written if adding:
         *    - 2 blocks for possibly split leaves,
         *    - 2 grown ptrtbl blocks
         */
        (void) zfs_refcount_add_many(&txh->txh_space_towrite,
            zap_get_micro_max_size(tx->tx_pool->dp_spa), FTAG);

        if (dn == NULL)
                return;

        ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

        if (dn->dn_maxblkid == 0 || name == NULL) {
                /*
                 * This is a microzap (only one block), or we don't know
                 * the name.  Check the first block for i/o errors.
                 */
                err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
                if (err != 0) {
                        tx->tx_err = err;
                }
        } else {
                /*
                 * Access the name so that we'll check for i/o errors to
                 * the leaf blocks, etc.  We ignore ENOENT, as this name
                 * may not yet exist.
                 */
                err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
                if (err == EIO || err == ECKSUM || err == ENXIO) {
                        tx->tx_err = err;
                }
        }
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_ZAP, add, (uintptr_t)name);
        if (txh != NULL)
                dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);
        ASSERT(dn != NULL);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
        if (txh != NULL)
                dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            object, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
        dmu_tx_hold_t *txh;

        ASSERT0(tx->tx_txg);

        txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
        if (txh)
                dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
        dmu_tx_hold_t *txh;

        ASSERT(tx->tx_txg == 0);

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
            DMU_NEW_OBJECT, THT_SPACE, space, 0);
        if (txh) {
                (void) zfs_refcount_add_many(
                    &txh->txh_space_towrite, space, FTAG);
        }
}
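
/*
 * For illustration, adding a directory entry in the ZPL combines these
 * holds: the directory's ZAP by name, plus the child's bonus buffer
 * (a sketch; dzp and zp are hypothetical znodes):
 *
 *      dmu_tx_hold_zap(tx, dzp->z_id, B_TRUE, name);
 *      dmu_tx_hold_bonus(tx, zp->z_id);
 */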

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
        boolean_t match_object = B_FALSE;
        boolean_t match_offset = B_FALSE;

        DB_DNODE_ENTER(db);
        dnode_t *dn = DB_DNODE(db);
        ASSERT(tx->tx_txg != 0);
        ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
        ASSERT3U(dn->dn_object, ==, db->db.db_object);

        if (tx->tx_anyobj) {
                DB_DNODE_EXIT(db);
                return;
        }

        /* XXX No checking on the meta dnode for now */
        if (db->db.db_object == DMU_META_DNODE_OBJECT) {
                DB_DNODE_EXIT(db);
                return;
        }

        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
                        match_object = TRUE;
                if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
                        int datablkshift = dn->dn_datablkshift ?
                            dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
                        int shift = datablkshift + epbs * db->db_level;
                        uint64_t beginblk = shift >= 64 ? 0 :
                            (txh->txh_arg1 >> shift);
                        uint64_t endblk = shift >= 64 ? 0 :
                            ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
                        uint64_t blkid = db->db_blkid;

                        /* XXX txh_arg2 better not be zero... */

                        dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
                            txh->txh_type, (u_longlong_t)beginblk,
                            (u_longlong_t)endblk);

                        switch (txh->txh_type) {
                        case THT_WRITE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                /*
                                 * We will let this hold work for the bonus
                                 * or spill buffer so that we don't need to
                                 * hold it when creating a new object.
                                 */
                                if (blkid == DMU_BONUS_BLKID ||
                                    blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs.  Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_APPEND:
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;

                                /*
                                 * THT_WRITE is used for bonus and spill
                                 * blocks.
                                 */
                                ASSERT(blkid != DMU_BONUS_BLKID &&
                                    blkid != DMU_SPILL_BLKID);

                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs.  Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        case THT_FREE:
                                /*
                                 * We will dirty all the level 1 blocks in
                                 * the free range and perhaps the first and
                                 * last level 0 block.
                                 */
                                if (blkid >= beginblk && (blkid <= endblk ||
                                    txh->txh_arg2 == DMU_OBJECT_END))
                                        match_offset = TRUE;
                                break;
                        case THT_SPILL:
                                if (blkid == DMU_SPILL_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_BONUS:
                                if (blkid == DMU_BONUS_BLKID)
                                        match_offset = TRUE;
                                break;
                        case THT_ZAP:
                                match_offset = TRUE;
                                break;
                        case THT_NEWOBJECT:
                                match_object = TRUE;
                                break;
                        case THT_CLONE:
                                if (blkid >= beginblk && blkid <= endblk)
                                        match_offset = TRUE;
                                /*
                                 * They might have to increase nlevels,
                                 * thus dirtying the new TLIBs.  Or they
                                 * might have to change the block size,
                                 * thus dirtying the new lvl=0 blk=0.
                                 */
                                if (blkid == 0)
                                        match_offset = TRUE;
                                break;
                        default:
                                cmn_err(CE_PANIC, "bad txh_type %d",
                                    txh->txh_type);
                        }
                }
                if (match_object && match_offset) {
                        DB_DNODE_EXIT(db);
                        return;
                }
        }
        DB_DNODE_EXIT(db);
        panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
            (u_longlong_t)db->db.db_object, db->db_level,
            (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.
 * Roughly speaking, this variable determines the amount of delay at the
 * midpoint of the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                            ** +
 *       |                                              (midpoint)  **   |
 *       +                                                  |     **     +
 *   1ms +                                                  v ****       +
 *       +             zfs_delay_scale ---------->      *****            +
 *       |                                            ****               |
 *       +                                          ****                 +
 * 100us +                                        **                     +
 *       +                                       *                       +
 *       |                                      *                        |
 *       +                                      *                        +
 *  10us +                                     *                         +
 *       +                                                               +
 *       |                                                               |
 *       +                                                               +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->                100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
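
/*
 * A worked example of the min_time formula above, assuming the default
 * zfs_delay_scale of 500,000ns: at the midpoint of the curve,
 * dirty == (min + max) / 2, so
 *
 *      min_time = scale * (dirty - min) / (max - dirty)
 *               = scale * ((max - min) / 2) / ((max - min) / 2)
 *               = scale = 500us
 *
 * which corresponds to the 2000 IOPS midpoint noted above.
 */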

static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
        dsl_pool_t *dp = tx->tx_pool;
        uint64_t delay_min_bytes, wrlog;
        hrtime_t wakeup, tx_time = 0, now;

        /* Calculate minimum transaction time for the dirty data amount. */
        delay_min_bytes =
            zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
        if (dirty > delay_min_bytes) {
                /*
                 * The caller has already waited until we are under the max.
                 * We make them pass us the amount of dirty data so we don't
                 * have to handle the case of it being >= the max, which
                 * could cause a divide-by-zero if it's == the max.
                 */
                ASSERT3U(dirty, <, zfs_dirty_data_max);

                tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
                    (zfs_dirty_data_max - dirty);
        }

        /* Calculate minimum transaction time for the TX_WRITE log size. */
        wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
        delay_min_bytes =
            zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
        if (wrlog >= zfs_wrlog_data_max) {
                tx_time = zfs_delay_max_ns;
        } else if (wrlog > delay_min_bytes) {
                tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
                    (zfs_wrlog_data_max - wrlog), tx_time);
        }

        if (tx_time == 0)
                return;

        tx_time = MIN(tx_time, zfs_delay_max_ns);
        now = gethrtime();
        if (now > tx->tx_start + tx_time)
                return;

        DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
            uint64_t, tx_time);

        mutex_enter(&dp->dp_lock);
        wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
        dp->dp_last_wakeup = wakeup;
        mutex_exit(&dp->dp_lock);

        zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes comprises the sum of:
 *
 * - this transaction's holds' txh_space_towrite
 *
 * - dd_tempreserved[], which is the sum of in-flight transactions'
 *   holds' txh_space_towrite (i.e. those transactions that have called
 *   dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 * - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t flags)
{
        spa_t *spa = tx->tx_pool->dp_spa;

        ASSERT0(tx->tx_txg);

        if (tx->tx_err) {
                DMU_TX_STAT_BUMP(dmu_tx_error);
                return (tx->tx_err);
        }

        if (spa_suspended(spa)) {
                DMU_TX_STAT_BUMP(dmu_tx_suspended);

                /*
                 * If the user has indicated a blocking failure mode
                 * then return ERESTART which will block in dmu_tx_wait().
                 * Otherwise, return EIO so that an error can get
                 * propagated back to the VOP calls.
                 *
                 * Note that we always honor the DMU_TX_WAIT flag regardless
                 * of the failuremode setting.
                 */
                if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
                    !(flags & DMU_TX_WAIT))
                        return (SET_ERROR(EIO));

                return (SET_ERROR(ERESTART));
        }

        if (!tx->tx_dirty_delayed &&
            dsl_pool_need_wrlog_delay(tx->tx_pool)) {
                tx->tx_wait_dirty = B_TRUE;
                DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
                return (SET_ERROR(ERESTART));
        }

        if (!tx->tx_dirty_delayed &&
            dsl_pool_need_dirty_delay(tx->tx_pool)) {
                tx->tx_wait_dirty = B_TRUE;
                DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
                return (SET_ERROR(ERESTART));
        }

        tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
        tx->tx_needassign_txh = NULL;

        /*
         * NB: No error returns are allowed after txg_hold_open, but
         * before processing the dnode holds, due to the
         * dmu_tx_unassign() logic.
         */

        uint64_t towrite = 0;
        uint64_t tohold = 0;
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;
                if (dn != NULL) {
                        /*
                         * This thread can't hold the dn_struct_rwlock
                         * while assigning the tx, because this can lead to
                         * deadlock.  Specifically, if this dnode is already
                         * assigned to an earlier txg, this thread may need
                         * to wait for that txg to sync (the ERESTART case
                         * below).  The other thread that has assigned this
                         * dnode to an earlier txg prevents this txg from
                         * syncing until its tx can complete (calling
                         * dmu_tx_commit()), but it may need to acquire the
                         * dn_struct_rwlock to do so (e.g. via
                         * dmu_buf_hold*()).
                         *
                         * Note that this thread can't hold the lock for
                         * read either, but the rwlock doesn't record
                         * enough information to make that assertion.
                         */
                        ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

                        mutex_enter(&dn->dn_mtx);
                        if (dn->dn_assigned_txg == tx->tx_txg - 1) {
                                mutex_exit(&dn->dn_mtx);
                                tx->tx_needassign_txh = txh;
                                DMU_TX_STAT_BUMP(dmu_tx_group);
                                return (SET_ERROR(ERESTART));
                        }
                        if (dn->dn_assigned_txg == 0)
                                dn->dn_assigned_txg = tx->tx_txg;
                        ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
                        (void) zfs_refcount_add(&dn->dn_tx_holds, tx);
                        mutex_exit(&dn->dn_mtx);
                }
                towrite += zfs_refcount_count(&txh->txh_space_towrite);
                tohold += zfs_refcount_count(&txh->txh_memory_tohold);
        }

        /* needed allocation: worst-case estimate of write space */
        uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
        /* calculate memory footprint estimate */
        uint64_t memory = towrite + tohold;

        if (tx->tx_dir != NULL && asize != 0) {
                int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
                    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
                if (err != 0)
                        return (err);
        }

        DMU_TX_STAT_BUMP(dmu_tx_assigned);

        return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
        if (tx->tx_txg == 0)
                return;

        txg_rele_to_quiesce(&tx->tx_txgh);

        /*
         * Walk the transaction's hold list, removing the hold on the
         * associated dnode, and notifying waiters if the refcount drops to 0.
         */
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
            txh && txh != tx->tx_needassign_txh;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;
                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        txg_rele_to_sync(&tx->tx_txgh);

        tx->tx_lasttried_txg = tx->tx_txg;
        tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; `flags` is a bitmask:
 *
 * If DMU_TX_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If DMU_TX_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If DMU_TX_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs.  Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row.  This guarantee holds both for subsequent calls from
 * one thread and for multiple threads.  For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                       dmu_tx_assign(T2, ...)
 *                                       2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t flags)
{
        int err;

        ASSERT(tx->tx_txg == 0);
        ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
        ASSERT(!dsl_pool_sync_context(tx->tx_pool));

        /* If we might wait, we must not hold the config lock. */
        IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool));

        if ((flags & DMU_TX_NOTHROTTLE))
                tx->tx_dirty_delayed = B_TRUE;

        while ((err = dmu_tx_try_assign(tx, flags)) != 0) {
                dmu_tx_unassign(tx);

                if (err != ERESTART || !(flags & DMU_TX_WAIT))
                        return (err);

                dmu_tx_wait(tx);
        }

        txg_rele_to_quiesce(&tx->tx_txgh);

        return (0);
}
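
/*
 * A sketch of the drop-locks-and-retry pattern described above (hold
 * calls, lock handling, and the modified state are elided):
 *
 *      top:
 *              tx = dmu_tx_create(os);
 *              dmu_tx_hold_write(tx, object, off, len);
 *              err = dmu_tx_assign(tx, 0);
 *              if (err == ERESTART) {
 *                      ... drop locks ...
 *                      dmu_tx_wait(tx);
 *                      dmu_tx_abort(tx);
 *                      goto top;
 *              } else if (err != 0) {
 *                      dmu_tx_abort(tx);
 *                      return (err);
 *              }
 *              ... make changes ...
 *              dmu_tx_commit(tx);
 */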

void
dmu_tx_wait(dmu_tx_t *tx)
{
        spa_t *spa = tx->tx_pool->dp_spa;
        dsl_pool_t *dp = tx->tx_pool;
        hrtime_t before;

        ASSERT(tx->tx_txg == 0);
        ASSERT(!dsl_pool_config_held(tx->tx_pool));

        before = gethrtime();

        if (tx->tx_wait_dirty) {
                uint64_t dirty;

                /*
                 * dmu_tx_try_assign() has determined that we need to wait
                 * because we've consumed much or all of the dirty buffer
                 * space.
                 */
                mutex_enter(&dp->dp_lock);
                if (dp->dp_dirty_total >= zfs_dirty_data_max)
                        DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
                while (dp->dp_dirty_total >= zfs_dirty_data_max)
                        cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
                dirty = dp->dp_dirty_total;
                mutex_exit(&dp->dp_lock);

                dmu_tx_delay(tx, dirty);

                tx->tx_wait_dirty = B_FALSE;

                /*
                 * Note: setting tx_dirty_delayed only has effect if the
                 * caller used DMU_TX_WAIT.  Otherwise they are going to
                 * destroy this tx and try again.  The common case,
                 * zfs_write(), uses DMU_TX_WAIT.
                 */
                tx->tx_dirty_delayed = B_TRUE;
        } else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
                /*
                 * If the pool is suspended we need to wait until it
                 * is resumed.  Note that it's possible that the pool
                 * has become active after this thread has tried to
                 * obtain a tx.  If that's the case then tx_lasttried_txg
                 * would not have been set.
                 */
                txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
        } else if (tx->tx_needassign_txh) {
                dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

                mutex_enter(&dn->dn_mtx);
                while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
                        cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
                mutex_exit(&dn->dn_mtx);
                tx->tx_needassign_txh = NULL;
        } else {
                /*
                 * If we have a lot of dirty data just wait until we sync
                 * out a TXG at which point we'll hopefully have synced
                 * a portion of the changes.
                 */
                txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
        }

        spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
        dmu_tx_hold_t *txh;

        while ((txh = list_head(&tx->tx_holds)) != NULL) {
                dnode_t *dn = txh->txh_dnode;

                list_remove(&tx->tx_holds, txh);
                zfs_refcount_destroy_many(&txh->txh_space_towrite,
                    zfs_refcount_count(&txh->txh_space_towrite));
                zfs_refcount_destroy_many(&txh->txh_memory_tohold,
                    zfs_refcount_count(&txh->txh_memory_tohold));
                kmem_free(txh, sizeof (dmu_tx_hold_t));
                if (dn != NULL)
                        dnode_rele(dn, tx);
        }

        list_destroy(&tx->tx_callbacks);
        list_destroy(&tx->tx_holds);
        kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);

        /*
         * Go through the transaction's hold list and remove holds on
         * associated dnodes, notifying waiters if no holds remain.
         */
        for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
            txh = list_next(&tx->tx_holds, txh)) {
                dnode_t *dn = txh->txh_dnode;

                if (dn == NULL)
                        continue;

                mutex_enter(&dn->dn_mtx);
                ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

                if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
                        dn->dn_assigned_txg = 0;
                        cv_broadcast(&dn->dn_notxholds);
                }
                mutex_exit(&dn->dn_mtx);
        }

        if (tx->tx_tempreserve_cookie)
                dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

        if (!list_is_empty(&tx->tx_callbacks))
                txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

        if (tx->tx_anyobj == FALSE)
                txg_rele_to_sync(&tx->tx_txgh);

        dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg == 0);

        /*
         * Call any registered callbacks with an error code.
         */
        if (!list_is_empty(&tx->tx_callbacks))
                dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

        dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
        ASSERT(tx->tx_txg != 0);
        return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
        ASSERT(tx->tx_pool != NULL);
        return (tx->tx_pool);
}

/*
 * Register a callback to be executed at the end of a TXG.
 *
 * Note: This currently exists for outside consumers, specifically the ZFS OSD
 * for Lustre.  Please do not remove before checking that project.  For
 * examples on how to use this see `ztest_commit_callback`.
 */
void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func,
    void *data)
{
        dmu_tx_callback_t *dcb;

        dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

        dcb->dcb_func = func;
        dcb->dcb_data = data;

        list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
        dmu_tx_callback_t *dcb;

        while ((dcb = list_remove_tail(cb_list)) != NULL) {
                dcb->dcb_func(dcb->dcb_data, error);
                kmem_free(dcb, sizeof (dmu_tx_callback_t));
        }
}
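
/*
 * A minimal sketch of commit callback usage (my_cb and my_state are
 * hypothetical):
 *
 *      static void
 *      my_cb(void *data, int error)
 *      {
 *              struct my_state *s = data;
 *              ... error is 0 on commit, or e.g. ECANCELED if the
 *              tx was aborted ...
 *              kmem_free(s, sizeof (*s));
 *      }
 *
 *      dmu_tx_callback_register(tx, my_cb, s);
 *      dmu_tx_commit(tx);
 */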

/*
 * Interface to hold a bunch of attributes, used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the ZAPs needed to register attribute names.  It should be a very
 * rare case that this is needed; if it happens, it would only be on the
 * first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
        if (!sa->sa_need_attr_registration)
                return;

        for (int i = 0; i != sa->sa_num_attrs; i++) {
                if (!sa->sa_attr_table[i].sa_registered) {
                        if (sa->sa_reg_attr_obj)
                                dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                        else
                                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
                                    B_TRUE, sa->sa_attr_table[i].sa_name);
                }
        }
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
        dmu_tx_hold_t *txh;

        txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
            THT_SPILL, 0, 0);
        if (txh != NULL)
                (void) zfs_refcount_add_many(&txh->txh_space_towrite,
                    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
        sa_os_t *sa = tx->tx_objset->os_sa;

        dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
        } else {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
                return;

        (void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
            THT_SPILL, 0, 0);
}
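
/*
 * For illustration, the ZPL sizes this hold at file creation as the base
 * attribute size plus any ACL bytes (a hedged sketch; the exact
 * expression varies by caller):
 *
 *      dmu_tx_hold_sa_create(tx, ZFS_SA_BASE_ATTR_SIZE + acl_bytes);
 */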

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * If may_grow is set, the update may change the size of variable-sized
 * attributes, so the SA layout ZAP and the spill block must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
        uint64_t object;
        sa_os_t *sa = tx->tx_objset->os_sa;

        ASSERT(hdl != NULL);

        object = sa_handle_object(hdl);

        dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
        DB_DNODE_ENTER(db);
        dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
        DB_DNODE_EXIT(db);

        if (tx->tx_objset->os_sa->sa_master_obj == 0)
                return;

        if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
            tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
                dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
                dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
        }

        dmu_tx_sa_registration_hold(sa, tx);

        if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
                dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

        if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
                ASSERT(tx->tx_txg == 0);
                dmu_tx_hold_spill(tx, object);
        } else {
                DB_DNODE_ENTER(db);
                if (DB_DNODE(db)->dn_have_spill) {
                        ASSERT(tx->tx_txg == 0);
                        dmu_tx_hold_spill(tx, object);
                }
                DB_DNODE_EXIT(db);
        }
}

void
dmu_tx_init(void)
{
        dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
            KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);

        if (dmu_tx_ksp != NULL) {
                dmu_tx_ksp->ks_data = &dmu_tx_stats;
                kstat_install(dmu_tx_ksp);
        }
}

void
dmu_tx_fini(void)
{
        if (dmu_tx_ksp != NULL) {
                kstat_delete(dmu_tx_ksp);
                dmu_tx_ksp = NULL;
        }
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_append);
EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif