/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}

/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).
 * Once the transaction has been assigned, and some DMU state has been
 * changed, it can be difficult to recover from an i/o error (e.g. to
 * undo the changes already made in memory at the DMU layer).  Typically
 * code to do so does not exist in the caller -- it assumes that the data
 * has already been cached and thus i/o errors are not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}

static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL)
		(void) dmu_tx_hold_free_impl(txh, off, len);
}

static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB).
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    MZAP_MAX_BLKSZ, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, (u_longlong_t)beginblk,
			    (u_longlong_t)endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
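 *
 * (The curve described below is implemented by dmu_tx_delay(); dmu_tx_wait()
 * invokes it once dmu_tx_try_assign() has flagged the transaction with
 * tx_wait_dirty.)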
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.
 * Roughly speaking, this variable determines the amount of delay at the
 * midpoint of the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                            * +
 *       |                                                            * |
 *   4ms +                                                            * +
 *       |                                                            * |
 *   3ms +                                                            * +
 *       |                                                            * |
 *   2ms +                                                (midpoint)  * +
 *       |                                                    |    **   |
 *   1ms +                                                    v ***     +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
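 *
 * As a worked example (assuming the default tunables zfs_delay_scale =
 * 500,000ns and zfs_delay_min_dirty_percent = 60): with dirty data at 80%
 * of zfs_dirty_data_max, the midpoint between 60% and 100%, the formula
 * gives min_time = 500000 * (80 - 60) / (100 - 80) = 500,000ns, i.e. the
 * 500us midpoint delay shown above.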
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                   |     **   +
 *   1ms +                                                   v ****     +
 *       +             zfs_delay_scale ---------->       *****          +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes, wrlog;
	hrtime_t wakeup, tx_time = 0, now;

	/* Calculate minimum transaction time for the dirty data amount. */
	delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	if (dirty > delay_min_bytes) {
		/*
		 * The caller has already waited until we are under the max.
		 * We make them pass us the amount of dirty data so we don't
		 * have to handle the case of it being >= the max, which
		 * could cause a divide-by-zero if it's == the max.
		 */
		ASSERT3U(dirty, <, zfs_dirty_data_max);

		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
		    (zfs_dirty_data_max - dirty);
	}

	/* Calculate minimum transaction time for the TX_WRITE log size. */
	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
	delay_min_bytes =
	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
	if (wrlog >= zfs_wrlog_data_max) {
		tx_time = zfs_delay_max_ns;
	} else if (wrlog > delay_min_bytes) {
		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
		    (zfs_wrlog_data_max - wrlog), tx_time);
	}

	if (tx_time == 0)
		return;

	tx_time = MIN(tx_time, zfs_delay_max_ns);
	now = gethrtime();
	if (now > tx->tx_start + tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}

/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 * - this transaction's holds' txh_space_towrite
 *
 * - dd_tempreserved[], which is the sum of in-flight transactions'
 *   holds' txh_space_towrite (i.e. those transactions that have called
 *   dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 * - dd_space_towrite[], which is the amount of dirtied dbufs.
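 *
 * (As an illustration of the first component: a single dmu_tx_hold_write()
 * of 1MB adds that 1MB, plus DNODE_MIN_SIZE for the dnode itself, to
 * txh_space_towrite -- see dmu_tx_count_write() and dmu_tx_count_dnode()
 * above.)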
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock.  Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dmu_buf_hold*()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}

	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs.  Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row.  This guarantee holds both for subsequent calls from
 * one thread and for multiple threads.  For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                       dmu_tx_assign(T2, ...)
 *                                       2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
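/*
 * A hedged usage sketch of the pattern described above, in the TXG_WAIT
 * style; "os", "object", "off", "len", and "buf" are caller-supplied
 * placeholders, not names from this file:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */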
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.
		 * If that's the case then tx_lasttried_txg would not
		 * have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data just wait until we sync
		 * out a TXG at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
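/*
 * Illustrative sketch of a commit callback (my_commit_cb and
 * my_state_t are hypothetical): the callback runs with error == 0 once
 * the txg carrying the tx has been synced to disk, or with ECANCELED
 * if the tx is aborted instead (see dmu_tx_abort() above).  In this
 * sketch the callback owns 'data' and frees it in either case:
 *
 *	static void
 *	my_commit_cb(void *data, int error)
 *	{
 *		my_state_t *ms = data;
 *
 *		if (error == 0)
 *			ms->ms_durable = B_TRUE;
 *		kmem_free(ms, sizeof (my_state_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, ms);
 *	dmu_tx_commit(tx);	(callback fires later, from sync context)
 */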
/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_tail(cb_list)) != NULL) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */
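/*
 * Illustrative sketch of the creation flow described above, using
 * dmu_tx_hold_sa_create() as defined below (the other names are
 * hypothetical).  As with all dmu_tx holds, the holds must be taken
 * before the tx is assigned:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa_create(tx, total_attr_size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... allocate the new object and set up its SA bundle in this tx ...
 *	dmu_tx_commit(tx);
 */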
/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold an SA attribute.
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the attribute data may grow or be added;
 * in that case this function also holds the SA layout ZAP and the
 * spill block, since such an update can push attributes out of the
 * bonus buffer.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
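/*
 * Illustrative sketch of a single-attribute update ('sa_attr' and
 * 'val' are hypothetical): pass may_grow = B_TRUE whenever the update
 * could enlarge the attribute, so the layout ZAP and spill block are
 * held as well:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, hdl, B_TRUE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	error = sa_update(hdl, sa_attr, &val, sizeof (val), tx);
 *	dmu_tx_commit(tx);
 */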
void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif