/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/trace_zfs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_over_max",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_frees_delay",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_wrlog_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	TXG_VERIFY(dp->dp_spa, txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
    uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;

	if (dn != NULL) {
		(void) zfs_refcount_add(&dn->dn_holds, tx);
		if (tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
	zfs_refcount_create(&txh->txh_space_towrite);
	zfs_refcount_create(&txh->txh_memory_tohold);
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dnode_t *dn = NULL;
	dmu_tx_hold_t *txh;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, FTAG, &dn);
		if (err != 0) {
			tx->tx_err = err;
			return (NULL);
		}
	}
	txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
	if (dn != NULL)
		dnode_rele(dn, FTAG);
	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx))
		(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
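/*
 * Illustrative sketch (not part of this file): from the caller's side,
 * allocating a new object is typically declared with a DMU_NEW_OBJECT
 * hold before the tx is assigned; the DMU then records the real dnode
 * via dmu_tx_add_new_object() above.  The object types shown are one
 * arbitrary choice among many.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		uint64_t obj = dmu_object_alloc(os, DMU_OT_UINT64_OTHER,
 *		    0, DMU_OT_NONE, 0, tx);
 *		dmu_tx_commit(tx);
 *	}
 */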
/*
 * This function reads specified data from disk.  The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
 * we do dmu_tx_assign().  There are two reasons that we read the data now
 * (before dmu_tx_assign()):
 *
 * 1. Reading it now has potentially better performance.  The transaction
 * has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
 * after the transaction has been assigned.  This reduces the lock (and txg)
 * hold times, thus reducing lock contention.
 *
 * 2. It is easier for callers (primarily the ZPL) to handle i/o errors
 * that are detected before they start making changes to the DMU state
 * (i.e. now).  Once the transaction has been assigned, and some DMU
 * state has been changed, it can be difficult to recover from an i/o
 * error (e.g. to undo the changes already made in memory at the DMU
 * layer).  Typically code to do so does not exist in the caller -- it
 * assumes that the data has already been cached and thus i/o errors are
 * not possible.
 *
 * It has been observed that the i/o initiated here can be a performance
 * problem, and it appears to be optional, because we don't look at the
 * data which is read.  However, removing this read would only serve to
 * move the work elsewhere (after the dmu_tx_assign()), where it may
 * have a greater impact on performance (in addition to the impact on
 * fault tolerance noted above).
 */
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	/*
	 * PARTIAL_FIRST allows caching for uncacheable blocks.  It will
	 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
	 */
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
	    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
	dbuf_rele(db, FTAG);
	return (err);
}
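/*
 * Illustrative sketch (not from this file; the held object and range
 * are the caller's to choose): an i/o error detected by the early reads
 * above is latched in tx->tx_err and surfaces from dmu_tx_assign(),
 * while it is still cheap for the caller to give up.
 *
 *	dmu_tx_hold_write(tx, object, off, len);   // reads checked here
 *	int err = dmu_tx_assign(tx, TXG_WAIT);     // returns e.g. EIO
 *	if (err != 0)
 *		dmu_tx_abort(tx);                  // nothing to undo yet
 */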
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the write: the first and last level-0 blocks (if
	 * they are not aligned, i.e. if they are partial-block writes),
	 * and all the level-1 blocks.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* last level-0 block */
		uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
		if (end != start && end <= dn->dn_maxblkid &&
		    P2PHASE(off + len, dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(zio, dn, 0, end);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		/* level-1 blocks */
		if (dn->dn_nlevels > 1) {
			int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			for (uint64_t i = (start >> shft) + 1;
			    i < end >> shft; i++) {
				err = dmu_tx_check_ioerr(zio, dn, 1, i);
				if (err != 0) {
					txh->txh_tx->tx_err = err;
				}
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}
static void
dmu_tx_count_append(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	int err = 0;

	if (len == 0)
		return;

	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

	if (dn == NULL)
		return;

	/*
	 * For i/o error checking, read the blocks that will be needed
	 * to perform the append: only the first level-0 block (if it is
	 * not aligned, i.e. if this is a partial-block write); no
	 * additional blocks are read.
	 */
	if (dn->dn_maxblkid == 0) {
		if (off < dn->dn_datablksz &&
		    (off > 0 || len < dn->dn_datablksz)) {
			err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}
	} else {
		zio_t *zio = zio_root(dn->dn_objset->os_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);

		/* first level-0 block */
		uint64_t start = off >> dn->dn_datablkshift;
		if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
			err = dmu_tx_check_ioerr(zio, dn, 0, start);
			if (err != 0) {
				txh->txh_tx->tx_err = err;
			}
		}

		err = zio_wait(zio);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
		}
	}
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    DNODE_MIN_SIZE, FTAG);
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
	if (txh != NULL) {
		dmu_tx_count_write(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}
/*
 * Should be used when appending to an object and the exact offset is unknown.
 * The write must occur at or beyond the specified offset.  Only the L0 block
 * at the provided offset will be prefetched.
 */
void
dmu_tx_hold_append(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}

void
dmu_tx_hold_append_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT3U(len, <=, DMU_MAX_ACCESS);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_APPEND, off, DMU_OBJECT_END);
	if (txh != NULL) {
		dmu_tx_count_append(txh, off, len);
		dmu_tx_count_dnode(txh);
	}
}
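/*
 * Illustrative sketch (not from this file): a caller that knows only a
 * lower bound for where an append will land, e.g. the current end of
 * file, holds the open-ended tail rather than a fixed range.  The
 * variable names are hypothetical.
 *
 *	dmu_tx_hold_append_by_dnode(tx, dn, current_eof, nbytes);
 *	// any write at offset >= current_eof is covered by this hold
 */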
/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	tx->tx_netfree = B_TRUE;
}
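/*
 * Illustrative sketch (not from this file): a deletion path would
 * typically pair this with a free hold so the tx can proceed even on a
 * nearly full pool.  The object name is hypothetical.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 */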
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;

	ASSERT(tx->tx_txg == 0);

	if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off + len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio_t *zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}

			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
			    1 << dn->dn_indblkshift, FTAG);

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err != 0) {
				tx->tx_err = err;
				(void) zio_wait(zio);
				return;
			}
		}
		err = zio_wait(zio);
		if (err != 0) {
			tx->tx_err = err;
			return;
		}
	}
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_free(txh, off, len);
	}
}

static void
dmu_tx_count_clone(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{

	/*
	 * Reuse dmu_tx_count_free(), it does exactly what we need for clone.
	 */
	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_clone_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_CLONE, off, len);
	if (txh != NULL) {
		dmu_tx_count_dnode(txh);
		dmu_tx_count_clone(txh, off, len);
	}
}
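/*
 * Illustrative sketch (not from this file): a block-cloning consumer,
 * e.g. a copy_file_range()-style path, declares the destination range
 * it is about to overwrite with cloned block pointers.  The variable
 * names are hypothetical.
 *
 *	dmu_tx_hold_clone_by_dnode(tx, dst_dn, dst_off, size);
 */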
static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
	dmu_tx_t *tx = txh->txh_tx;
	dnode_t *dn = txh->txh_dnode;
	int err;
	extern int zap_micro_max_size;

	ASSERT(tx->tx_txg == 0);

	dmu_tx_count_dnode(txh);

	/*
	 * Modifying an almost-full microzap is around the worst case (128KB).
	 *
	 * If it is a fat zap, the worst case would be 7*16KB=112KB:
	 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * - 4 new blocks written if adding:
	 *    - 2 blocks for possibly split leaves,
	 *    - 2 grown ptrtbl blocks
	 */
	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
	    zap_micro_max_size, FTAG);

	if (dn == NULL)
		return;

	ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 || name == NULL) {
		/*
		 * This is a microzap (only one block), or we don't know
		 * the name.  Check the first block for i/o errors.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err != 0) {
			tx->tx_err = err;
		}
	} else {
		/*
		 * Access the name so that we'll check for i/o errors to
		 * the leaf blocks, etc.  We ignore ENOENT, as this name
		 * may not yet exist.
		 */
		err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
		if (err == EIO || err == ECKSUM || err == ENXIO) {
			tx->tx_err = err;
		}
	}
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}

void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);
	ASSERT(dn != NULL);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
	if (txh != NULL)
		dmu_tx_hold_zap_impl(txh, name);
}
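/*
 * Illustrative sketch (not from this file): a directory-entry update
 * passes the entry name when it is known, so the lookup above can
 * surface i/o errors early.  The names are hypothetical.
 *
 *	dmu_tx_hold_zap(tx, dir_object, B_TRUE, entry_name);	// adding
 *	dmu_tx_hold_zap(tx, dir_object, B_FALSE, entry_name);	// removing
 */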
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
	dmu_tx_hold_t *txh;

	ASSERT0(tx->tx_txg);

	txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);
	if (txh) {
		(void) zfs_refcount_add_many(
		    &txh->txh_space_towrite, space, FTAG);
	}
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	boolean_t match_object = B_FALSE;
	boolean_t match_offset = B_FALSE;

	DB_DNODE_ENTER(db);
	dnode_t *dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, (u_longlong_t)beginblk,
			    (u_longlong_t)endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_APPEND:
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;

				/*
				 * THT_WRITE is used for bonus and spill
				 * blocks.
				 */
				ASSERT(blkid != DMU_BONUS_BLKID &&
				    blkid != DMU_SPILL_BLKID);

				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			case THT_CLONE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				break;
			default:
				cmn_err(CE_PANIC, "bad txh_type %d",
				    txh->txh_type);
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
static const hrtime_t zfs_delay_max_ns = 100 * MICROSEC;	/* 100 milliseconds */
/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                            * +
 *       |                                                            * |
 *   4ms +                                                            * +
 *       |                                                            * |
 *   3ms +                                                            * +
 *       |                                                            * |
 *   2ms +                                              (midpoint)   *  +
 *       |                                                  |       **  |
 *   1ms +                                                  v      ***  +
 *       |             zfs_delay_scale ---------->      ********       |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->     *****            +
 *       |                                         ****                 |
 *       +                                      ****                    +
 * 100us +                                    **                        +
 *       +                                   *                          +
 *       |                                  *                           |
 *       +                                 *                            +
 *  10us +                                *                             +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
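/*
 * Worked example (illustrative numbers; this file does not assert the
 * tunable defaults): with zfs_dirty_data_max = 4GB,
 * zfs_delay_min_dirty_percent = 60 (so min = 2.4GB) and
 * zfs_delay_scale = 500us, a transaction arriving at dirty = 3GB gets
 *
 *	min_time = 500us * (3GB - 2.4GB) / (4GB - 3GB) = 300us
 *
 * i.e. on the order of 3300 delayed transactions per second at that
 * dirty level, well under the zfs_delay_max_ns cap above.
 */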
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes, wrlog;
	hrtime_t wakeup, tx_time = 0, now;

	/* Calculate minimum transaction time for the dirty data amount. */
	delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	if (dirty > delay_min_bytes) {
		/*
		 * The caller has already waited until we are under the max.
		 * We make them pass us the amount of dirty data so we don't
		 * have to handle the case of it being >= the max, which
		 * could cause a divide-by-zero if it's == the max.
		 */
		ASSERT3U(dirty, <, zfs_dirty_data_max);

		tx_time = zfs_delay_scale * (dirty - delay_min_bytes) /
		    (zfs_dirty_data_max - dirty);
	}

	/* Calculate minimum transaction time for the TX_WRITE log size. */
	wrlog = aggsum_upper_bound(&dp->dp_wrlog_total);
	delay_min_bytes =
	    zfs_wrlog_data_max * zfs_delay_min_dirty_percent / 100;
	if (wrlog >= zfs_wrlog_data_max) {
		tx_time = zfs_delay_max_ns;
	} else if (wrlog > delay_min_bytes) {
		tx_time = MAX(zfs_delay_scale * (wrlog - delay_min_bytes) /
		    (zfs_wrlog_data_max - wrlog), tx_time);
	}

	if (tx_time == 0)
		return;

	tx_time = MIN(tx_time, zfs_delay_max_ns);
	now = gethrtime();
	if (now > tx->tx_start + tx_time)
		return;

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + tx_time, dp->dp_last_wakeup + tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

	zfs_sleep_until(wakeup);
}
/*
 * This routine attempts to assign the transaction to a transaction group.
 * To do so, we must determine if there is sufficient free space on disk.
 *
 * If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
 * on it), then it is assumed that there is sufficient free space,
 * unless there's insufficient slop space in the pool (see the comment
 * above spa_slop_shift in spa_misc.c).
 *
 * If it is not a "netfree" transaction, then if the data already on disk
 * is over the allowed usage (e.g. quota), this will fail with EDQUOT or
 * ENOSPC.  Otherwise, if the current rough estimate of pending changes,
 * plus the rough estimate of this transaction's changes, may exceed the
 * allowed usage, then this will fail with ERESTART, which will cause the
 * caller to wait for the pending changes to be written to disk (by waiting
 * for the next TXG to open), and then check the space usage again.
 *
 * The rough estimate of pending changes is comprised of the sum of:
 *
 *  - this transaction's holds' txh_space_towrite
 *
 *  - dd_tempreserved[], which is the sum of in-flight transactions'
 *    holds' txh_space_towrite (i.e. those transactions that have called
 *    dmu_tx_assign() but not yet called dmu_tx_commit()).
 *
 *  - dd_space_towrite[], which is the amount of dirtied dbufs.
 *
 * (This sum is restated compactly in the sketch following this comment.)
 *
 * Note that all of these values are inflated by spa_get_worst_case_asize(),
 * which means that we may get ERESTART well before we are actually in danger
 * of running out of space, but this also mitigates any small inaccuracies
 * in the rough estimate (e.g. txh_space_towrite doesn't take into account
 * indirect blocks, and dd_space_towrite[] doesn't take into account changes
 * to the MOS).
 *
 * Note that due to this algorithm, it is possible to exceed the allowed
 * usage by one transaction.  Also, as we approach the allowed usage,
 * we will allow a very limited amount of changes into each TXG, thus
 * decreasing performance.
 */
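/*
 * Illustrative restatement (not authoritative; simplified from the
 * comment above):
 *
 *	pending ~= this tx's txh_space_towrite
 *	    + dd_tempreserved[]		(other in-flight transactions)
 *	    + dd_space_towrite[]	(already-dirtied dbufs)
 *
 * all inflated by spa_get_worst_case_asize(); assignment fails with
 * ERESTART when this pending estimate may exceed the allowed usage.
 */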
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err) {
		DMU_TX_STAT_BUMP(dmu_tx_error);
		return (tx->tx_err);
	}

	if (spa_suspended(spa)) {
		DMU_TX_STAT_BUMP(dmu_tx_suspended);

		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    !(txg_how & TXG_WAIT))
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_wrlog_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_wrlog_delay);
		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_dirty_delayed &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	uint64_t towrite = 0;
	uint64_t tohold = 0;
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			/*
			 * This thread can't hold the dn_struct_rwlock
			 * while assigning the tx, because this can lead to
			 * deadlock.  Specifically, if this dnode is already
			 * assigned to an earlier txg, this thread may need
			 * to wait for that txg to sync (the ERESTART case
			 * below).  The other thread that has assigned this
			 * dnode to an earlier txg prevents this txg from
			 * syncing until its tx can complete (calling
			 * dmu_tx_commit()), but it may need to acquire the
			 * dn_struct_rwlock to do so (e.g. via
			 * dmu_buf_hold*()).
			 *
			 * Note that this thread can't hold the lock for
			 * read either, but the rwlock doesn't record
			 * enough information to make that assertion.
			 */
			ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));

			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				DMU_TX_STAT_BUMP(dmu_tx_group);
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += zfs_refcount_count(&txh->txh_space_towrite);
		tohold += zfs_refcount_count(&txh->txh_memory_tohold);
	}
	/* needed allocation: worst-case estimate of write space */
	uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
	/* calculate memory footprint estimate */
	uint64_t memory = towrite + tohold;

	if (tx->tx_dir != NULL && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
		if (err != 0)
			return (err);
	}

	DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
	    txh && txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}
/*
 * Assign tx to a transaction group; txg_how is a bitmask:
 *
 * If TXG_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
 * If TXG_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
 * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs.  Of course this is
 * not strong monotonicity, because the same txg can be returned multiple
 * times in a row.  This guarantee holds both for subsequent calls from
 * one thread and for multiple threads.  For example, it is impossible to
 * observe the following sequence of events:
 *
 *          Thread 1                            Thread 2
 *
 *     dmu_tx_assign(T1, ...)
 *     1 <- dmu_tx_get_txg(T1)
 *                                       dmu_tx_assign(T2, ...)
 *                                       2 <- dmu_tx_get_txg(T2)
 *     dmu_tx_assign(T3, ...)
 *     1 <- dmu_tx_get_txg(T3)
 */
 *     1 <- dmu_tx_get_txg(T3)
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));

	if ((txg_how & TXG_NOTHROTTLE))
		tx->tx_dirty_delayed = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || !(txg_how & TXG_WAIT))
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
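
/*
 * An illustrative, non-compiled sketch of the retry contract described
 * above.  example_write() and its arguments are placeholders, not part
 * of this file: a caller that holds locks assigns without TXG_WAIT and
 * handles ERESTART by dropping its locks, waiting, and retrying with a
 * fresh tx.
 */
#if 0
static int
example_write(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx;
	int err;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, 0);		/* TXG_WAIT not set */
	if (err == ERESTART) {
		/* Drop any locks held by the caller here ... */
		dmu_tx_wait(tx);
		dmu_tx_abort(tx);
		/* ... and reacquire them before retrying. */
		goto top;
	} else if (err != 0) {
		dmu_tx_abort(tx);		/* e.g. ENOSPC or EDQUOT */
		return (err);
	}
	/* ... dirty buffers against tx, e.g. via dmu_write() ... */
	dmu_tx_commit(tx);
	return (0);
}
#endif
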
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;
	hrtime_t before;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	before = gethrtime();

	if (tx->tx_wait_dirty) {
		uint64_t dirty;

		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		if (dp->dp_dirty_total >= zfs_dirty_data_max)
			DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_dirty_delayed only has effect if the
		 * caller used TXG_WAIT.  Otherwise they are going to
		 * destroy this tx and try again.  The common case,
		 * zfs_write(), uses TXG_WAIT.
		 */
		tx->tx_dirty_delayed = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended, we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		/*
		 * If we have a lot of dirty data, just wait until we sync
		 * out a TXG, at which point we'll hopefully have synced
		 * a portion of the changes.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	}

	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}

static void
dmu_tx_destroy(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		zfs_refcount_destroy_many(&txh->txh_space_towrite,
		    zfs_refcount_count(&txh->txh_space_towrite));
		zfs_refcount_destroy_many(&txh->txh_memory_tohold,
		    zfs_refcount_count(&txh->txh_memory_tohold));
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;

		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	dmu_tx_destroy(tx);
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg == 0);

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, SET_ERROR(ECANCELED));

	dmu_tx_destroy(tx);
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
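
/*
 * An illustrative, non-compiled sketch of the commit-callback interface.
 * example_commit_cb and example_state_t are placeholders, not part of
 * this file.  The registered function fires with error == 0 once the
 * assigned txg reaches stable storage, or with an error (e.g. ECANCELED
 * from dmu_tx_abort() above) if it never will, which makes it a natural
 * place to release per-transaction bookkeeping.
 */
#if 0
typedef struct example_state { int es_count; } example_state_t;

static void
example_commit_cb(void *arg, int error)
{
	/* error == 0: the txg synced; otherwise, e.g., ECANCELED. */
	kmem_free(arg, sizeof (example_state_t));
}

	/* Between a successful dmu_tx_assign() and dmu_tx_commit(): */
	dmu_tx_callback_register(tx, example_commit_cb, es);
	dmu_tx_commit(tx);
#endif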

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while ((dcb = list_remove_tail(cb_list)) != NULL) {
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes, used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating or adding a single attribute, dmu_tx_hold_sa() should
 * be used.
 */

/*
 * Hold the necessary attribute names for attribute registration.
 * This should be a very rare case where it is needed; if it does
 * happen it will only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	if (!sa->sa_need_attr_registration)
		return;

	for (int i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);
	if (txh != NULL)
		(void) zfs_refcount_add_many(&txh->txh_space_towrite,
		    SPA_OLD_MAXBLOCKSIZE, FTAG);
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	} else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
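
/*
 * A minimal, non-compiled sketch of the creation-time pattern
 * (placeholder names throughout): all SA holds must be taken before
 * the tx is assigned to a txg.
 */
#if 0
	tx = dmu_tx_create(os);
	dmu_tx_hold_sa_create(tx, total_attr_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... allocate the object and set up its attributes under tx ... */
	dmu_tx_commit(tx);
#endif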

/*
 * Hold an SA attribute.
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the size of a variable-sized attribute
 * may change, since that can force the SA layout to be updated and the
 * attribute to move into the spill block; in that case the spill block
 * is held as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	DB_DNODE_ENTER(db);
	dmu_tx_hold_bonus_by_dnode(tx, DB_DNODE(db));
	DB_DNODE_EXIT(db);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
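
/*
 * A minimal, non-compiled sketch of the attribute-update pattern
 * (placeholder names): pass may_grow == B_FALSE for a fixed-size
 * update, and B_TRUE whenever attribute sizes may change so that the
 * spill block is also covered.
 */
#if 0
	tx = dmu_tx_create(os);
	dmu_tx_hold_sa(tx, hdl, B_FALSE);	/* size will not change */
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	(void) sa_update(hdl, attr, &val, sizeof (val), tx);
	dmu_tx_commit(tx);
#endif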

void
dmu_tx_init(void)
{
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_append);
EXPORT_SYMBOL(dmu_tx_hold_append_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif
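
/*
 * The counters installed by dmu_tx_init() are visible from userspace:
 * on Linux under /proc/spl/kstat/zfs/dmu_tx, on FreeBSD via the
 * kstat.zfs.misc.dmu_tx sysctl tree.  A minimal userspace sketch
 * (not part of this module):
 */
#if 0
#include <stdio.h>

int
main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/spl/kstat/zfs/dmu_tx", "r");

	if (fp == NULL)
		return (1);
	while (fgets(line, sizeof (line), fp) != NULL)
		fputs(line, stdout);	/* one name/type/value per counter */
	(void) fclose(fp);
	return (0);
}
#endif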