/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
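	/*
	 * Record the creation time; dmu_tx_delay() uses tx_start to credit
	 * the tx for time already spent, e.g. reading indirect blocks.
	 */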
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

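	/*
	 * With no dbuf cached at this level (or once we reach the dnode's
	 * own dbuf) there is no block pointer or parent to inspect below.
	 */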
	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
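			/*
			 * start (end) is 0 if the write begins (ends) within
			 * the existing single block, 1 if it lies beyond it.
			 */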
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

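/*
 * Estimate the space that freeing the range [off, off+len) of this hold's
 * dnode would release, and the memory needed to hold the indirect blocks
 * that must be read to do so; charge the results to the hold.
 */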
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block  (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *    [linear-scale graph: delay from 0 to 10ms on the vertical axis versus
 *    dirty data from 0% to 100% of zfs_dirty_data_max on the horizontal
 *    axis; the curve stays near zero over most of the range and rises
 *    steeply toward the limit, with zfs_delay_scale marking the delay at
 *    the midpoint.]
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 *    [log-scale graph: delay from 10us to 100ms on the vertical axis versus
 *    dirty data from 0% to 100% of zfs_dirty_data_max on the horizontal
 *    axis, with zfs_delay_scale again marking the midpoint.]
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
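/*
 * A worked example with illustrative settings (these are common defaults,
 * not values taken from this file): zfs_dirty_data_max = 4GB,
 * zfs_delay_min_dirty_percent = 60 (so delay starts at 2.4GB dirty) and
 * zfs_delay_scale = 500,000.  At 3GB of dirty data,
 *
 *	min_tx_time = 500,000 * (3GB - 2.4GB) / (4GB - 3GB) = 300,000ns
 *
 * i.e. roughly 300us per transaction, and the delay grows sharply as the
 * dirty total approaches 4GB (capped at zfs_delay_max_ns).
 */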
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

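/*
 * Try to assign the tx to the currently open txg: total the per-hold
 * space and memory estimates and reserve that space in the tx's dsl_dir.
 * Returns 0 on success, ERESTART if the caller should wait and retry,
 * or another errno on failure.
 */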
static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

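/*
 * Back out a partially completed dmu_tx_try_assign(): release the txg
 * hold and drop the per-dnode tx holds taken so far (up to the hold that
 * could not be assigned).
 */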

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1) TXG_WAIT.  If the current open txg is full, waits until there's
 *     a new one.  This should be used when you're not holding locks.
 *     It will only fail if we're truly out of space (or over quota).
 *
 * (2) TXG_NOWAIT.  If we can't assign into the current open txg without
 *     blocking, returns immediately with ERESTART.  This should be used
 *     whenever you're holding locks.  On an ERESTART error, the caller
 *     should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3) TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *     has already been called on behalf of this operation (though
 *     most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
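
/*
 * Illustrative caller sketch (not part of the original file): the
 * TXG_NOWAIT retry loop described in the comment above, for callers that
 * hold locks across the hold/assign step.  The object, offset and length
 * arguments are placeholders; a real caller would also drop and reacquire
 * its own locks around dmu_tx_wait().  The "waited" flag shows the
 * intended use of TXG_WAITED: once dmu_tx_wait() has been called for
 * this operation, the retry skips the dirty-data delay.
 */
static int
example_assign_nowait(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx;
	boolean_t waited = B_FALSE;
	int err;

	for (;;) {
		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, object, off, len);
		err = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
		if (err == 0)
			break;
		if (err != ERESTART) {
			dmu_tx_abort(tx);
			return (err);
		}
		/* wait using the failed tx's state, then destroy it and retry */
		dmu_tx_wait(tx);
		waited = B_TRUE;
		dmu_tx_abort(tx);
	}

	/* ... dirty the held range (e.g. via dmu_write()) under this tx ... */

	dmu_tx_commit(tx);
	return (0);
}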

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}
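
/*
 * Illustrative sketch (not part of the original file): the TXG_WAIT form,
 * for callers that do not hold locks across the assignment.  In contrast
 * to the TXG_NOWAIT loop above, dmu_tx_assign() calls dmu_tx_wait()
 * itself on ERESTART, so the only errors that reach the caller are real
 * failures such as running out of space or quota.  The object, offset
 * and length arguments are placeholders.
 */
static int
example_assign_wait(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, off, len);

	err = dmu_tx_assign(tx, TXG_WAIT);	/* waits internally as needed */
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* ... dirty the held range under this tx ... */

	dmu_tx_commit(tx);
	return (0);
}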

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
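
/*
 * Illustrative sketch (not part of the original file): registering a
 * commit callback.  "example_done_t" and the callback body are
 * hypothetical.  The callback is invoked with error == 0 once the
 * assigned txg has committed, or with ECANCELED if the tx is aborted
 * (see dmu_tx_abort() above); a real consumer would add its own
 * synchronization around the flag.
 */
typedef struct example_done {
	boolean_t	ed_done;
	int		ed_error;
} example_done_t;

static void
example_commit_cb(void *arg, int error)
{
	example_done_t *ed = arg;

	ed->ed_error = error;
	ed->ed_done = B_TRUE;
}

/*
 * A caller registers the callback on an assigned tx, before committing:
 *
 *	dmu_tx_callback_register(tx, example_commit_cb, &ed);
 *	dmu_tx_commit(tx);
 */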

/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes to be added during
 * object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed; if it does
 * happen it will only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
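
/*
 * Illustrative sketch (not part of the original file): the create-time
 * pattern.  "attr_bytes" is a placeholder for the caller's estimate of
 * the total size of the attributes it will create on the new object
 * (the ZPL, for example, passes its base attribute size plus the ACL
 * size); dmu_tx_hold_sa_create() falls back to holding a spill block
 * when that estimate does not fit in the bonus buffer.
 */
static int
example_hold_for_create(objset_t *os, int attr_bytes)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_sa_create(tx, attr_bytes);
	/* a real caller would also hold the directory ZAP it links into */

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* ... allocate the object and build its SA bundle here ... */

	dmu_tx_commit(tx);
	return (0);
}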

/*
 * Hold an SA attribute.
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates whether this update can add attributes or grow a
 * variable-sized attribute, i.e. whether the object's SA layout may
 * change; if so, the layout ZAP and the spill block are held as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
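
/*
 * Illustrative sketch (not part of the original file): holding and
 * updating a single fixed-size attribute on an existing object via its
 * SA handle.  may_grow is B_FALSE because a same-size overwrite cannot
 * change the layout; a caller adding attributes or growing a
 * variable-length one would pass B_TRUE instead.  The attribute id and
 * value are placeholders supplied by the hypothetical caller.
 */
static int
example_update_sa_attr(objset_t *os, sa_handle_t *hdl, sa_attr_type_t attr,
    uint64_t value)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_sa(tx, hdl, B_FALSE);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	err = sa_update(hdl, attr, &value, sizeof (value), tx);
	dmu_tx_commit(tx);
	return (err);
}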