/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs are added to the dbuf cache once the last hold is released. If a
 * dbuf is later accessed and still exists in the dbuf cache, then it will
 * be removed from the cache and later re-added to the head of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 */
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
uint64_t dbuf_cache_max_bytes = 100 * 1024 * 1024;

/* Cap the size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_max_shift = 5;

/*
 * The dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                      evicting     eviction   directly
 *                                        thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
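
/*
 * A worked example with the defaults above (illustrative figures; the
 * actual values are tunable and clamped against the ARC size at init):
 * with dbuf_cache_max_bytes = 100MB and hiwater/lowater percentages of
 * 10, the low water mark is 90MB and the high water mark is 110MB.
 * Growing past 100MB signals the eviction thread, which evicts down to
 * 90MB; past 110MB, the adding thread first evicts one dbuf itself
 * (see dbuf_evict_notify()) before signalling the thread again.
 */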

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an equal
 * element in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
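/*
 * A usage sketch (illustrative, not a verbatim caller): the thread that
 * loses the insertion race must discard its never-published dbuf and
 * continue with the existing one returned here, e.g.:
 *
 *	dmu_buf_impl_t *odb;
 *	if ((odb = dbuf_hash_insert(db)) != NULL) {
 *		kmem_cache_free(dbuf_kmem_cache, db);
 *		return (odb);
 *	}
 *
 * Note that on success (NULL return) the new dbuf is published with its
 * db_mtx still held, taken just before it was linked into the bucket.
 */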
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}
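
/*
 * For reference, a consumer arranges for these callbacks by embedding a
 * dmu_buf_user_t in its own structure and registering it; a hedged sketch
 * with made-up names (my_state_t, my_evict_sync, my_evict_async), using
 * dmu_buf_init_user() (prototyped at the top of this file) and
 * dmu_buf_set_user():
 *
 *	typedef struct my_state {
 *		dmu_buf_user_t ms_dbu;	(embedded; callbacks get &ms_dbu)
 *		dmu_buf_t *ms_db;
 *	} my_state_t;
 *
 *	dmu_buf_init_user(&ms->ms_dbu, my_evict_sync, my_evict_async,
 *	    &ms->ms_db);
 *	(void) dmu_buf_set_user(&db->db, &ms->ms_dbu);
 */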

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}
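
/*
 * To make the low-order-bits point concrete (illustrative arithmetic, not
 * a property the code relies on beyond what the comment above states): if
 * the multilist has 64 sublists, the modulo above reduces to hash % 64,
 * i.e. just the low six bits of the hash. If those bits were poorly
 * mixed, dbufs would crowd into a few sublists and skew the even
 * distribution that dbuf_evict_thread() assumes.
 */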

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_cache_size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_cache);
	multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_cache_size,
		    db->db.db_size, db);
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size. The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
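	/*
	 * Worked example (illustrative numbers): with 16GB of physical
	 * memory, the loop below doubles hsize from 2^16 until
	 * hsize * 4096 >= 16GB, stopping at hsize = 2^22 buckets. The
	 * bucket array then costs 2^22 * 8 bytes = 32MB, which matches
	 * the 2MB-per-GB estimate above.
	 */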
The table will take up 580ea8dc4b6Seschrock * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers). 581fa9e4066Sahrens */ 582ea8dc4b6Seschrock while (hsize * 4096 < physmem * PAGESIZE) 583fa9e4066Sahrens hsize <<= 1; 584fa9e4066Sahrens 585ea8dc4b6Seschrock retry: 586fa9e4066Sahrens h->hash_table_mask = hsize - 1; 587ea8dc4b6Seschrock h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); 588ea8dc4b6Seschrock if (h->hash_table == NULL) { 589ea8dc4b6Seschrock /* XXX - we should really return an error instead of assert */ 590ea8dc4b6Seschrock ASSERT(hsize > (1ULL << 10)); 591ea8dc4b6Seschrock hsize >>= 1; 592ea8dc4b6Seschrock goto retry; 593ea8dc4b6Seschrock } 594fa9e4066Sahrens 595dcbf3bd6SGeorge Wilson dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t", 596fa9e4066Sahrens sizeof (dmu_buf_impl_t), 597fa9e4066Sahrens 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0); 598fa9e4066Sahrens 599fa9e4066Sahrens for (i = 0; i < DBUF_MUTEXES; i++) 600fa9e4066Sahrens mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL); 601bc9014e6SJustin Gibbs 602bc9014e6SJustin Gibbs /* 603dcbf3bd6SGeorge Wilson * Setup the parameters for the dbuf cache. We cap the size of the 604dcbf3bd6SGeorge Wilson * dbuf cache to 1/32nd (default) of the size of the ARC. 605dcbf3bd6SGeorge Wilson */ 606dcbf3bd6SGeorge Wilson dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes, 607dcbf3bd6SGeorge Wilson arc_max_bytes() >> dbuf_cache_max_shift); 608dcbf3bd6SGeorge Wilson 609dcbf3bd6SGeorge Wilson /* 610bc9014e6SJustin Gibbs * All entries are queued via taskq_dispatch_ent(), so min/maxalloc 611bc9014e6SJustin Gibbs * configuration is not required. 612bc9014e6SJustin Gibbs */ 613bc9014e6SJustin Gibbs dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0); 614dcbf3bd6SGeorge Wilson 61594c2d0ebSMatthew Ahrens dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t), 616dcbf3bd6SGeorge Wilson offsetof(dmu_buf_impl_t, db_cache_link), 617dcbf3bd6SGeorge Wilson dbuf_cache_multilist_index_func); 618dcbf3bd6SGeorge Wilson refcount_create(&dbuf_cache_size); 619dcbf3bd6SGeorge Wilson 620dcbf3bd6SGeorge Wilson tsd_create(&zfs_dbuf_evict_key, NULL); 621dcbf3bd6SGeorge Wilson dbuf_evict_thread_exit = B_FALSE; 622dcbf3bd6SGeorge Wilson mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL); 623dcbf3bd6SGeorge Wilson cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL); 624dcbf3bd6SGeorge Wilson dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread, 625dcbf3bd6SGeorge Wilson NULL, 0, &p0, TS_RUN, minclsyspri); 626fa9e4066Sahrens } 627fa9e4066Sahrens 628fa9e4066Sahrens void 629fa9e4066Sahrens dbuf_fini(void) 630fa9e4066Sahrens { 631fa9e4066Sahrens dbuf_hash_table_t *h = &dbuf_hash_table; 632fa9e4066Sahrens int i; 633fa9e4066Sahrens 634fa9e4066Sahrens for (i = 0; i < DBUF_MUTEXES; i++) 635fa9e4066Sahrens mutex_destroy(&h->hash_mutexes[i]); 636fa9e4066Sahrens kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); 637dcbf3bd6SGeorge Wilson kmem_cache_destroy(dbuf_kmem_cache); 638bc9014e6SJustin Gibbs taskq_destroy(dbu_evict_taskq); 639dcbf3bd6SGeorge Wilson 640dcbf3bd6SGeorge Wilson mutex_enter(&dbuf_evict_lock); 641dcbf3bd6SGeorge Wilson dbuf_evict_thread_exit = B_TRUE; 642dcbf3bd6SGeorge Wilson while (dbuf_evict_thread_exit) { 643dcbf3bd6SGeorge Wilson cv_signal(&dbuf_evict_cv); 644dcbf3bd6SGeorge Wilson cv_wait(&dbuf_evict_cv, &dbuf_evict_lock); 645dcbf3bd6SGeorge Wilson } 646dcbf3bd6SGeorge Wilson mutex_exit(&dbuf_evict_lock); 647dcbf3bd6SGeorge Wilson tsd_destroy(&zfs_dbuf_evict_key); 

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	refcount_destroy(&dbuf_cache_size);
	multilist_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 * (indblkshift - SPA_BLKPTRSHIFT))
		 */
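		/*
		 * Worked example (illustrative sizes): with 128K data blocks
		 * (datablkshift = 17) and 16K indirect blocks
		 * (indblkshift = 14), an indirect block holds
		 * 2^(14 - SPA_BLKPTRSHIFT) = 2^7 = 128 block pointers, so
		 * the level 1 blkid is offset >> (17 + 1 * 7) =
		 * offset >> 24: each level 1 block covers 16MB of data.
		 */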
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		arc_buf_destroy(buf, db);
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
		    db->db.db_size));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			blkptr_t *bps = db->db.db_data;
			for (int i = 0; i < ((1 <<
			    DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
			    i++) {
				blkptr_t *bp = &bps[i];
				ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
				    1 << dn->dn_indblkshift);
				BP_SET_LSIZE(bp,
				    BP_GET_LEVEL(db->db_blkptr) == 1 ?
				    dn->dn_datablksz :
				    BP_GET_LSIZE(db->db_blkptr));
				BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
				BP_SET_LEVEL(bp,
				    BP_GET_LEVEL(db->db_blkptr) - 1);
				BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
			}
		}
		DB_DNODE_EXIT(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received
 * compressed and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
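/*
 * An illustrative timeline (made-up txg numbers): a buffer dirtied in
 * txg 10 is being synced while a writer dirties it again in txg 11.
 * Without a copy, the txg 11 modification would alter the data that
 * txg 10 is concurrently writing out. This function gives the unsynced
 * txg 10 dirty record its own copy of the data, so the active buffer can
 * then be modified safely on behalf of txg 11.
 */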
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 *	or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = arc_buf_size(db->db_buf);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;
		enum zio_compress compress_type =
		    arc_get_compression(db->db_buf);

		if (compress_type == ZIO_COMPRESS_OFF) {
			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
		} else {
			ASSERT3U(type, ==, ARC_BUFC_DATA);
			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
			    size, arc_buf_lsize(db->db_buf), compress_type);
		}
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		db->db_buf = NULL;
		dbuf_clear_data(db);
	}
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		/*
		 * If the arc buf is compressed, we need to decompress it to
This could happen during the "zfs receive" of 11085602294fSDan Kimmel * a stream which is compressed and deduplicated. 11095602294fSDan Kimmel */ 11105602294fSDan Kimmel if (db->db_buf != NULL && 11115602294fSDan Kimmel arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 11125602294fSDan Kimmel dbuf_fix_old_data(db, 11135602294fSDan Kimmel spa_syncing_txg(dmu_objset_spa(db->db_objset))); 11145602294fSDan Kimmel err = arc_decompress(db->db_buf); 11155602294fSDan Kimmel dbuf_set_data(db, db->db_buf); 11165602294fSDan Kimmel } 1117ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 111813506d1eSmaybee if (prefetch) 1119cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1120ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1121744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1122744947dcSTom Erickson DB_DNODE_EXIT(db); 1123ea8dc4b6Seschrock } else if (db->db_state == DB_UNCACHED) { 1124744947dcSTom Erickson spa_t *spa = dn->dn_objset->os_spa; 1125def4fac5SMatthew Ahrens boolean_t need_wait = B_FALSE; 1126744947dcSTom Erickson 1127def4fac5SMatthew Ahrens if (zio == NULL && 1128def4fac5SMatthew Ahrens db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1129744947dcSTom Erickson zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1130def4fac5SMatthew Ahrens need_wait = B_TRUE; 1131def4fac5SMatthew Ahrens } 1132cf6106c8SMatthew Ahrens dbuf_read_impl(db, zio, flags); 113313506d1eSmaybee 1134ea8dc4b6Seschrock /* dbuf_read_impl has dropped db_mtx for us */ 1135ea8dc4b6Seschrock 113613506d1eSmaybee if (prefetch) 1137cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1138ea8dc4b6Seschrock 1139ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1140744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1141744947dcSTom Erickson DB_DNODE_EXIT(db); 1142ea8dc4b6Seschrock 1143def4fac5SMatthew Ahrens if (need_wait) 1144ea8dc4b6Seschrock err = zio_wait(zio); 1145ea8dc4b6Seschrock } else { 11463e30c24aSWill Andrews /* 11473e30c24aSWill Andrews * Another reader came in while the dbuf was in flight 11483e30c24aSWill Andrews * between UNCACHED and CACHED. Either a writer will finish 11493e30c24aSWill Andrews * writing the buffer (sending the dbuf to CACHED) or the 11503e30c24aSWill Andrews * first reader's request will reach the read_done callback 11513e30c24aSWill Andrews * and send the dbuf to CACHED. Otherwise, a failure 11523e30c24aSWill Andrews * occurred and the dbuf went to UNCACHED. 11533e30c24aSWill Andrews */ 115413506d1eSmaybee mutex_exit(&db->db_mtx); 115513506d1eSmaybee if (prefetch) 1156cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1157ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1158744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1159744947dcSTom Erickson DB_DNODE_EXIT(db); 116013506d1eSmaybee 11613e30c24aSWill Andrews /* Skip the wait per the caller's request. */ 116213506d1eSmaybee mutex_enter(&db->db_mtx); 1163ea8dc4b6Seschrock if ((flags & DB_RF_NEVERWAIT) == 0) { 1164ea8dc4b6Seschrock while (db->db_state == DB_READ || 1165ea8dc4b6Seschrock db->db_state == DB_FILL) { 1166fa9e4066Sahrens ASSERT(db->db_state == DB_READ || 1167fa9e4066Sahrens (flags & DB_RF_HAVESTRUCT) == 0); 1168f6164ad6SAdam H. Leventhal DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1169f6164ad6SAdam H. 
Leventhal db, zio_t *, zio); 1170fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1171fa9e4066Sahrens } 1172ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 1173be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 1174ea8dc4b6Seschrock } 1175fa9e4066Sahrens mutex_exit(&db->db_mtx); 1176fa9e4066Sahrens } 1177fa9e4066Sahrens 1178ea8dc4b6Seschrock return (err); 1179fa9e4066Sahrens } 1180fa9e4066Sahrens 1181fa9e4066Sahrens static void 1182fa9e4066Sahrens dbuf_noread(dmu_buf_impl_t *db) 1183fa9e4066Sahrens { 1184fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 11850a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1186fa9e4066Sahrens mutex_enter(&db->db_mtx); 1187fa9e4066Sahrens while (db->db_state == DB_READ || db->db_state == DB_FILL) 1188fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1189fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 1190ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 119143466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 1192ad23a2dbSjohansen 1193ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1194fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 11955602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1196fa9e4066Sahrens db->db_state = DB_FILL; 119782c9918fSTim Haley } else if (db->db_state == DB_NOFILL) { 1198bc9014e6SJustin Gibbs dbuf_clear_data(db); 1199fa9e4066Sahrens } else { 1200fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_CACHED); 1201fa9e4066Sahrens } 1202fa9e4066Sahrens mutex_exit(&db->db_mtx); 1203fa9e4066Sahrens } 1204fa9e4066Sahrens 1205fa9e4066Sahrens void 1206c717a561Smaybee dbuf_unoverride(dbuf_dirty_record_t *dr) 1207fa9e4066Sahrens { 1208c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1209b24ab676SJeff Bonwick blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1210c717a561Smaybee uint64_t txg = dr->dr_txg; 1211c5c6ffa0Smaybee 1212c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 121340713f2bSAlan Somers /* 121440713f2bSAlan Somers * This assert is valid because dmu_sync() expects to be called by 121540713f2bSAlan Somers * a zilog's get_data while holding a range lock. This call only 121640713f2bSAlan Somers * comes from dbuf_dirty() callers who must also hold a range lock. 121740713f2bSAlan Somers */ 1218c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1219c717a561Smaybee ASSERT(db->db_level == 0); 1220c717a561Smaybee 12210a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 1222c717a561Smaybee dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1223c717a561Smaybee return; 1224c717a561Smaybee 1225b24ab676SJeff Bonwick ASSERT(db->db_data_pending != dr); 1226b24ab676SJeff Bonwick 1227fa9e4066Sahrens /* free this block */ 122843466aaeSMax Grossman if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 122943466aaeSMax Grossman zio_free(db->db_objset->os_spa, txg, bp); 1230b24ab676SJeff Bonwick 1231c717a561Smaybee dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 123280901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = B_FALSE; 123380901aeaSGeorge Wilson 12346b4acc8bSahrens /* 12356b4acc8bSahrens * Release the already-written buffer, so we leave it in 12366b4acc8bSahrens * a consistent dirty state. Note that all callers are 12376b4acc8bSahrens * modifying the buffer, so they will immediately do 12386b4acc8bSahrens * another (redundant) arc_release(). Therefore, leave 12396b4acc8bSahrens * the buf thawed to save the effort of freezing & 12406b4acc8bSahrens * immediately re-thawing it. 
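 * For example, dbuf_redirty() calls this function and then simply
 * re-thaws the still-released buf with arc_buf_thaw().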
12416b4acc8bSahrens */
1242c717a561Smaybee arc_release(dr->dt.dl.dr_data, db);
1243fa9e4066Sahrens }
1244fa9e4066Sahrens
1245cdb0ab79Smaybee /*
1246cdb0ab79Smaybee * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1247cdb0ab79Smaybee * data blocks in the free range, so that any future readers will find
124843466aaeSMax Grossman * empty blocks.
1249cdb0ab79Smaybee */
1250fa9e4066Sahrens void
12510f6d88adSAlex Reece dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
12520f6d88adSAlex Reece dmu_tx_t *tx)
1253fa9e4066Sahrens {
1254bc9014e6SJustin Gibbs dmu_buf_impl_t db_search;
1255bc9014e6SJustin Gibbs dmu_buf_impl_t *db, *db_next;
1256fa9e4066Sahrens uint64_t txg = tx->tx_txg;
12570f6d88adSAlex Reece avl_index_t where;
1258fa9e4066Sahrens
1259653af1b8SStephen Blinick if (end_blkid > dn->dn_maxblkid &&
1260653af1b8SStephen Blinick !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
12610f6d88adSAlex Reece end_blkid = dn->dn_maxblkid;
12620f6d88adSAlex Reece dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
12630f6d88adSAlex Reece
12640f6d88adSAlex Reece db_search.db_level = 0;
12650f6d88adSAlex Reece db_search.db_blkid = start_blkid;
126686bb58aeSAlex Reece db_search.db_state = DB_SEARCH;
12672f3d8780SMatthew Ahrens
1268713d6c20SMatthew Ahrens mutex_enter(&dn->dn_dbufs_mtx);
12690f6d88adSAlex Reece db = avl_find(&dn->dn_dbufs, &db_search, &where);
12700f6d88adSAlex Reece ASSERT3P(db, ==, NULL);
12712f3d8780SMatthew Ahrens
12720f6d88adSAlex Reece db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
12730f6d88adSAlex Reece
12740f6d88adSAlex Reece for (; db != NULL; db = db_next) {
12750f6d88adSAlex Reece db_next = AVL_NEXT(&dn->dn_dbufs, db);
12760a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1277cdb0ab79Smaybee
12780f6d88adSAlex Reece if (db->db_level != 0 || db->db_blkid > end_blkid) {
12790f6d88adSAlex Reece break;
12800f6d88adSAlex Reece }
12810f6d88adSAlex Reece ASSERT3U(db->db_blkid, >=, start_blkid);
1282fa9e4066Sahrens
1283fa9e4066Sahrens /* found a level 0 buffer in the range */
1284fa9e4066Sahrens mutex_enter(&db->db_mtx);
12853b2aab18SMatthew Ahrens if (dbuf_undirty(db, tx)) {
12863b2aab18SMatthew Ahrens /* mutex has been dropped and dbuf destroyed */
12873b2aab18SMatthew Ahrens continue;
12883b2aab18SMatthew Ahrens }
12893b2aab18SMatthew Ahrens
1290ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED ||
129182c9918fSTim Haley db->db_state == DB_NOFILL ||
1292ea8dc4b6Seschrock db->db_state == DB_EVICTING) {
1293fa9e4066Sahrens ASSERT(db->db.db_data == NULL);
1294fa9e4066Sahrens mutex_exit(&db->db_mtx);
1295fa9e4066Sahrens continue;
1296fa9e4066Sahrens }
1297c543ec06Sahrens if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1298c543ec06Sahrens /* will be handled in dbuf_read_done or dbuf_rele */
1299c717a561Smaybee db->db_freed_in_flight = TRUE;
1300fa9e4066Sahrens mutex_exit(&db->db_mtx);
1301fa9e4066Sahrens continue;
1302fa9e4066Sahrens }
1303ea8dc4b6Seschrock if (refcount_count(&db->db_holds) == 0) {
1304ea8dc4b6Seschrock ASSERT(db->db_buf);
1305dcbf3bd6SGeorge Wilson dbuf_destroy(db);
1306ea8dc4b6Seschrock continue;
1307ea8dc4b6Seschrock }
1308c717a561Smaybee /* The dbuf is referenced */
1309fa9e4066Sahrens
1310c717a561Smaybee if (db->db_last_dirty != NULL) {
1311c717a561Smaybee dbuf_dirty_record_t *dr = db->db_last_dirty;
1312c717a561Smaybee
1313c717a561Smaybee if (dr->dr_txg == txg) {
1314ea8dc4b6Seschrock /*
1315c717a561Smaybee * This buffer is "in-use", re-adjust the file
1316c717a561Smaybee * size to reflect that this buffer may
1317c717a561Smaybee * contain new data when we sync.
1318ea8dc4b6Seschrock */
131906e0070dSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID &&
132006e0070dSMark Shellenbaum db->db_blkid > dn->dn_maxblkid)
132144eda4d7Smaybee dn->dn_maxblkid = db->db_blkid;
1322c717a561Smaybee dbuf_unoverride(dr);
1323c717a561Smaybee } else {
1324c717a561Smaybee /*
1325c717a561Smaybee * This dbuf is not dirty in the open context.
1326c717a561Smaybee * Either uncache it (if it's not referenced in
1327c717a561Smaybee * the open context) or reset its contents to
1328c717a561Smaybee * empty.
1329c717a561Smaybee */
1330c717a561Smaybee dbuf_fix_old_data(db, txg);
133144eda4d7Smaybee }
1332c717a561Smaybee }
1333c717a561Smaybee /* clear the contents if it's cached */
1334ea8dc4b6Seschrock if (db->db_state == DB_CACHED) {
1335ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL);
1336fa9e4066Sahrens arc_release(db->db_buf, db);
1337fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size);
13386b4acc8bSahrens arc_buf_freeze(db->db_buf);
1339fa9e4066Sahrens }
1340ea8dc4b6Seschrock
1341fa9e4066Sahrens mutex_exit(&db->db_mtx);
1342fa9e4066Sahrens }
1343fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx);
1344fa9e4066Sahrens }
1345fa9e4066Sahrens
1346fa9e4066Sahrens void
1347fa9e4066Sahrens dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1348fa9e4066Sahrens {
1349fa9e4066Sahrens arc_buf_t *buf, *obuf;
1350fa9e4066Sahrens int osize = db->db.db_size;
1351ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1352744947dcSTom Erickson dnode_t *dn;
1353fa9e4066Sahrens
13540a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1355ea8dc4b6Seschrock
1356744947dcSTom Erickson DB_DNODE_ENTER(db);
1357744947dcSTom Erickson dn = DB_DNODE(db);
1358744947dcSTom Erickson
1359fa9e4066Sahrens /* XXX does *this* func really need the lock? */
1360744947dcSTom Erickson ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1361fa9e4066Sahrens
1362fa9e4066Sahrens /*
136343466aaeSMax Grossman * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
1364fa9e4066Sahrens * is OK, because there can be no other references to the db
1365fa9e4066Sahrens * when we are changing its size, so no concurrent DB_FILL can
1366fa9e4066Sahrens * be happening.
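 * The resize itself then amounts to: dirty the buffer, allocate a new
 * arc buf of the requested size, copy MIN(osize, size) bytes from the
 * old buf, zero any remainder, and swap the new buf in under db_mtx.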
1367fa9e4066Sahrens */ 1368ea8dc4b6Seschrock /* 1369ea8dc4b6Seschrock * XXX we should be doing a dbuf_read, checking the return 1370ea8dc4b6Seschrock * value and returning that up to our callers 1371ea8dc4b6Seschrock */ 137243466aaeSMax Grossman dmu_buf_will_dirty(&db->db, tx); 1373fa9e4066Sahrens 1374fa9e4066Sahrens /* create the data buffer for the new block */ 13755602294fSDan Kimmel buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1376fa9e4066Sahrens 1377fa9e4066Sahrens /* copy old block data to the new block */ 1378fa9e4066Sahrens obuf = db->db_buf; 1379f65e61c0Sahrens bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1380fa9e4066Sahrens /* zero the remainder */ 1381f65e61c0Sahrens if (size > osize) 1382fa9e4066Sahrens bzero((uint8_t *)buf->b_data + osize, size - osize); 1383fa9e4066Sahrens 1384fa9e4066Sahrens mutex_enter(&db->db_mtx); 1385fa9e4066Sahrens dbuf_set_data(db, buf); 1386dcbf3bd6SGeorge Wilson arc_buf_destroy(obuf, db); 1387fa9e4066Sahrens db->db.db_size = size; 1388fa9e4066Sahrens 1389c717a561Smaybee if (db->db_level == 0) { 1390c717a561Smaybee ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1391c717a561Smaybee db->db_last_dirty->dt.dl.dr_data = buf; 1392c717a561Smaybee } 1393fa9e4066Sahrens mutex_exit(&db->db_mtx); 1394fa9e4066Sahrens 139561e255ceSMatthew Ahrens dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1396744947dcSTom Erickson DB_DNODE_EXIT(db); 1397fa9e4066Sahrens } 1398fa9e4066Sahrens 13993f9d6ad7SLin Ling void 14003f9d6ad7SLin Ling dbuf_release_bp(dmu_buf_impl_t *db) 14013f9d6ad7SLin Ling { 140243466aaeSMax Grossman objset_t *os = db->db_objset; 14033f9d6ad7SLin Ling 14043f9d6ad7SLin Ling ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 14053f9d6ad7SLin Ling ASSERT(arc_released(os->os_phys_buf) || 14063f9d6ad7SLin Ling list_link_active(&os->os_dsl_dataset->ds_synced_link)); 14073f9d6ad7SLin Ling ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 14083f9d6ad7SLin Ling 14091b912ec7SGeorge Wilson (void) arc_release(db->db_buf, db); 14103f9d6ad7SLin Ling } 14113f9d6ad7SLin Ling 14120f2e7d03SMatthew Ahrens /* 14130f2e7d03SMatthew Ahrens * We already have a dirty record for this TXG, and we are being 14140f2e7d03SMatthew Ahrens * dirtied again. 14150f2e7d03SMatthew Ahrens */ 14160f2e7d03SMatthew Ahrens static void 14170f2e7d03SMatthew Ahrens dbuf_redirty(dbuf_dirty_record_t *dr) 14180f2e7d03SMatthew Ahrens { 14190f2e7d03SMatthew Ahrens dmu_buf_impl_t *db = dr->dr_dbuf; 14200f2e7d03SMatthew Ahrens 14210f2e7d03SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 14220f2e7d03SMatthew Ahrens 14230f2e7d03SMatthew Ahrens if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 14240f2e7d03SMatthew Ahrens /* 14250f2e7d03SMatthew Ahrens * If this buffer has already been written out, 14260f2e7d03SMatthew Ahrens * we now need to reset its state. 14270f2e7d03SMatthew Ahrens */ 14280f2e7d03SMatthew Ahrens dbuf_unoverride(dr); 14290f2e7d03SMatthew Ahrens if (db->db.db_object != DMU_META_DNODE_OBJECT && 14300f2e7d03SMatthew Ahrens db->db_state != DB_NOFILL) { 14310f2e7d03SMatthew Ahrens /* Already released on initial dirty, so just thaw. 
*/ 14320f2e7d03SMatthew Ahrens ASSERT(arc_released(db->db_buf)); 14330f2e7d03SMatthew Ahrens arc_buf_thaw(db->db_buf); 14340f2e7d03SMatthew Ahrens } 14350f2e7d03SMatthew Ahrens } 14360f2e7d03SMatthew Ahrens } 14370f2e7d03SMatthew Ahrens 1438c717a561Smaybee dbuf_dirty_record_t * 1439fa9e4066Sahrens dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1440fa9e4066Sahrens { 1441744947dcSTom Erickson dnode_t *dn; 1442744947dcSTom Erickson objset_t *os; 1443c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 1444fa9e4066Sahrens int drop_struct_lock = FALSE; 1445fa9e4066Sahrens int txgoff = tx->tx_txg & TXG_MASK; 1446fa9e4066Sahrens 1447fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1448fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 14499c9dc39aSek110237 DMU_TX_DIRTY_BUF(tx, db); 1450fa9e4066Sahrens 1451744947dcSTom Erickson DB_DNODE_ENTER(db); 1452744947dcSTom Erickson dn = DB_DNODE(db); 1453fa9e4066Sahrens /* 1454fa9e4066Sahrens * Shouldn't dirty a regular buffer in syncing context. Private 1455fa9e4066Sahrens * objects may be dirtied in syncing context, but only if they 1456fa9e4066Sahrens * were already pre-dirtied in open context. 1457fa9e4066Sahrens */ 1458c166b69dSPaul Dagnelie #ifdef DEBUG 1459c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1460c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1461c166b69dSPaul Dagnelie RW_READER, FTAG); 1462c166b69dSPaul Dagnelie } 1463c717a561Smaybee ASSERT(!dmu_tx_is_syncing(tx) || 1464c717a561Smaybee BP_IS_HOLE(dn->dn_objset->os_rootbp) || 146514843421SMatthew Ahrens DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 146614843421SMatthew Ahrens dn->dn_objset->os_dsl_dataset == NULL); 1467c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1468c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1469c166b69dSPaul Dagnelie #endif 1470fa9e4066Sahrens /* 1471fa9e4066Sahrens * We make this assert for private objects as well, but after we 1472fa9e4066Sahrens * check if we're already dirty. They are allowed to re-dirty 1473fa9e4066Sahrens * in syncing context. 1474fa9e4066Sahrens */ 1475ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1476c717a561Smaybee dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1477fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1478fa9e4066Sahrens 1479fa9e4066Sahrens mutex_enter(&db->db_mtx); 1480fa9e4066Sahrens /* 1481c717a561Smaybee * XXX make this true for indirects too? The problem is that 1482c717a561Smaybee * transactions created with dmu_tx_create_assigned() from 1483c717a561Smaybee * syncing context don't bother holding ahead. 1484fa9e4066Sahrens */ 1485c717a561Smaybee ASSERT(db->db_level != 0 || 148682c9918fSTim Haley db->db_state == DB_CACHED || db->db_state == DB_FILL || 148782c9918fSTim Haley db->db_state == DB_NOFILL); 1488fa9e4066Sahrens 1489fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1490fa9e4066Sahrens /* 1491fa9e4066Sahrens * Don't set dirtyctx to SYNC if we're just modifying this as we 1492fa9e4066Sahrens * initialize the objset. 1493fa9e4066Sahrens */ 1494c166b69dSPaul Dagnelie if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1495c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1496c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1497c166b69dSPaul Dagnelie RW_READER, FTAG); 1498c166b69dSPaul Dagnelie } 1499c166b69dSPaul Dagnelie if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1500c166b69dSPaul Dagnelie dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 
1501c166b69dSPaul Dagnelie DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1502fa9e4066Sahrens ASSERT(dn->dn_dirtyctx_firstset == NULL); 1503fa9e4066Sahrens dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1504fa9e4066Sahrens } 1505c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1506c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1507c166b69dSPaul Dagnelie FTAG); 1508c166b69dSPaul Dagnelie } 1509c166b69dSPaul Dagnelie } 1510fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1511fa9e4066Sahrens 15120a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 15130a586ceaSMark Shellenbaum dn->dn_have_spill = B_TRUE; 15140a586ceaSMark Shellenbaum 1515fa9e4066Sahrens /* 1516fa9e4066Sahrens * If this buffer is already dirty, we're done. 1517fa9e4066Sahrens */ 1518c717a561Smaybee drp = &db->db_last_dirty; 1519c717a561Smaybee ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1520c717a561Smaybee db->db.db_object == DMU_META_DNODE_OBJECT); 15217e2186e3Sbonwick while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 15227e2186e3Sbonwick drp = &dr->dr_next; 15237e2186e3Sbonwick if (dr && dr->dr_txg == tx->tx_txg) { 1524744947dcSTom Erickson DB_DNODE_EXIT(db); 1525744947dcSTom Erickson 15260f2e7d03SMatthew Ahrens dbuf_redirty(dr); 1527fa9e4066Sahrens mutex_exit(&db->db_mtx); 15287e2186e3Sbonwick return (dr); 1529fa9e4066Sahrens } 1530fa9e4066Sahrens 1531fa9e4066Sahrens /* 1532fa9e4066Sahrens * Only valid if not already dirty. 1533fa9e4066Sahrens */ 153414843421SMatthew Ahrens ASSERT(dn->dn_object == 0 || 153514843421SMatthew Ahrens dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1536fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1537fa9e4066Sahrens 1538fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, db->db_level); 1539fa9e4066Sahrens 1540fa9e4066Sahrens /* 1541fa9e4066Sahrens * We should only be dirtying in syncing context if it's the 154214843421SMatthew Ahrens * mos or we're initializing the os or it's a special object. 154314843421SMatthew Ahrens * However, we are allowed to dirty in syncing context provided 154414843421SMatthew Ahrens * we already dirtied it in open context. Hence we must make 154514843421SMatthew Ahrens * this assertion only if we're not already dirty. 
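 * (The assertion lives in the DEBUG block just below; it permits
 * syncing-context dirties only for special objects, the MOS, or an
 * objset whose os_rootbp is still a hole, i.e. one being created.)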
1546fa9e4066Sahrens */ 1547744947dcSTom Erickson os = dn->dn_objset; 15483991b535SGeorge Wilson VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1549c166b69dSPaul Dagnelie #ifdef DEBUG 1550c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1551c166b69dSPaul Dagnelie rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 155214843421SMatthew Ahrens ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 155314843421SMatthew Ahrens os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1554c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1555c166b69dSPaul Dagnelie rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1556c166b69dSPaul Dagnelie #endif 1557fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1558fa9e4066Sahrens 1559fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1560fa9e4066Sahrens 15610a586ceaSMark Shellenbaum if (db->db_blkid != DMU_BONUS_BLKID) { 156261e255ceSMatthew Ahrens dmu_objset_willuse_space(os, db->db.db_size, tx); 15631934e92fSmaybee } 15641934e92fSmaybee 1565ea8dc4b6Seschrock /* 1566ea8dc4b6Seschrock * If this buffer is dirty in an old transaction group we need 1567ea8dc4b6Seschrock * to make a copy of it so that the changes we make in this 1568ea8dc4b6Seschrock * transaction group won't leak out when we sync the older txg. 1569ea8dc4b6Seschrock */ 1570c717a561Smaybee dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1571c717a561Smaybee if (db->db_level == 0) { 1572c717a561Smaybee void *data_old = db->db_buf; 1573c717a561Smaybee 157482c9918fSTim Haley if (db->db_state != DB_NOFILL) { 15750a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 1576c717a561Smaybee dbuf_fix_old_data(db, tx->tx_txg); 1577c717a561Smaybee data_old = db->db.db_data; 1578c717a561Smaybee } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1579fa9e4066Sahrens /* 158082c9918fSTim Haley * Release the data buffer from the cache so 158182c9918fSTim Haley * that we can modify it without impacting 158282c9918fSTim Haley * possible other users of this cached data 158382c9918fSTim Haley * block. Note that indirect blocks and 158482c9918fSTim Haley * private objects are not released until the 158582c9918fSTim Haley * syncing state (since they are only modified 158682c9918fSTim Haley * then). 1587fa9e4066Sahrens */ 1588fa9e4066Sahrens arc_release(db->db_buf, db); 1589fa9e4066Sahrens dbuf_fix_old_data(db, tx->tx_txg); 1590c717a561Smaybee data_old = db->db_buf; 1591fa9e4066Sahrens } 1592c717a561Smaybee ASSERT(data_old != NULL); 159382c9918fSTim Haley } 1594c717a561Smaybee dr->dt.dl.dr_data = data_old; 1595c717a561Smaybee } else { 1596c717a561Smaybee mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1597c717a561Smaybee list_create(&dr->dt.di.dr_children, 1598c717a561Smaybee sizeof (dbuf_dirty_record_t), 1599c717a561Smaybee offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1600fa9e4066Sahrens } 160169962b56SMatthew Ahrens if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 160269962b56SMatthew Ahrens dr->dr_accounted = db->db.db_size; 1603c717a561Smaybee dr->dr_dbuf = db; 1604c717a561Smaybee dr->dr_txg = tx->tx_txg; 1605c717a561Smaybee dr->dr_next = *drp; 1606c717a561Smaybee *drp = dr; 1607fa9e4066Sahrens 1608fa9e4066Sahrens /* 1609fa9e4066Sahrens * We could have been freed_in_flight between the dbuf_noread 1610fa9e4066Sahrens * and dbuf_dirty. We win, as though the dbuf_noread() had 1611fa9e4066Sahrens * happened after the free. 
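 * To make that true, we clear this block from the txg's free range
 * below, so that the sync pass won't free the data we are about to
 * write.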
1612fa9e4066Sahrens */ 16130a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 16140a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) { 1615c717a561Smaybee mutex_enter(&dn->dn_mtx); 1616bf16b11eSMatthew Ahrens if (dn->dn_free_ranges[txgoff] != NULL) { 1617bf16b11eSMatthew Ahrens range_tree_clear(dn->dn_free_ranges[txgoff], 1618bf16b11eSMatthew Ahrens db->db_blkid, 1); 1619bf16b11eSMatthew Ahrens } 1620fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1621c717a561Smaybee db->db_freed_in_flight = FALSE; 1622c717a561Smaybee } 1623fa9e4066Sahrens 1624fa9e4066Sahrens /* 1625fa9e4066Sahrens * This buffer is now part of this txg 1626fa9e4066Sahrens */ 1627fa9e4066Sahrens dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1628fa9e4066Sahrens db->db_dirtycnt += 1; 1629fa9e4066Sahrens ASSERT3U(db->db_dirtycnt, <=, 3); 1630fa9e4066Sahrens 1631fa9e4066Sahrens mutex_exit(&db->db_mtx); 1632fa9e4066Sahrens 16330a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 16340a586ceaSMark Shellenbaum db->db_blkid == DMU_SPILL_BLKID) { 1635c717a561Smaybee mutex_enter(&dn->dn_mtx); 1636c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1637c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1638c717a561Smaybee mutex_exit(&dn->dn_mtx); 1639fa9e4066Sahrens dnode_setdirty(dn, tx); 1640744947dcSTom Erickson DB_DNODE_EXIT(db); 1641c717a561Smaybee return (dr); 164292654925SMatthew Ahrens } 164392654925SMatthew Ahrens 164492654925SMatthew Ahrens /* 164592654925SMatthew Ahrens * The dn_struct_rwlock prevents db_blkptr from changing 164692654925SMatthew Ahrens * due to a write from syncing context completing 164792654925SMatthew Ahrens * while we are running, so we want to acquire it before 164892654925SMatthew Ahrens * looking at db_blkptr. 164992654925SMatthew Ahrens */ 165092654925SMatthew Ahrens if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 165192654925SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 165292654925SMatthew Ahrens drop_struct_lock = TRUE; 165392654925SMatthew Ahrens } 165492654925SMatthew Ahrens 1655d3469faaSMark Maybee /* 1656dcb6872cSMatthew Ahrens * We need to hold the dn_struct_rwlock to make this assertion, 1657dcb6872cSMatthew Ahrens * because it protects dn_phys / dn_next_nlevels from changing. 1658dcb6872cSMatthew Ahrens */ 1659dcb6872cSMatthew Ahrens ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1660dcb6872cSMatthew Ahrens dn->dn_phys->dn_nlevels > db->db_level || 1661dcb6872cSMatthew Ahrens dn->dn_next_nlevels[txgoff] > db->db_level || 1662dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1663dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1664dcb6872cSMatthew Ahrens 1665dcb6872cSMatthew Ahrens /* 166661e255ceSMatthew Ahrens * If we are overwriting a dedup BP, then unless it is snapshotted, 166761e255ceSMatthew Ahrens * when we get to syncing context we will need to decrement its 166861e255ceSMatthew Ahrens * refcount in the DDT. Prefetch the relevant DDT block so that 166961e255ceSMatthew Ahrens * syncing context won't have to wait for the i/o. 
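 * (ddt_prefetch() is expected to return without doing anything for
 * BPs that aren't dedup'd, so this should be cheap in the common
 * case.)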
1670d3469faaSMark Maybee */ 167161e255ceSMatthew Ahrens ddt_prefetch(os->os_spa, db->db_blkptr); 1672fa9e4066Sahrens 16738346f03fSJonathan W Adams if (db->db_level == 0) { 16748346f03fSJonathan W Adams dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 16758346f03fSJonathan W Adams ASSERT(dn->dn_maxblkid >= db->db_blkid); 16768346f03fSJonathan W Adams } 16778346f03fSJonathan W Adams 167844eda4d7Smaybee if (db->db_level+1 < dn->dn_nlevels) { 1679c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 1680c717a561Smaybee dbuf_dirty_record_t *di; 1681c717a561Smaybee int parent_held = FALSE; 1682c717a561Smaybee 1683c717a561Smaybee if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1684fa9e4066Sahrens int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1685c717a561Smaybee 1686fa9e4066Sahrens parent = dbuf_hold_level(dn, db->db_level+1, 1687fa9e4066Sahrens db->db_blkid >> epbs, FTAG); 168801025c89SJohn Harres ASSERT(parent != NULL); 1689c717a561Smaybee parent_held = TRUE; 1690c717a561Smaybee } 1691fa9e4066Sahrens if (drop_struct_lock) 1692fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1693c717a561Smaybee ASSERT3U(db->db_level+1, ==, parent->db_level); 1694c717a561Smaybee di = dbuf_dirty(parent, tx); 1695c717a561Smaybee if (parent_held) 1696ea8dc4b6Seschrock dbuf_rele(parent, FTAG); 1697c717a561Smaybee 1698c717a561Smaybee mutex_enter(&db->db_mtx); 169969962b56SMatthew Ahrens /* 170069962b56SMatthew Ahrens * Since we've dropped the mutex, it's possible that 170169962b56SMatthew Ahrens * dbuf_undirty() might have changed this out from under us. 170269962b56SMatthew Ahrens */ 1703c717a561Smaybee if (db->db_last_dirty == dr || 1704c717a561Smaybee dn->dn_object == DMU_META_DNODE_OBJECT) { 1705c717a561Smaybee mutex_enter(&di->dt.di.dr_mtx); 1706c717a561Smaybee ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1707c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1708c717a561Smaybee list_insert_tail(&di->dt.di.dr_children, dr); 1709c717a561Smaybee mutex_exit(&di->dt.di.dr_mtx); 1710c717a561Smaybee dr->dr_parent = di; 1711c717a561Smaybee } 1712c717a561Smaybee mutex_exit(&db->db_mtx); 1713fa9e4066Sahrens } else { 1714c717a561Smaybee ASSERT(db->db_level+1 == dn->dn_nlevels); 1715c717a561Smaybee ASSERT(db->db_blkid < dn->dn_nblkptr); 1716744947dcSTom Erickson ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1717c717a561Smaybee mutex_enter(&dn->dn_mtx); 1718c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1719c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1720c717a561Smaybee mutex_exit(&dn->dn_mtx); 1721fa9e4066Sahrens if (drop_struct_lock) 1722fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1723fa9e4066Sahrens } 1724fa9e4066Sahrens 1725fa9e4066Sahrens dnode_setdirty(dn, tx); 1726744947dcSTom Erickson DB_DNODE_EXIT(db); 1727c717a561Smaybee return (dr); 1728fa9e4066Sahrens } 1729fa9e4066Sahrens 17303b2aab18SMatthew Ahrens /* 17313e30c24aSWill Andrews * Undirty a buffer in the transaction group referenced by the given 17323e30c24aSWill Andrews * transaction. Return whether this evicted the dbuf. 
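 * A B_TRUE return means the dirty record held the last hold: db_mtx
 * has been dropped and the dbuf destroyed, so callers such as
 * dbuf_free_range() must not touch the dbuf afterwards.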
17333b2aab18SMatthew Ahrens */ 17343b2aab18SMatthew Ahrens static boolean_t 1735fa9e4066Sahrens dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1736fa9e4066Sahrens { 1737744947dcSTom Erickson dnode_t *dn; 1738c717a561Smaybee uint64_t txg = tx->tx_txg; 173917f17c2dSbonwick dbuf_dirty_record_t *dr, **drp; 1740fa9e4066Sahrens 1741c717a561Smaybee ASSERT(txg != 0); 174246e1baa6SMatthew Ahrens 174346e1baa6SMatthew Ahrens /* 174446e1baa6SMatthew Ahrens * Due to our use of dn_nlevels below, this can only be called 174546e1baa6SMatthew Ahrens * in open context, unless we are operating on the MOS. 174646e1baa6SMatthew Ahrens * From syncing context, dn_nlevels may be different from the 174746e1baa6SMatthew Ahrens * dn_nlevels used when dbuf was dirtied. 174846e1baa6SMatthew Ahrens */ 174946e1baa6SMatthew Ahrens ASSERT(db->db_objset == 175046e1baa6SMatthew Ahrens dmu_objset_pool(db->db_objset)->dp_meta_objset || 175146e1baa6SMatthew Ahrens txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 17520a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 17533b2aab18SMatthew Ahrens ASSERT0(db->db_level); 17543b2aab18SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 1755fa9e4066Sahrens 1756fa9e4066Sahrens /* 1757fa9e4066Sahrens * If this buffer is not dirty, we're done. 1758fa9e4066Sahrens */ 175917f17c2dSbonwick for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1760c717a561Smaybee if (dr->dr_txg <= txg) 1761c717a561Smaybee break; 17623b2aab18SMatthew Ahrens if (dr == NULL || dr->dr_txg < txg) 17633b2aab18SMatthew Ahrens return (B_FALSE); 1764c717a561Smaybee ASSERT(dr->dr_txg == txg); 1765b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 1766fa9e4066Sahrens 1767744947dcSTom Erickson DB_DNODE_ENTER(db); 1768744947dcSTom Erickson dn = DB_DNODE(db); 1769744947dcSTom Erickson 1770fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1771fa9e4066Sahrens 1772fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1773fa9e4066Sahrens 177446e1baa6SMatthew Ahrens dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 177546e1baa6SMatthew Ahrens dr->dr_accounted, txg); 1776fa9e4066Sahrens 177717f17c2dSbonwick *drp = dr->dr_next; 1778c717a561Smaybee 17793f2366c2SGordon Ross /* 17803f2366c2SGordon Ross * Note that there are three places in dbuf_dirty() 17813f2366c2SGordon Ross * where this dirty record may be put on a list. 17823f2366c2SGordon Ross * Make sure to do a list_remove corresponding to 17833f2366c2SGordon Ross * every one of those list_insert calls. 
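 * The three inserts are: the early bonus/spill return path and the
 * top-level (db_level+1 == dn_nlevels) path, both onto
 * dn_dirty_records[txgoff], and the interior path onto the parent
 * record's dt.di.dr_children list; the removals below mirror them.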
17843f2366c2SGordon Ross */
1785c717a561Smaybee if (dr->dr_parent) {
1786c717a561Smaybee mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1787c717a561Smaybee list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1788c717a561Smaybee mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
17893f2366c2SGordon Ross } else if (db->db_blkid == DMU_SPILL_BLKID ||
17903f2366c2SGordon Ross db->db_level + 1 == dn->dn_nlevels) {
1791cdb0ab79Smaybee ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1792fa9e4066Sahrens mutex_enter(&dn->dn_mtx);
1793c717a561Smaybee list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1794fa9e4066Sahrens mutex_exit(&dn->dn_mtx);
1795c717a561Smaybee }
1796744947dcSTom Erickson DB_DNODE_EXIT(db);
1797c717a561Smaybee
179882c9918fSTim Haley if (db->db_state != DB_NOFILL) {
1799c717a561Smaybee dbuf_unoverride(dr);
1800c717a561Smaybee
1801c717a561Smaybee ASSERT(db->db_buf != NULL);
1802c717a561Smaybee ASSERT(dr->dt.dl.dr_data != NULL);
1803c717a561Smaybee if (dr->dt.dl.dr_data != db->db_buf)
1804dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db);
1805c717a561Smaybee }
1806d2b3cbbdSJorgen Lundman
1807c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t));
1808fa9e4066Sahrens
1809fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0);
1810fa9e4066Sahrens db->db_dirtycnt -= 1;
1811fa9e4066Sahrens
1812c717a561Smaybee if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1813dcbf3bd6SGeorge Wilson ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
1814dcbf3bd6SGeorge Wilson dbuf_destroy(db);
18153b2aab18SMatthew Ahrens return (B_TRUE);
1816fa9e4066Sahrens }
1817fa9e4066Sahrens
18183b2aab18SMatthew Ahrens return (B_FALSE);
1819fa9e4066Sahrens }
1820fa9e4066Sahrens
1821fa9e4066Sahrens void
182243466aaeSMax Grossman dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
1823fa9e4066Sahrens {
182443466aaeSMax Grossman dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
18251ab7f2deSmaybee int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1826fa9e4066Sahrens
1827fa9e4066Sahrens ASSERT(tx->tx_txg != 0);
1828fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds));
1829fa9e4066Sahrens
18300f2e7d03SMatthew Ahrens /*
18310f2e7d03SMatthew Ahrens * Quick check for dirtiness. For already dirty blocks, this
18320f2e7d03SMatthew Ahrens * reduces runtime of this function by >90%, and improves overall
18330f2e7d03SMatthew Ahrens * performance by 50% for some workloads (e.g. file deletion with
18340f2e7d03SMatthew Ahrens * indirect blocks cached).
18350f2e7d03SMatthew Ahrens */
18360f2e7d03SMatthew Ahrens mutex_enter(&db->db_mtx);
18370f2e7d03SMatthew Ahrens dbuf_dirty_record_t *dr;
18380f2e7d03SMatthew Ahrens for (dr = db->db_last_dirty;
18390f2e7d03SMatthew Ahrens dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
18400f2e7d03SMatthew Ahrens /*
18410f2e7d03SMatthew Ahrens * It's possible that it is already dirty but not cached,
18420f2e7d03SMatthew Ahrens * because there are some calls to dbuf_dirty() that don't
18430f2e7d03SMatthew Ahrens * go through dmu_buf_will_dirty().
18440f2e7d03SMatthew Ahrens */
18450f2e7d03SMatthew Ahrens if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
18460f2e7d03SMatthew Ahrens /* This dbuf is already dirty and cached.
*/ 18470f2e7d03SMatthew Ahrens dbuf_redirty(dr); 18480f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 18490f2e7d03SMatthew Ahrens return; 18500f2e7d03SMatthew Ahrens } 18510f2e7d03SMatthew Ahrens } 18520f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 18530f2e7d03SMatthew Ahrens 1854744947dcSTom Erickson DB_DNODE_ENTER(db); 1855744947dcSTom Erickson if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1856fa9e4066Sahrens rf |= DB_RF_HAVESTRUCT; 1857744947dcSTom Erickson DB_DNODE_EXIT(db); 1858ea8dc4b6Seschrock (void) dbuf_read(db, NULL, rf); 1859c717a561Smaybee (void) dbuf_dirty(db, tx); 1860fa9e4066Sahrens } 1861fa9e4066Sahrens 1862fa9e4066Sahrens void 186382c9918fSTim Haley dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 186482c9918fSTim Haley { 186582c9918fSTim Haley dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 186682c9918fSTim Haley 186782c9918fSTim Haley db->db_state = DB_NOFILL; 186882c9918fSTim Haley 186982c9918fSTim Haley dmu_buf_will_fill(db_fake, tx); 187082c9918fSTim Haley } 187182c9918fSTim Haley 187282c9918fSTim Haley void 1873ea8dc4b6Seschrock dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1874fa9e4066Sahrens { 1875ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1876ea8dc4b6Seschrock 18770a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1878fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1879fa9e4066Sahrens ASSERT(db->db_level == 0); 1880fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1881fa9e4066Sahrens 1882ea8dc4b6Seschrock ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1883fa9e4066Sahrens dmu_tx_private_ok(tx)); 1884fa9e4066Sahrens 1885fa9e4066Sahrens dbuf_noread(db); 1886c717a561Smaybee (void) dbuf_dirty(db, tx); 1887fa9e4066Sahrens } 1888fa9e4066Sahrens 1889fa9e4066Sahrens #pragma weak dmu_buf_fill_done = dbuf_fill_done 1890fa9e4066Sahrens /* ARGSUSED */ 1891fa9e4066Sahrens void 1892fa9e4066Sahrens dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1893fa9e4066Sahrens { 1894fa9e4066Sahrens mutex_enter(&db->db_mtx); 18959c9dc39aSek110237 DBUF_VERIFY(db); 1896fa9e4066Sahrens 1897fa9e4066Sahrens if (db->db_state == DB_FILL) { 1898c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 18990a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1900fa9e4066Sahrens /* we were freed while filling */ 1901fa9e4066Sahrens /* XXX dbuf_undirty? 
*/ 1902fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 1903c717a561Smaybee db->db_freed_in_flight = FALSE; 1904fa9e4066Sahrens } 1905fa9e4066Sahrens db->db_state = DB_CACHED; 1906fa9e4066Sahrens cv_broadcast(&db->db_changed); 1907fa9e4066Sahrens } 1908fa9e4066Sahrens mutex_exit(&db->db_mtx); 1909fa9e4066Sahrens } 1910fa9e4066Sahrens 19115d7b4d43SMatthew Ahrens void 19125d7b4d43SMatthew Ahrens dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 19135d7b4d43SMatthew Ahrens bp_embedded_type_t etype, enum zio_compress comp, 19145d7b4d43SMatthew Ahrens int uncompressed_size, int compressed_size, int byteorder, 19155d7b4d43SMatthew Ahrens dmu_tx_t *tx) 19165d7b4d43SMatthew Ahrens { 19175d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 19185d7b4d43SMatthew Ahrens struct dirty_leaf *dl; 19195d7b4d43SMatthew Ahrens dmu_object_type_t type; 19205d7b4d43SMatthew Ahrens 1921ca0cc391SMatthew Ahrens if (etype == BP_EMBEDDED_TYPE_DATA) { 1922ca0cc391SMatthew Ahrens ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 1923ca0cc391SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 1924ca0cc391SMatthew Ahrens } 1925ca0cc391SMatthew Ahrens 19265d7b4d43SMatthew Ahrens DB_DNODE_ENTER(db); 19275d7b4d43SMatthew Ahrens type = DB_DNODE(db)->dn_type; 19285d7b4d43SMatthew Ahrens DB_DNODE_EXIT(db); 19295d7b4d43SMatthew Ahrens 19305d7b4d43SMatthew Ahrens ASSERT0(db->db_level); 19315d7b4d43SMatthew Ahrens ASSERT(db->db_blkid != DMU_BONUS_BLKID); 19325d7b4d43SMatthew Ahrens 19335d7b4d43SMatthew Ahrens dmu_buf_will_not_fill(dbuf, tx); 19345d7b4d43SMatthew Ahrens 19355d7b4d43SMatthew Ahrens ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 19365d7b4d43SMatthew Ahrens dl = &db->db_last_dirty->dt.dl; 19375d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(&dl->dr_overridden_by, 19385d7b4d43SMatthew Ahrens data, comp, uncompressed_size, compressed_size); 19395d7b4d43SMatthew Ahrens BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 19405d7b4d43SMatthew Ahrens BP_SET_TYPE(&dl->dr_overridden_by, type); 19415d7b4d43SMatthew Ahrens BP_SET_LEVEL(&dl->dr_overridden_by, 0); 19425d7b4d43SMatthew Ahrens BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 19435d7b4d43SMatthew Ahrens 19445d7b4d43SMatthew Ahrens dl->dr_override_state = DR_OVERRIDDEN; 19455d7b4d43SMatthew Ahrens dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 19465d7b4d43SMatthew Ahrens } 19475d7b4d43SMatthew Ahrens 1948ea8dc4b6Seschrock /* 19492fdbea25SAleksandr Guzovskiy * Directly assign a provided arc buf to a given dbuf if it's not referenced 19502fdbea25SAleksandr Guzovskiy * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
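 * In the copy case the loaned buf is destroyed here once its contents
 * have been copied; in the assign case it becomes db_buf and whatever
 * buf it replaces is destroyed instead.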
19512fdbea25SAleksandr Guzovskiy */ 19522fdbea25SAleksandr Guzovskiy void 19532fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 19542fdbea25SAleksandr Guzovskiy { 19552fdbea25SAleksandr Guzovskiy ASSERT(!refcount_is_zero(&db->db_holds)); 19560a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 19572fdbea25SAleksandr Guzovskiy ASSERT(db->db_level == 0); 19585602294fSDan Kimmel ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 19592fdbea25SAleksandr Guzovskiy ASSERT(buf != NULL); 19605602294fSDan Kimmel ASSERT(arc_buf_lsize(buf) == db->db.db_size); 19612fdbea25SAleksandr Guzovskiy ASSERT(tx->tx_txg != 0); 19622fdbea25SAleksandr Guzovskiy 19632fdbea25SAleksandr Guzovskiy arc_return_buf(buf, db); 19642fdbea25SAleksandr Guzovskiy ASSERT(arc_released(buf)); 19652fdbea25SAleksandr Guzovskiy 19662fdbea25SAleksandr Guzovskiy mutex_enter(&db->db_mtx); 19672fdbea25SAleksandr Guzovskiy 19682fdbea25SAleksandr Guzovskiy while (db->db_state == DB_READ || db->db_state == DB_FILL) 19692fdbea25SAleksandr Guzovskiy cv_wait(&db->db_changed, &db->db_mtx); 19702fdbea25SAleksandr Guzovskiy 19712fdbea25SAleksandr Guzovskiy ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 19722fdbea25SAleksandr Guzovskiy 19732fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED && 19742fdbea25SAleksandr Guzovskiy refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 19752fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 19762fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 19772fdbea25SAleksandr Guzovskiy bcopy(buf->b_data, db->db.db_data, db->db.db_size); 1978dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 1979c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_copied(); 19802fdbea25SAleksandr Guzovskiy return; 19812fdbea25SAleksandr Guzovskiy } 19822fdbea25SAleksandr Guzovskiy 1983c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_nocopy(); 19842fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED) { 19852fdbea25SAleksandr Guzovskiy dbuf_dirty_record_t *dr = db->db_last_dirty; 19862fdbea25SAleksandr Guzovskiy 19872fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf != NULL); 19882fdbea25SAleksandr Guzovskiy if (dr != NULL && dr->dr_txg == tx->tx_txg) { 19892fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_data == db->db_buf); 19902fdbea25SAleksandr Guzovskiy if (!arc_released(db->db_buf)) { 19912fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_override_state == 19922fdbea25SAleksandr Guzovskiy DR_OVERRIDDEN); 19932fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 19942fdbea25SAleksandr Guzovskiy } 19952fdbea25SAleksandr Guzovskiy dr->dt.dl.dr_data = buf; 1996dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 19972fdbea25SAleksandr Guzovskiy } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 19982fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 1999dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 20002fdbea25SAleksandr Guzovskiy } 20012fdbea25SAleksandr Guzovskiy db->db_buf = NULL; 20022fdbea25SAleksandr Guzovskiy } 20032fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf == NULL); 20042fdbea25SAleksandr Guzovskiy dbuf_set_data(db, buf); 20052fdbea25SAleksandr Guzovskiy db->db_state = DB_FILL; 20062fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 20072fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 200843466aaeSMax Grossman dmu_buf_fill_done(&db->db, tx); 20092fdbea25SAleksandr Guzovskiy } 20102fdbea25SAleksandr Guzovskiy 
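/*
 * For reference, the typical zero-copy write path that ends up in
 * dbuf_assign_arcbuf() looks roughly like this (a sketch; see
 * dmu_request_arcbuf() and dmu_assign_arcbuf() in dmu.c):
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(bonus_db, size);
 *	(fill abuf->b_data with the payload)
 *	dmu_assign_arcbuf(bonus_db, offset, abuf, tx);
 */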
2011ea8dc4b6Seschrock void 2012dcbf3bd6SGeorge Wilson dbuf_destroy(dmu_buf_impl_t *db) 2013fa9e4066Sahrens { 2014744947dcSTom Erickson dnode_t *dn; 2015ea8dc4b6Seschrock dmu_buf_impl_t *parent = db->db_parent; 2016744947dcSTom Erickson dmu_buf_impl_t *dndb; 2017fa9e4066Sahrens 2018fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2019fa9e4066Sahrens ASSERT(refcount_is_zero(&db->db_holds)); 2020fa9e4066Sahrens 2021dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) { 2022dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 2023dcbf3bd6SGeorge Wilson db->db_buf = NULL; 2024dcbf3bd6SGeorge Wilson } 2025ea8dc4b6Seschrock 20260a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 2027dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data != NULL); 2028ea8dc4b6Seschrock zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 20295a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2030fa9e4066Sahrens db->db_state = DB_UNCACHED; 2031fa9e4066Sahrens } 2032fa9e4066Sahrens 2033dcbf3bd6SGeorge Wilson dbuf_clear_data(db); 2034dcbf3bd6SGeorge Wilson 2035dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 203694c2d0ebSMatthew Ahrens multilist_remove(dbuf_cache, db); 2037dcbf3bd6SGeorge Wilson (void) refcount_remove_many(&dbuf_cache_size, 2038dcbf3bd6SGeorge Wilson db->db.db_size, db); 2039dcbf3bd6SGeorge Wilson } 2040dcbf3bd6SGeorge Wilson 204182c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2042fa9e4066Sahrens ASSERT(db->db_data_pending == NULL); 2043fa9e4066Sahrens 2044ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2045ea8dc4b6Seschrock db->db_blkptr = NULL; 2046ea8dc4b6Seschrock 2047dcbf3bd6SGeorge Wilson /* 2048dcbf3bd6SGeorge Wilson * Now that db_state is DB_EVICTING, nobody else can find this via 2049dcbf3bd6SGeorge Wilson * the hash table. We can now drop db_mtx, which allows us to 2050dcbf3bd6SGeorge Wilson * acquire the dn_dbufs_mtx. 2051dcbf3bd6SGeorge Wilson */ 2052dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2053dcbf3bd6SGeorge Wilson 2054744947dcSTom Erickson DB_DNODE_ENTER(db); 2055744947dcSTom Erickson dn = DB_DNODE(db); 2056744947dcSTom Erickson dndb = dn->dn_dbuf; 2057dcbf3bd6SGeorge Wilson if (db->db_blkid != DMU_BONUS_BLKID) { 2058dcbf3bd6SGeorge Wilson boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2059dcbf3bd6SGeorge Wilson if (needlock) 2060dcbf3bd6SGeorge Wilson mutex_enter(&dn->dn_dbufs_mtx); 20610f6d88adSAlex Reece avl_remove(&dn->dn_dbufs, db); 2062640c1670SJosef 'Jeff' Sipek atomic_dec_32(&dn->dn_dbufs_count); 2063744947dcSTom Erickson membar_producer(); 2064744947dcSTom Erickson DB_DNODE_EXIT(db); 2065dcbf3bd6SGeorge Wilson if (needlock) 2066dcbf3bd6SGeorge Wilson mutex_exit(&dn->dn_dbufs_mtx); 2067744947dcSTom Erickson /* 2068744947dcSTom Erickson * Decrementing the dbuf count means that the hold corresponding 2069744947dcSTom Erickson * to the removed dbuf is no longer discounted in dnode_move(), 2070744947dcSTom Erickson * so the dnode cannot be moved until after we release the hold. 2071744947dcSTom Erickson * The membar_producer() ensures visibility of the decremented 2072744947dcSTom Erickson * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2073744947dcSTom Erickson * release any lock. 
2074744947dcSTom Erickson */ 2075ea8dc4b6Seschrock dnode_rele(dn, db); 2076744947dcSTom Erickson db->db_dnode_handle = NULL; 2077dcbf3bd6SGeorge Wilson 2078dcbf3bd6SGeorge Wilson dbuf_hash_remove(db); 2079744947dcSTom Erickson } else { 2080744947dcSTom Erickson DB_DNODE_EXIT(db); 2081ea8dc4b6Seschrock } 2082ea8dc4b6Seschrock 2083dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds)); 2084ea8dc4b6Seschrock 2085dcbf3bd6SGeorge Wilson db->db_parent = NULL; 2086dcbf3bd6SGeorge Wilson 2087dcbf3bd6SGeorge Wilson ASSERT(db->db_buf == NULL); 2088dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data == NULL); 2089dcbf3bd6SGeorge Wilson ASSERT(db->db_hash_next == NULL); 2090dcbf3bd6SGeorge Wilson ASSERT(db->db_blkptr == NULL); 2091dcbf3bd6SGeorge Wilson ASSERT(db->db_data_pending == NULL); 2092dcbf3bd6SGeorge Wilson ASSERT(!multilist_link_active(&db->db_cache_link)); 2093dcbf3bd6SGeorge Wilson 2094dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2095dcbf3bd6SGeorge Wilson arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2096fa9e4066Sahrens 2097fa9e4066Sahrens /* 2098744947dcSTom Erickson * If this dbuf is referenced from an indirect dbuf, 2099fa9e4066Sahrens * decrement the ref count on the indirect dbuf. 2100fa9e4066Sahrens */ 2101c543ec06Sahrens if (parent && parent != dndb) 2102ea8dc4b6Seschrock dbuf_rele(parent, db); 2103fa9e4066Sahrens } 2104fa9e4066Sahrens 2105a2cdcdd2SPaul Dagnelie /* 2106a2cdcdd2SPaul Dagnelie * Note: While bpp will always be updated if the function returns success, 2107a2cdcdd2SPaul Dagnelie * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2108a2cdcdd2SPaul Dagnelie * this happens when the dnode is the meta-dnode, or a userused or groupused 2109a2cdcdd2SPaul Dagnelie * object. 2110a2cdcdd2SPaul Dagnelie */ 2111fa9e4066Sahrens static int 2112fa9e4066Sahrens dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2113fa9e4066Sahrens dmu_buf_impl_t **parentp, blkptr_t **bpp) 2114fa9e4066Sahrens { 21150b69c2f0Sahrens *parentp = NULL; 21160b69c2f0Sahrens *bpp = NULL; 21170b69c2f0Sahrens 21180a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 21190a586ceaSMark Shellenbaum 21200a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) { 21210a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 212206e0070dSMark Shellenbaum if (dn->dn_have_spill && 212306e0070dSMark Shellenbaum (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 21240a586ceaSMark Shellenbaum *bpp = &dn->dn_phys->dn_spill; 21250a586ceaSMark Shellenbaum else 21260a586ceaSMark Shellenbaum *bpp = NULL; 21270a586ceaSMark Shellenbaum dbuf_add_ref(dn->dn_dbuf, NULL); 21280a586ceaSMark Shellenbaum *parentp = dn->dn_dbuf; 21290a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 21300a586ceaSMark Shellenbaum return (0); 21310a586ceaSMark Shellenbaum } 2132ea8dc4b6Seschrock 21337de35a3eSPaul Dagnelie int nlevels = 21347de35a3eSPaul Dagnelie (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels; 21357de35a3eSPaul Dagnelie int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2136fa9e4066Sahrens 2137fa9e4066Sahrens ASSERT3U(level * epbs, <, 64); 2138fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 21397de35a3eSPaul Dagnelie /* 21407de35a3eSPaul Dagnelie * This assertion shouldn't trip as long as the max indirect block size 21417de35a3eSPaul Dagnelie * is less than 1M. 
The reason for this is that up to that point, 21427de35a3eSPaul Dagnelie * the number of levels required to address an entire object with blocks 21437de35a3eSPaul Dagnelie * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 21447de35a3eSPaul Dagnelie * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 21457de35a3eSPaul Dagnelie * (i.e. we can address the entire object), objects will all use at most 21467de35a3eSPaul Dagnelie * N-1 levels and the assertion won't overflow. However, once epbs is 21477de35a3eSPaul Dagnelie * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 21487de35a3eSPaul Dagnelie * enough to address an entire object, so objects will have 5 levels, 21497de35a3eSPaul Dagnelie * but then this assertion will overflow. 21507de35a3eSPaul Dagnelie * 21517de35a3eSPaul Dagnelie * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 21527de35a3eSPaul Dagnelie * need to redo this logic to handle overflows. 21537de35a3eSPaul Dagnelie */ 21547de35a3eSPaul Dagnelie ASSERT(level >= nlevels || 21557de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs) + 21567de35a3eSPaul Dagnelie highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2157ea8dc4b6Seschrock if (level >= nlevels || 21587de35a3eSPaul Dagnelie blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 21597de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs)) || 21607de35a3eSPaul Dagnelie (fail_sparse && 21617de35a3eSPaul Dagnelie blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2162fa9e4066Sahrens /* the buffer has no parent yet */ 2163be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2164fa9e4066Sahrens } else if (level < nlevels-1) { 2165fa9e4066Sahrens /* this block is referenced from an indirect block */ 2166fa9e4066Sahrens int err = dbuf_hold_impl(dn, level+1, 2167a2cdcdd2SPaul Dagnelie blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2168fa9e4066Sahrens if (err) 2169fa9e4066Sahrens return (err); 2170ea8dc4b6Seschrock err = dbuf_read(*parentp, NULL, 2171ea8dc4b6Seschrock (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2172c543ec06Sahrens if (err) { 2173c543ec06Sahrens dbuf_rele(*parentp, NULL); 2174c543ec06Sahrens *parentp = NULL; 2175c543ec06Sahrens return (err); 2176c543ec06Sahrens } 2177fa9e4066Sahrens *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2178fa9e4066Sahrens (blkid & ((1ULL << epbs) - 1)); 21797de35a3eSPaul Dagnelie if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 21807de35a3eSPaul Dagnelie ASSERT(BP_IS_HOLE(*bpp)); 2181c543ec06Sahrens return (0); 2182fa9e4066Sahrens } else { 2183fa9e4066Sahrens /* the block is referenced from the dnode */ 2184fa9e4066Sahrens ASSERT3U(level, ==, nlevels-1); 2185fa9e4066Sahrens ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2186fa9e4066Sahrens blkid < dn->dn_phys->dn_nblkptr); 2187c543ec06Sahrens if (dn->dn_dbuf) { 2188c543ec06Sahrens dbuf_add_ref(dn->dn_dbuf, NULL); 2189fa9e4066Sahrens *parentp = dn->dn_dbuf; 2190c543ec06Sahrens } 2191fa9e4066Sahrens *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2192fa9e4066Sahrens return (0); 2193fa9e4066Sahrens } 2194fa9e4066Sahrens } 2195fa9e4066Sahrens 2196fa9e4066Sahrens static dmu_buf_impl_t * 2197fa9e4066Sahrens dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2198fa9e4066Sahrens dmu_buf_impl_t *parent, blkptr_t *blkptr) 2199fa9e4066Sahrens { 2200503ad85cSMatthew Ahrens objset_t *os = dn->dn_objset; 2201fa9e4066Sahrens dmu_buf_impl_t *db, *odb; 2202fa9e4066Sahrens 2203fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2204fa9e4066Sahrens ASSERT(dn->dn_type != 
DMU_OT_NONE); 2205fa9e4066Sahrens 2206dcbf3bd6SGeorge Wilson db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2207fa9e4066Sahrens 2208fa9e4066Sahrens db->db_objset = os; 2209fa9e4066Sahrens db->db.db_object = dn->dn_object; 2210fa9e4066Sahrens db->db_level = level; 2211fa9e4066Sahrens db->db_blkid = blkid; 2212c717a561Smaybee db->db_last_dirty = NULL; 2213ea8dc4b6Seschrock db->db_dirtycnt = 0; 2214744947dcSTom Erickson db->db_dnode_handle = dn->dn_handle; 2215ea8dc4b6Seschrock db->db_parent = parent; 2216ea8dc4b6Seschrock db->db_blkptr = blkptr; 2217fa9e4066Sahrens 2218bc9014e6SJustin Gibbs db->db_user = NULL; 2219d2058105SJustin T. Gibbs db->db_user_immediate_evict = FALSE; 2220d2058105SJustin T. Gibbs db->db_freed_in_flight = FALSE; 2221d2058105SJustin T. Gibbs db->db_pending_evict = FALSE; 2222ea8dc4b6Seschrock 22230a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) { 2224ea8dc4b6Seschrock ASSERT3P(parent, ==, dn->dn_dbuf); 22251934e92fSmaybee db->db.db_size = DN_MAX_BONUSLEN - 22261934e92fSmaybee (dn->dn_nblkptr-1) * sizeof (blkptr_t); 22271934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 22280a586ceaSMark Shellenbaum db->db.db_offset = DMU_BONUS_BLKID; 2229ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2230ea8dc4b6Seschrock /* the bonus dbuf is not placed in the hash table */ 22315a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2232ea8dc4b6Seschrock return (db); 22330a586ceaSMark Shellenbaum } else if (blkid == DMU_SPILL_BLKID) { 22340a586ceaSMark Shellenbaum db->db.db_size = (blkptr != NULL) ? 22350a586ceaSMark Shellenbaum BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 22360a586ceaSMark Shellenbaum db->db.db_offset = 0; 2237fa9e4066Sahrens } else { 2238fa9e4066Sahrens int blocksize = 2239fa9e4066Sahrens db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2240fa9e4066Sahrens db->db.db_size = blocksize; 2241fa9e4066Sahrens db->db.db_offset = db->db_blkid * blocksize; 2242fa9e4066Sahrens } 2243fa9e4066Sahrens 2244fa9e4066Sahrens /* 2245fa9e4066Sahrens * Hold the dn_dbufs_mtx while we get the new dbuf 2246fa9e4066Sahrens * in the hash table *and* added to the dbufs list. 2247fa9e4066Sahrens * This prevents a possible deadlock with someone 2248fa9e4066Sahrens * trying to look up this dbuf before its added to the 2249fa9e4066Sahrens * dn_dbufs list. 
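 * If dbuf_hash_insert() below returns non-NULL, another thread won
 * the race; we free our copy and return the existing dbuf instead.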
2250fa9e4066Sahrens */ 2251fa9e4066Sahrens mutex_enter(&dn->dn_dbufs_mtx); 2252ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2253fa9e4066Sahrens if ((odb = dbuf_hash_insert(db)) != NULL) { 2254fa9e4066Sahrens /* someone else inserted it first */ 2255dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2256fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 2257fa9e4066Sahrens return (odb); 2258fa9e4066Sahrens } 22590f6d88adSAlex Reece avl_add(&dn->dn_dbufs, db); 2260653af1b8SStephen Blinick 2261ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2262fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 22635a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2264fa9e4066Sahrens 2265fa9e4066Sahrens if (parent && parent != dn->dn_dbuf) 2266fa9e4066Sahrens dbuf_add_ref(parent, db); 2267fa9e4066Sahrens 2268ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2269ea8dc4b6Seschrock refcount_count(&dn->dn_holds) > 0); 2270fa9e4066Sahrens (void) refcount_add(&dn->dn_holds, db); 2271640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 2272fa9e4066Sahrens 2273fa9e4066Sahrens dprintf_dbuf(db, "db=%p\n", db); 2274fa9e4066Sahrens 2275fa9e4066Sahrens return (db); 2276fa9e4066Sahrens } 2277fa9e4066Sahrens 2278a2cdcdd2SPaul Dagnelie typedef struct dbuf_prefetch_arg { 2279a2cdcdd2SPaul Dagnelie spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2280a2cdcdd2SPaul Dagnelie zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2281a2cdcdd2SPaul Dagnelie int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2282a2cdcdd2SPaul Dagnelie int dpa_curlevel; /* The current level that we're reading */ 2283dcbf3bd6SGeorge Wilson dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2284a2cdcdd2SPaul Dagnelie zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2285a2cdcdd2SPaul Dagnelie zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2286a2cdcdd2SPaul Dagnelie arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2287a2cdcdd2SPaul Dagnelie } dbuf_prefetch_arg_t; 2288a2cdcdd2SPaul Dagnelie 2289a2cdcdd2SPaul Dagnelie /* 2290a2cdcdd2SPaul Dagnelie * Actually issue the prefetch read for the block given. 2291a2cdcdd2SPaul Dagnelie */ 2292a2cdcdd2SPaul Dagnelie static void 2293a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2294fa9e4066Sahrens { 2295a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2296a2cdcdd2SPaul Dagnelie return; 2297a2cdcdd2SPaul Dagnelie 2298a2cdcdd2SPaul Dagnelie arc_flags_t aflags = 2299a2cdcdd2SPaul Dagnelie dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2300a2cdcdd2SPaul Dagnelie 2301a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2302a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2303a2cdcdd2SPaul Dagnelie ASSERT(dpa->dpa_zio != NULL); 2304a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2305a2cdcdd2SPaul Dagnelie dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2306a2cdcdd2SPaul Dagnelie &aflags, &dpa->dpa_zb); 2307a2cdcdd2SPaul Dagnelie } 2308a2cdcdd2SPaul Dagnelie 2309a2cdcdd2SPaul Dagnelie /* 2310a2cdcdd2SPaul Dagnelie * Called when an indirect block above our prefetch target is read in. This 2311a2cdcdd2SPaul Dagnelie * will either read in the next indirect block down the tree or issue the actual 2312a2cdcdd2SPaul Dagnelie * prefetch if the next block down is our target. 
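 *
 * Illustrative arithmetic (editorial, not authoritative): the ancestor
 * of the target block at curlevel has blkid
 * zb_blkid >> (epbs * (curlevel - zb_level)), which is how nextblkid
 * is computed below. E.g. with epbs = 10 (a 128K indirect block holds
 * 1 << 10 = 1024 128-byte blkptrs), the level-1 ancestor of L0 blkid
 * 0x500000 is 0x1400, and the level-2 ancestor is 0x5.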
2313a2cdcdd2SPaul Dagnelie */ 2314a2cdcdd2SPaul Dagnelie static void 2315a2cdcdd2SPaul Dagnelie dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2316a2cdcdd2SPaul Dagnelie { 2317a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = private; 2318a2cdcdd2SPaul Dagnelie 2319a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2320a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_curlevel, >, 0); 2321dcbf3bd6SGeorge Wilson 2322dcbf3bd6SGeorge Wilson /* 2323dcbf3bd6SGeorge Wilson * The dpa_dnode is only valid if we are called with a NULL 2324dcbf3bd6SGeorge Wilson * zio. This indicates that the arc_read() returned without 2325dcbf3bd6SGeorge Wilson * first calling zio_read() to issue a physical read. Once 2326dcbf3bd6SGeorge Wilson * a physical read is made the dpa_dnode must be invalidated 2327dcbf3bd6SGeorge Wilson * as the locks guarding it may have been dropped. If the 2328dcbf3bd6SGeorge Wilson * dpa_dnode is still valid, then we want to add it to the dbuf 2329dcbf3bd6SGeorge Wilson * cache. To do so, we must hold the dbuf associated with the block 2330dcbf3bd6SGeorge Wilson * we just prefetched, read its contents so that we associate it 2331dcbf3bd6SGeorge Wilson * with an arc_buf_t, and then release it. 2332dcbf3bd6SGeorge Wilson */ 2333a2cdcdd2SPaul Dagnelie if (zio != NULL) { 2334a2cdcdd2SPaul Dagnelie ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2335dcbf3bd6SGeorge Wilson if (zio->io_flags & ZIO_FLAG_RAW) { 2336dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2337dcbf3bd6SGeorge Wilson } else { 2338a2cdcdd2SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2339dcbf3bd6SGeorge Wilson } 2340a2cdcdd2SPaul Dagnelie ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2341dcbf3bd6SGeorge Wilson 2342dcbf3bd6SGeorge Wilson dpa->dpa_dnode = NULL; 2343dcbf3bd6SGeorge Wilson } else if (dpa->dpa_dnode != NULL) { 2344dcbf3bd6SGeorge Wilson uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2345dcbf3bd6SGeorge Wilson (dpa->dpa_epbs * (dpa->dpa_curlevel - 2346dcbf3bd6SGeorge Wilson dpa->dpa_zb.zb_level)); 2347dcbf3bd6SGeorge Wilson dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2348dcbf3bd6SGeorge Wilson dpa->dpa_curlevel, curblkid, FTAG); 2349dcbf3bd6SGeorge Wilson (void) dbuf_read(db, NULL, 2350dcbf3bd6SGeorge Wilson DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2351dcbf3bd6SGeorge Wilson dbuf_rele(db, FTAG); 2352a2cdcdd2SPaul Dagnelie } 2353a2cdcdd2SPaul Dagnelie 2354a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel--; 2355a2cdcdd2SPaul Dagnelie 2356a2cdcdd2SPaul Dagnelie uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2357a2cdcdd2SPaul Dagnelie (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2358a2cdcdd2SPaul Dagnelie blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2359a2cdcdd2SPaul Dagnelie P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2360a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || (zio != NULL && zio->io_error != 0)) { 2361a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2362a2cdcdd2SPaul Dagnelie } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2363a2cdcdd2SPaul Dagnelie ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2364a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, bp); 2365a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2366a2cdcdd2SPaul Dagnelie } else { 2367a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2368a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2369a2cdcdd2SPaul Dagnelie 237027295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then 
decides */ 237127295216Sbenrubson if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 237227295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 237327295216Sbenrubson 2374a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2375a2cdcdd2SPaul Dagnelie 2376a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2377a2cdcdd2SPaul Dagnelie dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2378a2cdcdd2SPaul Dagnelie 2379a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2380a2cdcdd2SPaul Dagnelie bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2381a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2382a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2383a2cdcdd2SPaul Dagnelie } 2384dcbf3bd6SGeorge Wilson 2385dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, private); 2386a2cdcdd2SPaul Dagnelie } 2387a2cdcdd2SPaul Dagnelie 2388a2cdcdd2SPaul Dagnelie /* 2389a2cdcdd2SPaul Dagnelie * Issue prefetch reads for the given block on the given level. If the indirect 2390a2cdcdd2SPaul Dagnelie * blocks above that block are not in memory, we will read them in 2391a2cdcdd2SPaul Dagnelie * asynchronously. As a result, this call never blocks waiting for a read to 2392a2cdcdd2SPaul Dagnelie * complete. 2393a2cdcdd2SPaul Dagnelie */ 2394a2cdcdd2SPaul Dagnelie void 2395a2cdcdd2SPaul Dagnelie dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2396a2cdcdd2SPaul Dagnelie arc_flags_t aflags) 2397a2cdcdd2SPaul Dagnelie { 2398a2cdcdd2SPaul Dagnelie blkptr_t bp; 2399a2cdcdd2SPaul Dagnelie int epbs, nlevels, curlevel; 2400a2cdcdd2SPaul Dagnelie uint64_t curblkid; 2401fa9e4066Sahrens 24020a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2403fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2404fa9e4066Sahrens 2405cf6106c8SMatthew Ahrens if (blkid > dn->dn_maxblkid) 2406cf6106c8SMatthew Ahrens return; 2407cf6106c8SMatthew Ahrens 2408fa9e4066Sahrens if (dnode_block_freed(dn, blkid)) 2409fa9e4066Sahrens return; 2410fa9e4066Sahrens 2411fa9e4066Sahrens /* 2412a2cdcdd2SPaul Dagnelie * This dnode hasn't been written to disk yet, so there's nothing to 2413a2cdcdd2SPaul Dagnelie * prefetch. 2414fa9e4066Sahrens */ 2415a2cdcdd2SPaul Dagnelie nlevels = dn->dn_phys->dn_nlevels; 2416a2cdcdd2SPaul Dagnelie if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2417a2cdcdd2SPaul Dagnelie return; 2418a2cdcdd2SPaul Dagnelie 2419a2cdcdd2SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2420a2cdcdd2SPaul Dagnelie if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2421a2cdcdd2SPaul Dagnelie return; 2422a2cdcdd2SPaul Dagnelie 2423a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2424a2cdcdd2SPaul Dagnelie level, blkid); 2425a2cdcdd2SPaul Dagnelie if (db != NULL) { 2426fa9e4066Sahrens mutex_exit(&db->db_mtx); 2427a2cdcdd2SPaul Dagnelie /* 2428a2cdcdd2SPaul Dagnelie * This dbuf already exists. It is either CACHED, or 2429a2cdcdd2SPaul Dagnelie * (we assume) about to be read or filled. 2430a2cdcdd2SPaul Dagnelie */ 2431fa9e4066Sahrens return; 2432fa9e4066Sahrens } 2433fa9e4066Sahrens 2434a2cdcdd2SPaul Dagnelie /* 2435a2cdcdd2SPaul Dagnelie * Find the closest ancestor (indirect block) of the target block 2436a2cdcdd2SPaul Dagnelie * that is present in the cache. In this indirect block, we will 2437a2cdcdd2SPaul Dagnelie * find the bp that is at curlevel, curblkid. 
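 *
 * Sketch of the walk below (editorial): starting from
 * (curlevel, curblkid) = (level, blkid), each missed lookup climbs one
 * level with curblkid >>= epbs. On a hit, the bp is pulled out of the
 * cached parent at index P2PHASE(curblkid, 1 << epbs); if nothing is
 * cached we reach nlevels - 1 and take dn_phys->dn_blkptr[curblkid]
 * directly.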
2438a2cdcdd2SPaul Dagnelie */ 2439a2cdcdd2SPaul Dagnelie curlevel = level; 2440a2cdcdd2SPaul Dagnelie curblkid = blkid; 2441a2cdcdd2SPaul Dagnelie while (curlevel < nlevels - 1) { 2442a2cdcdd2SPaul Dagnelie int parent_level = curlevel + 1; 2443a2cdcdd2SPaul Dagnelie uint64_t parent_blkid = curblkid >> epbs; 2444a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db; 2445a2cdcdd2SPaul Dagnelie 2446a2cdcdd2SPaul Dagnelie if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2447a2cdcdd2SPaul Dagnelie FALSE, TRUE, FTAG, &db) == 0) { 2448a2cdcdd2SPaul Dagnelie blkptr_t *bpp = db->db_buf->b_data; 2449a2cdcdd2SPaul Dagnelie bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2450a2cdcdd2SPaul Dagnelie dbuf_rele(db, FTAG); 2451a2cdcdd2SPaul Dagnelie break; 2452a2cdcdd2SPaul Dagnelie } 2453a2cdcdd2SPaul Dagnelie 2454a2cdcdd2SPaul Dagnelie curlevel = parent_level; 2455a2cdcdd2SPaul Dagnelie curblkid = parent_blkid; 2456a2cdcdd2SPaul Dagnelie } 2457a2cdcdd2SPaul Dagnelie 2458a2cdcdd2SPaul Dagnelie if (curlevel == nlevels - 1) { 2459a2cdcdd2SPaul Dagnelie /* No cached indirect blocks found. */ 2460a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2461a2cdcdd2SPaul Dagnelie bp = dn->dn_phys->dn_blkptr[curblkid]; 2462a2cdcdd2SPaul Dagnelie } 2463a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(&bp)) 2464a2cdcdd2SPaul Dagnelie return; 2465a2cdcdd2SPaul Dagnelie 2466a2cdcdd2SPaul Dagnelie ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2467a2cdcdd2SPaul Dagnelie 2468a2cdcdd2SPaul Dagnelie zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2469a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL); 2470a2cdcdd2SPaul Dagnelie 2471a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2472b24ab676SJeff Bonwick dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2473a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2474a2cdcdd2SPaul Dagnelie dn->dn_object, level, blkid); 2475a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel = curlevel; 2476a2cdcdd2SPaul Dagnelie dpa->dpa_prio = prio; 2477a2cdcdd2SPaul Dagnelie dpa->dpa_aflags = aflags; 2478a2cdcdd2SPaul Dagnelie dpa->dpa_spa = dn->dn_objset->os_spa; 2479dcbf3bd6SGeorge Wilson dpa->dpa_dnode = dn; 2480a2cdcdd2SPaul Dagnelie dpa->dpa_epbs = epbs; 2481a2cdcdd2SPaul Dagnelie dpa->dpa_zio = pio; 2482a2cdcdd2SPaul Dagnelie 248327295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 248427295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 248527295216Sbenrubson dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 248627295216Sbenrubson 2487a2cdcdd2SPaul Dagnelie /* 2488a2cdcdd2SPaul Dagnelie * If we have the indirect just above us, no need to do the asynchronous 2489a2cdcdd2SPaul Dagnelie * prefetch chain; we'll just run the last step ourselves. If we're at 2490a2cdcdd2SPaul Dagnelie * a higher level, though, we want to issue the prefetches for all the 2491a2cdcdd2SPaul Dagnelie * indirect blocks asynchronously, so we can go on with whatever we were 2492a2cdcdd2SPaul Dagnelie * doing. 
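 *
 * Either way, ownership of dpa leaves this function: in the
 * synchronous case it is freed right after dbuf_issue_final_prefetch(),
 * and in the asynchronous case dbuf_prefetch_indirect_done() frees it.
 * That is why only pio may be referenced once the chain is started
 * (see the comment at the bottom of this function).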
2493a2cdcdd2SPaul Dagnelie */ 2494a2cdcdd2SPaul Dagnelie if (curlevel == level) { 2495a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, ==, blkid); 2496a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, &bp); 2497a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2498a2cdcdd2SPaul Dagnelie } else { 2499a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 25007802d7bfSMatthew Ahrens zbookmark_phys_t zb; 2501b24ab676SJeff Bonwick 250227295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 250327295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 250427295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 250527295216Sbenrubson 2506a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2507a2cdcdd2SPaul Dagnelie dn->dn_object, curlevel, curblkid); 2508a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2509a2cdcdd2SPaul Dagnelie &bp, dbuf_prefetch_indirect_done, dpa, prio, 2510fa9e4066Sahrens ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2511a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2512fa9e4066Sahrens } 2513a2cdcdd2SPaul Dagnelie /* 2514a2cdcdd2SPaul Dagnelie * We use pio here instead of dpa_zio since it's possible that 2515a2cdcdd2SPaul Dagnelie * dpa may have already been freed. 2516a2cdcdd2SPaul Dagnelie */ 2517a2cdcdd2SPaul Dagnelie zio_nowait(pio); 2518fa9e4066Sahrens } 2519fa9e4066Sahrens 2520fa9e4066Sahrens /* 2521fa9e4066Sahrens * Returns with db_holds incremented, and db_mtx not held. 2522fa9e4066Sahrens * Note: dn_struct_rwlock must be held. 2523fa9e4066Sahrens */ 2524fa9e4066Sahrens int 2525a2cdcdd2SPaul Dagnelie dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2526a2cdcdd2SPaul Dagnelie boolean_t fail_sparse, boolean_t fail_uncached, 2527fa9e4066Sahrens void *tag, dmu_buf_impl_t **dbp) 2528fa9e4066Sahrens { 2529fa9e4066Sahrens dmu_buf_impl_t *db, *parent = NULL; 2530fa9e4066Sahrens 25310a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2532fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2533fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, level); 2534fa9e4066Sahrens 2535fa9e4066Sahrens *dbp = NULL; 2536ea8dc4b6Seschrock top: 2537fa9e4066Sahrens /* dbuf_find() returns with db_mtx held */ 2538e57a022bSJustin T. 
Gibbs db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
2539fa9e4066Sahrens
2540fa9e4066Sahrens if (db == NULL) {
2541fa9e4066Sahrens blkptr_t *bp = NULL;
2542fa9e4066Sahrens int err;
2543fa9e4066Sahrens
2544a2cdcdd2SPaul Dagnelie if (fail_uncached)
2545a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT));
2546a2cdcdd2SPaul Dagnelie
2547c543ec06Sahrens ASSERT3P(parent, ==, NULL);
2548fa9e4066Sahrens err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
2549fa9e4066Sahrens if (fail_sparse) {
2550fa9e4066Sahrens if (err == 0 && bp && BP_IS_HOLE(bp))
2551be6fd75aSMatthew Ahrens err = SET_ERROR(ENOENT);
2552fa9e4066Sahrens if (err) {
2553c543ec06Sahrens if (parent)
2554ea8dc4b6Seschrock dbuf_rele(parent, NULL);
2555fa9e4066Sahrens return (err);
2556fa9e4066Sahrens }
2557fa9e4066Sahrens }
2558ea8dc4b6Seschrock if (err && err != ENOENT)
2559ea8dc4b6Seschrock return (err);
2560fa9e4066Sahrens db = dbuf_create(dn, level, blkid, parent, bp);
2561fa9e4066Sahrens }
2562fa9e4066Sahrens
2563a2cdcdd2SPaul Dagnelie if (fail_uncached && db->db_state != DB_CACHED) {
2564a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx);
2565a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT));
2566a2cdcdd2SPaul Dagnelie }
2567a2cdcdd2SPaul Dagnelie
2568dcbf3bd6SGeorge Wilson if (db->db_buf != NULL)
2569ea8dc4b6Seschrock ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
2570ea8dc4b6Seschrock
2571ea8dc4b6Seschrock ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
2572ea8dc4b6Seschrock
2573fa9e4066Sahrens /*
2574c717a561Smaybee * If this buffer is currently syncing out, and we are
2575c717a561Smaybee * still referencing it from db_data, we need to make a copy
2576c717a561Smaybee * of it in case we decide we want to dirty it again in this txg.
2577fa9e4066Sahrens */
25780a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2579ea8dc4b6Seschrock dn->dn_object != DMU_META_DNODE_OBJECT &&
2580c717a561Smaybee db->db_state == DB_CACHED && db->db_data_pending) {
2581c717a561Smaybee dbuf_dirty_record_t *dr = db->db_data_pending;
2582c717a561Smaybee
2583c717a561Smaybee if (dr->dt.dl.dr_data == db->db_buf) {
2584ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2585fa9e4066Sahrens
2586c717a561Smaybee dbuf_set_data(db,
25875602294fSDan Kimmel arc_alloc_buf(dn->dn_objset->os_spa, db, type,
25885602294fSDan Kimmel db->db.db_size));
2589c717a561Smaybee bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
2590fa9e4066Sahrens db->db.db_size);
2591fa9e4066Sahrens }
2592c717a561Smaybee }
2593fa9e4066Sahrens
2594dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) {
2595dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds));
259694c2d0ebSMatthew Ahrens multilist_remove(dbuf_cache, db);
2597dcbf3bd6SGeorge Wilson (void) refcount_remove_many(&dbuf_cache_size,
2598dcbf3bd6SGeorge Wilson db->db.db_size, db);
2599dcbf3bd6SGeorge Wilson }
2600ea8dc4b6Seschrock (void) refcount_add(&db->db_holds, tag);
26019c9dc39aSek110237 DBUF_VERIFY(db);
2602fa9e4066Sahrens mutex_exit(&db->db_mtx);
2603fa9e4066Sahrens
2604fa9e4066Sahrens /* NOTE: we can't rele the parent until after we drop the db_mtx */
2605c543ec06Sahrens if (parent)
2606ea8dc4b6Seschrock dbuf_rele(parent, NULL);
2607fa9e4066Sahrens
2608744947dcSTom Erickson ASSERT3P(DB_DNODE(db), ==, dn);
2609fa9e4066Sahrens ASSERT3U(db->db_blkid, ==, blkid);
2610fa9e4066Sahrens ASSERT3U(db->db_level, ==, level);
2611fa9e4066Sahrens *dbp = db;
2612fa9e4066Sahrens
2613fa9e4066Sahrens return (0);
2614fa9e4066Sahrens }
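
/*
 * Editorial usage sketch (not part of the original source): a caller
 * that already holds dn_struct_rwlock, per the contract above, and
 * wants a hold on a level-0 block only if it is already cached might
 * do the following; compare dbuf_hold_level() and the prefetch code
 * above.
 *
 *	dmu_buf_impl_t *db;
 *	int err = dbuf_hold_impl(dn, 0, blkid,
 *	    FALSE, TRUE, FTAG, &db);	/- fail_sparse=FALSE, fail_uncached=TRUE
 *	if (err == 0)
 *		dbuf_rele(db, FTAG);	/- drop the hold when done
 *
 * Passing fail_uncached = FALSE instead creates a missing dbuf (and
 * locates its blkptr via the parent) rather than returning ENOENT.
 */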
2615fa9e4066Sahrens 2616fa9e4066Sahrens dmu_buf_impl_t * 2617ea8dc4b6Seschrock dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2618fa9e4066Sahrens { 2619a2cdcdd2SPaul Dagnelie return (dbuf_hold_level(dn, 0, blkid, tag)); 2620fa9e4066Sahrens } 2621fa9e4066Sahrens 2622fa9e4066Sahrens dmu_buf_impl_t * 2623fa9e4066Sahrens dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2624fa9e4066Sahrens { 2625fa9e4066Sahrens dmu_buf_impl_t *db; 2626a2cdcdd2SPaul Dagnelie int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2627ea8dc4b6Seschrock return (err ? NULL : db); 2628fa9e4066Sahrens } 2629fa9e4066Sahrens 26301934e92fSmaybee void 2631ea8dc4b6Seschrock dbuf_create_bonus(dnode_t *dn) 2632fa9e4066Sahrens { 2633ea8dc4b6Seschrock ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2634ea8dc4b6Seschrock 2635ea8dc4b6Seschrock ASSERT(dn->dn_bonus == NULL); 26360a586ceaSMark Shellenbaum dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 26370a586ceaSMark Shellenbaum } 26380a586ceaSMark Shellenbaum 26390a586ceaSMark Shellenbaum int 26400a586ceaSMark Shellenbaum dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 26410a586ceaSMark Shellenbaum { 26420a586ceaSMark Shellenbaum dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2643744947dcSTom Erickson dnode_t *dn; 2644744947dcSTom Erickson 26450a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 2646be6fd75aSMatthew Ahrens return (SET_ERROR(ENOTSUP)); 26470a586ceaSMark Shellenbaum if (blksz == 0) 26480a586ceaSMark Shellenbaum blksz = SPA_MINBLOCKSIZE; 2649b5152584SMatthew Ahrens ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 26500a586ceaSMark Shellenbaum blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 26510a586ceaSMark Shellenbaum 2652744947dcSTom Erickson DB_DNODE_ENTER(db); 2653744947dcSTom Erickson dn = DB_DNODE(db); 2654744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 26550a586ceaSMark Shellenbaum dbuf_new_size(db, blksz, tx); 2656744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 2657744947dcSTom Erickson DB_DNODE_EXIT(db); 26580a586ceaSMark Shellenbaum 26590a586ceaSMark Shellenbaum return (0); 26600a586ceaSMark Shellenbaum } 26610a586ceaSMark Shellenbaum 26620a586ceaSMark Shellenbaum void 26630a586ceaSMark Shellenbaum dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 26640a586ceaSMark Shellenbaum { 26650a586ceaSMark Shellenbaum dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2666fa9e4066Sahrens } 2667fa9e4066Sahrens 2668ea8dc4b6Seschrock #pragma weak dmu_buf_add_ref = dbuf_add_ref 2669fa9e4066Sahrens void 2670fa9e4066Sahrens dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2671fa9e4066Sahrens { 2672ea8dc4b6Seschrock int64_t holds = refcount_add(&db->db_holds, tag); 2673dcbf3bd6SGeorge Wilson ASSERT3S(holds, >, 1); 2674fa9e4066Sahrens } 2675fa9e4066Sahrens 2676e57a022bSJustin T. Gibbs #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2677e57a022bSJustin T. Gibbs boolean_t 2678e57a022bSJustin T. Gibbs dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2679e57a022bSJustin T. Gibbs void *tag) 2680e57a022bSJustin T. Gibbs { 2681e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2682e57a022bSJustin T. Gibbs dmu_buf_impl_t *found_db; 2683e57a022bSJustin T. Gibbs boolean_t result = B_FALSE; 2684e57a022bSJustin T. Gibbs 2685e57a022bSJustin T. Gibbs if (db->db_blkid == DMU_BONUS_BLKID) 2686e57a022bSJustin T. Gibbs found_db = dbuf_find_bonus(os, obj); 2687e57a022bSJustin T. Gibbs else 2688e57a022bSJustin T. 
Gibbs found_db = dbuf_find(os, obj, 0, blkid); 2689e57a022bSJustin T. Gibbs 2690e57a022bSJustin T. Gibbs if (found_db != NULL) { 2691e57a022bSJustin T. Gibbs if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2692e57a022bSJustin T. Gibbs (void) refcount_add(&db->db_holds, tag); 2693e57a022bSJustin T. Gibbs result = B_TRUE; 2694e57a022bSJustin T. Gibbs } 2695e57a022bSJustin T. Gibbs mutex_exit(&db->db_mtx); 2696e57a022bSJustin T. Gibbs } 2697e57a022bSJustin T. Gibbs return (result); 2698e57a022bSJustin T. Gibbs } 2699e57a022bSJustin T. Gibbs 2700744947dcSTom Erickson /* 2701744947dcSTom Erickson * If you call dbuf_rele() you had better not be referencing the dnode handle 2702744947dcSTom Erickson * unless you have some other direct or indirect hold on the dnode. (An indirect 2703744947dcSTom Erickson * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2704744947dcSTom Erickson * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2705744947dcSTom Erickson * dnode's parent dbuf evicting its dnode handles. 2706744947dcSTom Erickson */ 2707fa9e4066Sahrens void 2708ea8dc4b6Seschrock dbuf_rele(dmu_buf_impl_t *db, void *tag) 2709fa9e4066Sahrens { 2710b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 2711b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, tag); 2712b24ab676SJeff Bonwick } 2713b24ab676SJeff Bonwick 271443466aaeSMax Grossman void 271543466aaeSMax Grossman dmu_buf_rele(dmu_buf_t *db, void *tag) 271643466aaeSMax Grossman { 271743466aaeSMax Grossman dbuf_rele((dmu_buf_impl_t *)db, tag); 271843466aaeSMax Grossman } 271943466aaeSMax Grossman 2720b24ab676SJeff Bonwick /* 2721b24ab676SJeff Bonwick * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2722b24ab676SJeff Bonwick * db_dirtycnt and db_holds to be updated atomically. 2723b24ab676SJeff Bonwick */ 2724b24ab676SJeff Bonwick void 2725b24ab676SJeff Bonwick dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2726b24ab676SJeff Bonwick { 2727fa9e4066Sahrens int64_t holds; 2728fa9e4066Sahrens 2729b24ab676SJeff Bonwick ASSERT(MUTEX_HELD(&db->db_mtx)); 27309c9dc39aSek110237 DBUF_VERIFY(db); 2731fa9e4066Sahrens 2732744947dcSTom Erickson /* 2733744947dcSTom Erickson * Remove the reference to the dbuf before removing its hold on the 2734744947dcSTom Erickson * dnode so we can guarantee in dnode_move() that a referenced bonus 2735744947dcSTom Erickson * buffer has a corresponding dnode hold. 2736744947dcSTom Erickson */ 2737fa9e4066Sahrens holds = refcount_remove(&db->db_holds, tag); 2738ea8dc4b6Seschrock ASSERT(holds >= 0); 2739fa9e4066Sahrens 2740c717a561Smaybee /* 2741c717a561Smaybee * We can't freeze indirects if there is a possibility that they 2742c717a561Smaybee * may be modified in the current syncing context. 2743c717a561Smaybee */ 2744dcbf3bd6SGeorge Wilson if (db->db_buf != NULL && 2745dcbf3bd6SGeorge Wilson holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 27466b4acc8bSahrens arc_buf_freeze(db->db_buf); 2747dcbf3bd6SGeorge Wilson } 27486b4acc8bSahrens 2749fa9e4066Sahrens if (holds == db->db_dirtycnt && 2750d2058105SJustin T. Gibbs db->db_level == 0 && db->db_user_immediate_evict) 2751fa9e4066Sahrens dbuf_evict_user(db); 2752ea8dc4b6Seschrock 2753ea8dc4b6Seschrock if (holds == 0) { 27540a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 2755cd485b49SJustin T. Gibbs dnode_t *dn; 2756d2058105SJustin T. Gibbs boolean_t evict_dbuf = db->db_pending_evict; 2757cd485b49SJustin T. Gibbs 2758cd485b49SJustin T. Gibbs /* 2759cd485b49SJustin T. 
Gibbs * If the dnode moves here, we cannot cross this 2760cd485b49SJustin T. Gibbs * barrier until the move completes. 2761cd485b49SJustin T. Gibbs */ 2762cd485b49SJustin T. Gibbs DB_DNODE_ENTER(db); 2763cd485b49SJustin T. Gibbs 2764cd485b49SJustin T. Gibbs dn = DB_DNODE(db); 2765cd485b49SJustin T. Gibbs atomic_dec_32(&dn->dn_dbufs_count); 2766cd485b49SJustin T. Gibbs 2767cd485b49SJustin T. Gibbs /* 2768cd485b49SJustin T. Gibbs * Decrementing the dbuf count means that the bonus 2769cd485b49SJustin T. Gibbs * buffer's dnode hold is no longer discounted in 2770cd485b49SJustin T. Gibbs * dnode_move(). The dnode cannot move until after 2771d2058105SJustin T. Gibbs * the dnode_rele() below. 2772cd485b49SJustin T. Gibbs */ 2773cd485b49SJustin T. Gibbs DB_DNODE_EXIT(db); 2774cd485b49SJustin T. Gibbs 2775cd485b49SJustin T. Gibbs /* 2776cd485b49SJustin T. Gibbs * Do not reference db after its lock is dropped. 2777cd485b49SJustin T. Gibbs * Another thread may evict it. 2778cd485b49SJustin T. Gibbs */ 2779ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 2780744947dcSTom Erickson 2781d2058105SJustin T. Gibbs if (evict_dbuf) 2782cd485b49SJustin T. Gibbs dnode_evict_bonus(dn); 2783d2058105SJustin T. Gibbs 2784d2058105SJustin T. Gibbs dnode_rele(dn, db); 2785ea8dc4b6Seschrock } else if (db->db_buf == NULL) { 2786ea8dc4b6Seschrock /* 2787ea8dc4b6Seschrock * This is a special case: we never associated this 2788ea8dc4b6Seschrock * dbuf with any data allocated from the ARC. 2789ea8dc4b6Seschrock */ 279082c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || 279182c9918fSTim Haley db->db_state == DB_NOFILL); 2792dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2793ea8dc4b6Seschrock } else if (arc_released(db->db_buf)) { 2794ea8dc4b6Seschrock /* 2795ea8dc4b6Seschrock * This dbuf has anonymous data associated with it. 
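 * That is, arc_released() reports the buffer is in the ARC's
 * anonymous state, disassociated from any on-disk block, so there
 * is nothing cacheable to preserve and the dbuf is destroyed
 * outright. (Editorial gloss.)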
2796ea8dc4b6Seschrock */ 2797dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2798ea8dc4b6Seschrock } else { 2799dcbf3bd6SGeorge Wilson boolean_t do_arc_evict = B_FALSE; 2800dcbf3bd6SGeorge Wilson blkptr_t bp; 2801dcbf3bd6SGeorge Wilson spa_t *spa = dmu_objset_spa(db->db_objset); 28029253d63dSGeorge Wilson 2803dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) && 2804dcbf3bd6SGeorge Wilson db->db_blkptr != NULL && 2805bbfa8ea8SMatthew Ahrens !BP_IS_HOLE(db->db_blkptr) && 2806bbfa8ea8SMatthew Ahrens !BP_IS_EMBEDDED(db->db_blkptr)) { 2807dcbf3bd6SGeorge Wilson do_arc_evict = B_TRUE; 2808dcbf3bd6SGeorge Wilson bp = *db->db_blkptr; 2809dcbf3bd6SGeorge Wilson } 2810dcbf3bd6SGeorge Wilson 2811dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) || 2812dcbf3bd6SGeorge Wilson db->db_pending_evict) { 2813dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2814dcbf3bd6SGeorge Wilson } else if (!multilist_link_active(&db->db_cache_link)) { 281594c2d0ebSMatthew Ahrens multilist_insert(dbuf_cache, db); 2816dcbf3bd6SGeorge Wilson (void) refcount_add_many(&dbuf_cache_size, 2817dcbf3bd6SGeorge Wilson db->db.db_size, db); 2818dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2819dcbf3bd6SGeorge Wilson 2820dcbf3bd6SGeorge Wilson dbuf_evict_notify(); 2821dcbf3bd6SGeorge Wilson } 2822dcbf3bd6SGeorge Wilson 2823dcbf3bd6SGeorge Wilson if (do_arc_evict) 2824bbfa8ea8SMatthew Ahrens arc_freed(spa, &bp); 2825bbfa8ea8SMatthew Ahrens } 2826ea8dc4b6Seschrock } else { 2827ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 2828fa9e4066Sahrens } 2829dcbf3bd6SGeorge Wilson 2830fa9e4066Sahrens } 2831fa9e4066Sahrens 2832fa9e4066Sahrens #pragma weak dmu_buf_refcount = dbuf_refcount 2833fa9e4066Sahrens uint64_t 2834fa9e4066Sahrens dbuf_refcount(dmu_buf_impl_t *db) 2835fa9e4066Sahrens { 2836fa9e4066Sahrens return (refcount_count(&db->db_holds)); 2837fa9e4066Sahrens } 2838fa9e4066Sahrens 2839fa9e4066Sahrens void * 2840bc9014e6SJustin Gibbs dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2841bc9014e6SJustin Gibbs dmu_buf_user_t *new_user) 2842fa9e4066Sahrens { 2843bc9014e6SJustin Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2844bc9014e6SJustin Gibbs 2845bc9014e6SJustin Gibbs mutex_enter(&db->db_mtx); 2846bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2847bc9014e6SJustin Gibbs if (db->db_user == old_user) 2848bc9014e6SJustin Gibbs db->db_user = new_user; 2849bc9014e6SJustin Gibbs else 2850bc9014e6SJustin Gibbs old_user = db->db_user; 2851bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2852bc9014e6SJustin Gibbs mutex_exit(&db->db_mtx); 2853bc9014e6SJustin Gibbs 2854bc9014e6SJustin Gibbs return (old_user); 2855fa9e4066Sahrens } 2856fa9e4066Sahrens 2857fa9e4066Sahrens void * 2858bc9014e6SJustin Gibbs dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2859bc9014e6SJustin Gibbs { 2860bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, NULL, user)); 2861bc9014e6SJustin Gibbs } 2862bc9014e6SJustin Gibbs 2863bc9014e6SJustin Gibbs void * 2864bc9014e6SJustin Gibbs dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2865fa9e4066Sahrens { 2866fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2867fa9e4066Sahrens 2868d2058105SJustin T. 
Gibbs db->db_user_immediate_evict = TRUE;
2869bc9014e6SJustin Gibbs return (dmu_buf_set_user(db_fake, user));
2870fa9e4066Sahrens }
2871fa9e4066Sahrens
2872fa9e4066Sahrens void *
2873bc9014e6SJustin Gibbs dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
2874fa9e4066Sahrens {
2875bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, user, NULL));
2876fa9e4066Sahrens }
2877fa9e4066Sahrens
2878fa9e4066Sahrens void *
2879fa9e4066Sahrens dmu_buf_get_user(dmu_buf_t *db_fake)
2880fa9e4066Sahrens {
2881fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2882fa9e4066Sahrens
2883bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING);
2884bc9014e6SJustin Gibbs return (db->db_user);
2885bc9014e6SJustin Gibbs }
2886bc9014e6SJustin Gibbs
2887bc9014e6SJustin Gibbs void
2888bc9014e6SJustin Gibbs dmu_buf_user_evict_wait()
2889bc9014e6SJustin Gibbs {
2890bc9014e6SJustin Gibbs taskq_wait(dbu_evict_taskq);
2891fa9e4066Sahrens }
2892fa9e4066Sahrens
289380901aeaSGeorge Wilson blkptr_t *
289480901aeaSGeorge Wilson dmu_buf_get_blkptr(dmu_buf_t *db)
289580901aeaSGeorge Wilson {
289680901aeaSGeorge Wilson dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
289780901aeaSGeorge Wilson return (dbi->db_blkptr);
289880901aeaSGeorge Wilson }
289980901aeaSGeorge Wilson
2900ae972795SMatthew Ahrens objset_t *
2901ae972795SMatthew Ahrens dmu_buf_get_objset(dmu_buf_t *db)
2902ae972795SMatthew Ahrens {
2903ae972795SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2904ae972795SMatthew Ahrens return (dbi->db_objset);
2905ae972795SMatthew Ahrens }
2906ae972795SMatthew Ahrens
290779d72832SMatthew Ahrens dnode_t *
290879d72832SMatthew Ahrens dmu_buf_dnode_enter(dmu_buf_t *db)
290979d72832SMatthew Ahrens {
291079d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
291179d72832SMatthew Ahrens DB_DNODE_ENTER(dbi);
291279d72832SMatthew Ahrens return (DB_DNODE(dbi));
291379d72832SMatthew Ahrens }
291479d72832SMatthew Ahrens
291579d72832SMatthew Ahrens void
291679d72832SMatthew Ahrens dmu_buf_dnode_exit(dmu_buf_t *db)
291779d72832SMatthew Ahrens {
291879d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
291979d72832SMatthew Ahrens DB_DNODE_EXIT(dbi);
292079d72832SMatthew Ahrens }
292179d72832SMatthew Ahrens
2922c717a561Smaybee static void
2923c717a561Smaybee dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2924fa9e4066Sahrens {
2925c717a561Smaybee /* ASSERT(dmu_tx_is_syncing(tx) */
2926c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx));
2927c717a561Smaybee
2928c717a561Smaybee if (db->db_blkptr != NULL)
2929c717a561Smaybee return;
2930c717a561Smaybee
29310a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) {
29320a586ceaSMark Shellenbaum db->db_blkptr = &dn->dn_phys->dn_spill;
29330a586ceaSMark Shellenbaum BP_ZERO(db->db_blkptr);
29340a586ceaSMark Shellenbaum return;
29350a586ceaSMark Shellenbaum }
2936c717a561Smaybee if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2937c717a561Smaybee /*
2938c717a561Smaybee * This buffer was allocated at a time when there were
2939c717a561Smaybee * no available blkptrs from the dnode, or it was
2940c717a561Smaybee * inappropriate to hook it in (i.e., nlevels mismatch).
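 * We hook it in now (editorial note): a level nlevels-1 buffer is
 * parented by the dnode itself, so below we point db_parent at
 * dn_dbuf and db_blkptr into dn_phys->dn_blkptr[].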
2941c717a561Smaybee */ 2942c717a561Smaybee ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 2943c717a561Smaybee ASSERT(db->db_parent == NULL); 2944c717a561Smaybee db->db_parent = dn->dn_dbuf; 2945c717a561Smaybee db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 2946c717a561Smaybee DBUF_VERIFY(db); 2947c717a561Smaybee } else { 2948c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 2949c717a561Smaybee int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2950c717a561Smaybee 2951c717a561Smaybee ASSERT(dn->dn_phys->dn_nlevels > 1); 2952c717a561Smaybee if (parent == NULL) { 2953c717a561Smaybee mutex_exit(&db->db_mtx); 2954c717a561Smaybee rw_enter(&dn->dn_struct_rwlock, RW_READER); 2955a2cdcdd2SPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 2956a2cdcdd2SPaul Dagnelie db->db_blkid >> epbs, db); 2957c717a561Smaybee rw_exit(&dn->dn_struct_rwlock); 2958c717a561Smaybee mutex_enter(&db->db_mtx); 2959c717a561Smaybee db->db_parent = parent; 2960c717a561Smaybee } 2961c717a561Smaybee db->db_blkptr = (blkptr_t *)parent->db.db_data + 2962c717a561Smaybee (db->db_blkid & ((1ULL << epbs) - 1)); 2963c717a561Smaybee DBUF_VERIFY(db); 2964c717a561Smaybee } 2965c717a561Smaybee } 2966c717a561Smaybee 2967c717a561Smaybee static void 2968c717a561Smaybee dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 2969c717a561Smaybee { 2970c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 2971744947dcSTom Erickson dnode_t *dn; 2972c717a561Smaybee zio_t *zio; 2973c717a561Smaybee 2974c717a561Smaybee ASSERT(dmu_tx_is_syncing(tx)); 2975c717a561Smaybee 2976c717a561Smaybee dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 2977c717a561Smaybee 2978c717a561Smaybee mutex_enter(&db->db_mtx); 2979c717a561Smaybee 2980c717a561Smaybee ASSERT(db->db_level > 0); 2981c717a561Smaybee DBUF_VERIFY(db); 2982c717a561Smaybee 29833e30c24aSWill Andrews /* Read the block if it hasn't been read yet. */ 2984c717a561Smaybee if (db->db_buf == NULL) { 2985c717a561Smaybee mutex_exit(&db->db_mtx); 2986c717a561Smaybee (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 2987c717a561Smaybee mutex_enter(&db->db_mtx); 2988c717a561Smaybee } 2989c717a561Smaybee ASSERT3U(db->db_state, ==, DB_CACHED); 2990c717a561Smaybee ASSERT(db->db_buf != NULL); 2991c717a561Smaybee 2992744947dcSTom Erickson DB_DNODE_ENTER(db); 2993744947dcSTom Erickson dn = DB_DNODE(db); 29943e30c24aSWill Andrews /* Indirect block size must match what the dnode thinks it is. 
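 * (Worked example, editorial: with dn_indblkshift = 14 this is
 * 1 << 14 = 16K, which holds 16K >> SPA_BLKPTRSHIFT = 128 child
 * block pointers, each 128 bytes.)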
*/ 2995744947dcSTom Erickson ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 2996c717a561Smaybee dbuf_check_blkptr(dn, db); 2997744947dcSTom Erickson DB_DNODE_EXIT(db); 2998c717a561Smaybee 29993e30c24aSWill Andrews /* Provide the pending dirty record to child dbufs */ 3000c717a561Smaybee db->db_data_pending = dr; 3001c717a561Smaybee 3002af2c4821Smaybee mutex_exit(&db->db_mtx); 30035cabbc6bSPrashanth Sreenivasa 3004088f3894Sahrens dbuf_write(dr, db->db_buf, tx); 3005c717a561Smaybee 3006c717a561Smaybee zio = dr->dr_zio; 3007c717a561Smaybee mutex_enter(&dr->dt.di.dr_mtx); 300846e1baa6SMatthew Ahrens dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3009c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3010c717a561Smaybee mutex_exit(&dr->dt.di.dr_mtx); 3011c717a561Smaybee zio_nowait(zio); 3012c717a561Smaybee } 3013c717a561Smaybee 3014c717a561Smaybee static void 3015c717a561Smaybee dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3016c717a561Smaybee { 3017c717a561Smaybee arc_buf_t **datap = &dr->dt.dl.dr_data; 3018c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3019744947dcSTom Erickson dnode_t *dn; 3020744947dcSTom Erickson objset_t *os; 3021c717a561Smaybee uint64_t txg = tx->tx_txg; 3022fa9e4066Sahrens 3023fa9e4066Sahrens ASSERT(dmu_tx_is_syncing(tx)); 3024fa9e4066Sahrens 3025fa9e4066Sahrens dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3026fa9e4066Sahrens 3027fa9e4066Sahrens mutex_enter(&db->db_mtx); 3028fa9e4066Sahrens /* 3029fa9e4066Sahrens * To be synced, we must be dirtied. But we 3030fa9e4066Sahrens * might have been freed after the dirty. 3031fa9e4066Sahrens */ 3032fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 3033fa9e4066Sahrens /* This buffer has been freed since it was dirtied */ 3034fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 3035fa9e4066Sahrens } else if (db->db_state == DB_FILL) { 3036fa9e4066Sahrens /* This buffer was freed and is now being re-filled */ 3037c717a561Smaybee ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3038fa9e4066Sahrens } else { 303982c9918fSTim Haley ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3040fa9e4066Sahrens } 30419c9dc39aSek110237 DBUF_VERIFY(db); 3042fa9e4066Sahrens 3043744947dcSTom Erickson DB_DNODE_ENTER(db); 3044744947dcSTom Erickson dn = DB_DNODE(db); 3045744947dcSTom Erickson 30460a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 30470a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 30480a586ceaSMark Shellenbaum dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 30490a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 30500a586ceaSMark Shellenbaum } 30510a586ceaSMark Shellenbaum 3052fa9e4066Sahrens /* 3053c717a561Smaybee * If this is a bonus buffer, simply copy the bonus data into the 3054c717a561Smaybee * dnode. It will be written out when the dnode is synced (and it 3055c717a561Smaybee * will be synced, since it must have been dirty for dbuf_sync to 3056c717a561Smaybee * be called). 
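 * (Editorial note: the copy below moves dn_phys->dn_bonuslen bytes
 * into DN_BONUS(dn_phys); the dirty record is then retired
 * immediately and no zio is issued for the bonus buffer itself.)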
3057fa9e4066Sahrens */
30580a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) {
3059c717a561Smaybee dbuf_dirty_record_t **drp;
30601934e92fSmaybee
3061ea8dc4b6Seschrock ASSERT(*datap != NULL);
3062fb09f5aaSMadhav Suresh ASSERT0(db->db_level);
3063ea8dc4b6Seschrock ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
3064ea8dc4b6Seschrock bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
3065744947dcSTom Erickson DB_DNODE_EXIT(db);
3066744947dcSTom Erickson
30670e8c6158Smaybee if (*datap != db->db.db_data) {
3068ea8dc4b6Seschrock zio_buf_free(*datap, DN_MAX_BONUSLEN);
30695a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
30700e8c6158Smaybee }
3071ea8dc4b6Seschrock db->db_data_pending = NULL;
3072c717a561Smaybee drp = &db->db_last_dirty;
3073c717a561Smaybee while (*drp != dr)
3074c717a561Smaybee drp = &(*drp)->dr_next;
307517f17c2dSbonwick ASSERT(dr->dr_next == NULL);
3076b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db);
307717f17c2dSbonwick *drp = dr->dr_next;
3078c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t));
3079ea8dc4b6Seschrock ASSERT(db->db_dirtycnt > 0);
3080ea8dc4b6Seschrock db->db_dirtycnt -= 1;
3081b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
3082ea8dc4b6Seschrock return;
3083ea8dc4b6Seschrock }
3084ea8dc4b6Seschrock
3085744947dcSTom Erickson os = dn->dn_objset;
3086744947dcSTom Erickson
3087c5c6ffa0Smaybee /*
3088f82bfe17Sgw25295 * This function may have dropped the db_mtx lock allowing a dmu_sync
3089f82bfe17Sgw25295 * operation to sneak in. As a result, we need to ensure that we
3090f82bfe17Sgw25295 * don't check the dr_override_state until we have returned from
3091f82bfe17Sgw25295 * dbuf_check_blkptr.
3092f82bfe17Sgw25295 */
3093f82bfe17Sgw25295 dbuf_check_blkptr(dn, db);
3094f82bfe17Sgw25295
3095f82bfe17Sgw25295 /*
3096744947dcSTom Erickson * If this buffer is in the middle of an immediate write,
3097c717a561Smaybee * wait for the synchronous IO to complete.
3098c5c6ffa0Smaybee */
3099c717a561Smaybee while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
3100c5c6ffa0Smaybee ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
3101c5c6ffa0Smaybee cv_wait(&db->db_changed, &db->db_mtx);
3102c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
3103c5c6ffa0Smaybee }
3104c717a561Smaybee
3105ab69d62fSMatthew Ahrens if (db->db_state != DB_NOFILL &&
3106ab69d62fSMatthew Ahrens dn->dn_object != DMU_META_DNODE_OBJECT &&
3107ab69d62fSMatthew Ahrens refcount_count(&db->db_holds) > 1 &&
3108b24ab676SJeff Bonwick dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
3109ab69d62fSMatthew Ahrens *datap == db->db_buf) {
3110fa9e4066Sahrens /*
311182c9918fSTim Haley * If this buffer is currently "in use" (i.e., there
311282c9918fSTim Haley * are active holds and db_data still references it),
311382c9918fSTim Haley * then make a copy before we start the write so that
311482c9918fSTim Haley * any modifications from the open txg will not leak
311582c9918fSTim Haley * into this write.
3116fa9e4066Sahrens *
311782c9918fSTim Haley * NOTE: this copy does not need to be made for
311882c9918fSTim Haley * objects only modified in the syncing context (e.g.
311982c9918fSTim Haley * the meta-dnode's DMU_OT_DNODE blocks).
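 * (Editorial note: after the copy, the syncing write consumes the
 * freshly allocated buffer while db_buf remains live, so open-txg
 * writers can keep modifying db.db_data without perturbing the
 * image being written.)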
3120fa9e4066Sahrens */ 31215602294fSDan Kimmel int psize = arc_buf_size(*datap); 3122ab69d62fSMatthew Ahrens arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 31235602294fSDan Kimmel enum zio_compress compress_type = arc_get_compression(*datap); 31245602294fSDan Kimmel 31255602294fSDan Kimmel if (compress_type == ZIO_COMPRESS_OFF) { 31265602294fSDan Kimmel *datap = arc_alloc_buf(os->os_spa, db, type, psize); 31275602294fSDan Kimmel } else { 31285602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 31295602294fSDan Kimmel int lsize = arc_buf_lsize(*datap); 31305602294fSDan Kimmel *datap = arc_alloc_compressed_buf(os->os_spa, db, 31315602294fSDan Kimmel psize, lsize, compress_type); 31325602294fSDan Kimmel } 31335602294fSDan Kimmel bcopy(db->db.db_data, (*datap)->b_data, psize); 3134fa9e4066Sahrens } 3135c717a561Smaybee db->db_data_pending = dr; 3136fa9e4066Sahrens 3137fa9e4066Sahrens mutex_exit(&db->db_mtx); 3138fa9e4066Sahrens 3139088f3894Sahrens dbuf_write(dr, *datap, tx); 3140c717a561Smaybee 3141c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 3142744947dcSTom Erickson if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3143c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3144744947dcSTom Erickson DB_DNODE_EXIT(db); 3145744947dcSTom Erickson } else { 3146744947dcSTom Erickson /* 3147744947dcSTom Erickson * Although zio_nowait() does not "wait for an IO", it does 3148744947dcSTom Erickson * initiate the IO. If this is an empty write it seems plausible 3149744947dcSTom Erickson * that the IO could actually be completed before the nowait 3150744947dcSTom Erickson * returns. We need to DB_DNODE_EXIT() first in case 3151744947dcSTom Erickson * zio_nowait() invalidates the dbuf. 3152744947dcSTom Erickson */ 3153744947dcSTom Erickson DB_DNODE_EXIT(db); 3154c717a561Smaybee zio_nowait(dr->dr_zio); 3155fa9e4066Sahrens } 3156744947dcSTom Erickson } 3157c717a561Smaybee 3158c717a561Smaybee void 315946e1baa6SMatthew Ahrens dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3160c717a561Smaybee { 3161c717a561Smaybee dbuf_dirty_record_t *dr; 3162c717a561Smaybee 3163c717a561Smaybee while (dr = list_head(list)) { 3164c717a561Smaybee if (dr->dr_zio != NULL) { 3165c717a561Smaybee /* 3166c717a561Smaybee * If we find an already initialized zio then we 3167c717a561Smaybee * are processing the meta-dnode, and we have finished. 3168c717a561Smaybee * The dbufs for all dnodes are put back on the list 3169c717a561Smaybee * during processing, so that we can zio_wait() 3170c717a561Smaybee * these IOs after initiating all child IOs. 
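 * Put differently (editorial note): dr_zio != NULL marks records
 * whose writes were already initiated and re-queued, so everything
 * from here to the tail only needs the caller's zio_wait(); the
 * loop stops rather than re-issuing them.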
3171c717a561Smaybee */ 3172c717a561Smaybee ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3173c717a561Smaybee DMU_META_DNODE_OBJECT); 3174c717a561Smaybee break; 3175fa9e4066Sahrens } 317646e1baa6SMatthew Ahrens if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 317746e1baa6SMatthew Ahrens dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 317846e1baa6SMatthew Ahrens VERIFY3U(dr->dr_dbuf->db_level, ==, level); 317946e1baa6SMatthew Ahrens } 3180c717a561Smaybee list_remove(list, dr); 3181c717a561Smaybee if (dr->dr_dbuf->db_level > 0) 3182c717a561Smaybee dbuf_sync_indirect(dr, tx); 3183c717a561Smaybee else 3184c717a561Smaybee dbuf_sync_leaf(dr, tx); 3185c717a561Smaybee } 3186c717a561Smaybee } 3187c717a561Smaybee 3188fa9e4066Sahrens /* ARGSUSED */ 3189fa9e4066Sahrens static void 3190c717a561Smaybee dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3191fa9e4066Sahrens { 3192fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 3193744947dcSTom Erickson dnode_t *dn; 3194e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3195c717a561Smaybee blkptr_t *bp_orig = &zio->io_bp_orig; 3196b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3197b24ab676SJeff Bonwick int64_t delta; 3198fa9e4066Sahrens uint64_t fill = 0; 3199b24ab676SJeff Bonwick int i; 3200fa9e4066Sahrens 320111ceac77SAlex Reece ASSERT3P(db->db_blkptr, !=, NULL); 320211ceac77SAlex Reece ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3203e14bb325SJeff Bonwick 3204744947dcSTom Erickson DB_DNODE_ENTER(db); 3205744947dcSTom Erickson dn = DB_DNODE(db); 3206b24ab676SJeff Bonwick delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3207b24ab676SJeff Bonwick dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3208b24ab676SJeff Bonwick zio->io_prev_space_delta = delta; 3209fa9e4066Sahrens 321043466aaeSMax Grossman if (bp->blk_birth != 0) { 32110a586ceaSMark Shellenbaum ASSERT((db->db_blkid != DMU_SPILL_BLKID && 32120a586ceaSMark Shellenbaum BP_GET_TYPE(bp) == dn->dn_type) || 32130a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && 32145d7b4d43SMatthew Ahrens BP_GET_TYPE(bp) == dn->dn_bonustype) || 32155d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp)); 3216e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == db->db_level); 321743466aaeSMax Grossman } 3218e14bb325SJeff Bonwick 3219fa9e4066Sahrens mutex_enter(&db->db_mtx); 3220fa9e4066Sahrens 32210a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 32220a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 32230a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 322411ceac77SAlex Reece ASSERT(!(BP_IS_HOLE(bp)) && 32250a586ceaSMark Shellenbaum db->db_blkptr == &dn->dn_phys->dn_spill); 32260a586ceaSMark Shellenbaum } 32270a586ceaSMark Shellenbaum #endif 32280a586ceaSMark Shellenbaum 3229fa9e4066Sahrens if (db->db_level == 0) { 3230fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 32310a586ceaSMark Shellenbaum if (db->db_blkid > dn->dn_phys->dn_maxblkid && 32320a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) 3233fa9e4066Sahrens dn->dn_phys->dn_maxblkid = db->db_blkid; 3234fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 3235fa9e4066Sahrens 3236fa9e4066Sahrens if (dn->dn_type == DMU_OT_DNODE) { 3237fa9e4066Sahrens dnode_phys_t *dnp = db->db.db_data; 3238fa9e4066Sahrens for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3239fa9e4066Sahrens i--, dnp++) { 3240fa9e4066Sahrens if (dnp->dn_type != DMU_OT_NONE) 3241fa9e4066Sahrens fill++; 3242fa9e4066Sahrens } 3243fa9e4066Sahrens } else { 324443466aaeSMax Grossman if (BP_IS_HOLE(bp)) { 324543466aaeSMax Grossman fill = 0; 324643466aaeSMax 
Grossman } else {
3247fa9e4066Sahrens fill = 1;
3248fa9e4066Sahrens }
324943466aaeSMax Grossman }
3250fa9e4066Sahrens } else {
3251e14bb325SJeff Bonwick blkptr_t *ibp = db->db.db_data;
3252fa9e4066Sahrens ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
3253e14bb325SJeff Bonwick for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
3254e14bb325SJeff Bonwick if (BP_IS_HOLE(ibp))
3255fa9e4066Sahrens continue;
32565d7b4d43SMatthew Ahrens fill += BP_GET_FILL(ibp);
3257fa9e4066Sahrens }
3258fa9e4066Sahrens }
3259744947dcSTom Erickson DB_DNODE_EXIT(db);
3260fa9e4066Sahrens
32615d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp))
3262e14bb325SJeff Bonwick bp->blk_fill = fill;
3263fa9e4066Sahrens
3264fa9e4066Sahrens mutex_exit(&db->db_mtx);
326511ceac77SAlex Reece
326611ceac77SAlex Reece rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
326711ceac77SAlex Reece *db->db_blkptr = *bp;
326811ceac77SAlex Reece rw_exit(&dn->dn_struct_rwlock);
3269fa9e4066Sahrens }
3270fa9e4066Sahrens
32718df0bcf0SPaul Dagnelie /* ARGSUSED */
32728df0bcf0SPaul Dagnelie /*
32738df0bcf0SPaul Dagnelie * This function gets called just prior to running through the compression
32748df0bcf0SPaul Dagnelie * stage of the zio pipeline. If we're an indirect block comprised of only
32758df0bcf0SPaul Dagnelie * holes, then we want this indirect to be compressed away to a hole. In
32768df0bcf0SPaul Dagnelie * order to do that we must zero out any information about the holes that
32778df0bcf0SPaul Dagnelie * this indirect points to before we try to compress it.
32788df0bcf0SPaul Dagnelie */
32798df0bcf0SPaul Dagnelie static void
32808df0bcf0SPaul Dagnelie dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
32818df0bcf0SPaul Dagnelie {
32828df0bcf0SPaul Dagnelie dmu_buf_impl_t *db = vdb;
32838df0bcf0SPaul Dagnelie dnode_t *dn;
32848df0bcf0SPaul Dagnelie blkptr_t *bp;
32851a01181fSGeorge Wilson unsigned int epbs, i;
32868df0bcf0SPaul Dagnelie
32878df0bcf0SPaul Dagnelie ASSERT3U(db->db_level, >, 0);
32888df0bcf0SPaul Dagnelie DB_DNODE_ENTER(db);
32898df0bcf0SPaul Dagnelie dn = DB_DNODE(db);
32908df0bcf0SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
32911a01181fSGeorge Wilson ASSERT3U(epbs, <, 31);
32928df0bcf0SPaul Dagnelie
32938df0bcf0SPaul Dagnelie /* Determine if all our children are holes */
32948df0bcf0SPaul Dagnelie for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
32958df0bcf0SPaul Dagnelie if (!BP_IS_HOLE(bp))
32968df0bcf0SPaul Dagnelie break;
32978df0bcf0SPaul Dagnelie }
32988df0bcf0SPaul Dagnelie
32998df0bcf0SPaul Dagnelie /*
33008df0bcf0SPaul Dagnelie * If all the children are holes, then zero them all out so that
33018df0bcf0SPaul Dagnelie * we may get compressed away.
33028df0bcf0SPaul Dagnelie */
33038df0bcf0SPaul Dagnelie if (i == 1 << epbs) {
33041a01181fSGeorge Wilson /*
33051a01181fSGeorge Wilson * We only found holes. Grab the rwlock to prevent
33061a01181fSGeorge Wilson * anybody from reading the blocks we're about to
33071a01181fSGeorge Wilson * zero out.
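 * (Editorial note: hole blkptrs are not necessarily all-zero; they
 * can carry metadata such as birth times, e.g. under the hole_birth
 * feature, which is the "information about the holes" being erased
 * here so the block can compress away entirely.)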
33081a01181fSGeorge Wilson */ 33091a01181fSGeorge Wilson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 33108df0bcf0SPaul Dagnelie bzero(db->db.db_data, db->db.db_size); 33111a01181fSGeorge Wilson rw_exit(&dn->dn_struct_rwlock); 33128df0bcf0SPaul Dagnelie } 33138df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 33148df0bcf0SPaul Dagnelie } 33158df0bcf0SPaul Dagnelie 331669962b56SMatthew Ahrens /* 331769962b56SMatthew Ahrens * The SPA will call this callback several times for each zio - once 331869962b56SMatthew Ahrens * for every physical child i/o (zio->io_phys_children times). This 331969962b56SMatthew Ahrens * allows the DMU to monitor the progress of each logical i/o. For example, 332069962b56SMatthew Ahrens * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 332169962b56SMatthew Ahrens * block. There may be a long delay before all copies/fragments are completed, 332269962b56SMatthew Ahrens * so this callback allows us to retire dirty space gradually, as the physical 332369962b56SMatthew Ahrens * i/os complete. 332469962b56SMatthew Ahrens */ 332569962b56SMatthew Ahrens /* ARGSUSED */ 332669962b56SMatthew Ahrens static void 332769962b56SMatthew Ahrens dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 332869962b56SMatthew Ahrens { 332969962b56SMatthew Ahrens dmu_buf_impl_t *db = arg; 333069962b56SMatthew Ahrens objset_t *os = db->db_objset; 333169962b56SMatthew Ahrens dsl_pool_t *dp = dmu_objset_pool(os); 333269962b56SMatthew Ahrens dbuf_dirty_record_t *dr; 333369962b56SMatthew Ahrens int delta = 0; 333469962b56SMatthew Ahrens 333569962b56SMatthew Ahrens dr = db->db_data_pending; 333669962b56SMatthew Ahrens ASSERT3U(dr->dr_txg, ==, zio->io_txg); 333769962b56SMatthew Ahrens 333869962b56SMatthew Ahrens /* 333969962b56SMatthew Ahrens * The callback will be called io_phys_children times. Retire one 334069962b56SMatthew Ahrens * portion of our dirty space each time we are called. Any rounding 334169962b56SMatthew Ahrens * error will be cleaned up by dsl_pool_sync()'s call to 334269962b56SMatthew Ahrens * dsl_pool_undirty_space(). 334369962b56SMatthew Ahrens */ 334469962b56SMatthew Ahrens delta = dr->dr_accounted / zio->io_phys_children; 334569962b56SMatthew Ahrens dsl_pool_undirty_space(dp, delta, zio->io_txg); 334669962b56SMatthew Ahrens } 334769962b56SMatthew Ahrens 3348c717a561Smaybee /* ARGSUSED */ 3349c717a561Smaybee static void 3350c717a561Smaybee dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3351c717a561Smaybee { 3352c717a561Smaybee dmu_buf_impl_t *db = vdb; 3353b24ab676SJeff Bonwick blkptr_t *bp_orig = &zio->io_bp_orig; 335443466aaeSMax Grossman blkptr_t *bp = db->db_blkptr; 335543466aaeSMax Grossman objset_t *os = db->db_objset; 335643466aaeSMax Grossman dmu_tx_t *tx = os->os_synctx; 3357c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 3358c717a561Smaybee 3359fb09f5aaSMadhav Suresh ASSERT0(zio->io_error); 3360b24ab676SJeff Bonwick ASSERT(db->db_blkptr == bp); 3361b24ab676SJeff Bonwick 336280901aeaSGeorge Wilson /* 336380901aeaSGeorge Wilson * For nopwrites and rewrites we ensure that the bp matches our 336480901aeaSGeorge Wilson * original and bypass all the accounting. 
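 * Editorial gloss: in both cases zio->io_bp is BP_EQUAL to bp_orig,
 * so no new block was born and none was freed, and the
 * dsl_dataset_block_kill()/dsl_dataset_block_born() pair below is
 * rightly skipped.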
336580901aeaSGeorge Wilson */ 336680901aeaSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3367b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, bp_orig)); 3368b24ab676SJeff Bonwick } else { 336943466aaeSMax Grossman dsl_dataset_t *ds = os->os_dsl_dataset; 3370b24ab676SJeff Bonwick (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3371b24ab676SJeff Bonwick dsl_dataset_block_born(ds, bp, tx); 3372b24ab676SJeff Bonwick } 3373c717a561Smaybee 3374c717a561Smaybee mutex_enter(&db->db_mtx); 3375c717a561Smaybee 3376b24ab676SJeff Bonwick DBUF_VERIFY(db); 3377b24ab676SJeff Bonwick 3378c717a561Smaybee drp = &db->db_last_dirty; 337917f17c2dSbonwick while ((dr = *drp) != db->db_data_pending) 338017f17c2dSbonwick drp = &dr->dr_next; 338117f17c2dSbonwick ASSERT(!list_link_active(&dr->dr_dirty_node)); 3382b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 338317f17c2dSbonwick ASSERT(dr->dr_next == NULL); 338417f17c2dSbonwick *drp = dr->dr_next; 3385c717a561Smaybee 33860a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 33870a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 3388744947dcSTom Erickson dnode_t *dn; 3389744947dcSTom Erickson 3390744947dcSTom Erickson DB_DNODE_ENTER(db); 3391744947dcSTom Erickson dn = DB_DNODE(db); 33920a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 33930a586ceaSMark Shellenbaum ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 33940a586ceaSMark Shellenbaum db->db_blkptr == &dn->dn_phys->dn_spill); 3395744947dcSTom Erickson DB_DNODE_EXIT(db); 33960a586ceaSMark Shellenbaum } 33970a586ceaSMark Shellenbaum #endif 33980a586ceaSMark Shellenbaum 3399c717a561Smaybee if (db->db_level == 0) { 34000a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3401c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 340282c9918fSTim Haley if (db->db_state != DB_NOFILL) { 3403c717a561Smaybee if (dr->dt.dl.dr_data != db->db_buf) 3404dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 340582c9918fSTim Haley } 3406c717a561Smaybee } else { 3407744947dcSTom Erickson dnode_t *dn; 3408744947dcSTom Erickson 3409744947dcSTom Erickson DB_DNODE_ENTER(db); 3410744947dcSTom Erickson dn = DB_DNODE(db); 3411c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3412c717a561Smaybee ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3413c717a561Smaybee if (!BP_IS_HOLE(db->db_blkptr)) { 3414c717a561Smaybee int epbs = 3415c717a561Smaybee dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 341643466aaeSMax Grossman ASSERT3U(db->db_blkid, <=, 341743466aaeSMax Grossman dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3418c717a561Smaybee ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3419c717a561Smaybee db->db.db_size); 3420c717a561Smaybee } 3421744947dcSTom Erickson DB_DNODE_EXIT(db); 3422c25056deSgw25295 mutex_destroy(&dr->dt.di.dr_mtx); 3423c25056deSgw25295 list_destroy(&dr->dt.di.dr_children); 3424c717a561Smaybee } 3425c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3426c717a561Smaybee 3427c717a561Smaybee cv_broadcast(&db->db_changed); 3428c717a561Smaybee ASSERT(db->db_dirtycnt > 0); 3429c717a561Smaybee db->db_dirtycnt -= 1; 3430c717a561Smaybee db->db_data_pending = NULL; 343143466aaeSMax Grossman dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg); 3432b24ab676SJeff Bonwick } 3433b24ab676SJeff Bonwick 3434b24ab676SJeff Bonwick static void 3435b24ab676SJeff Bonwick dbuf_write_nofill_ready(zio_t *zio) 3436b24ab676SJeff Bonwick { 3437b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, 
zio->io_private); 3438b24ab676SJeff Bonwick } 3439b24ab676SJeff Bonwick 3440b24ab676SJeff Bonwick static void 3441b24ab676SJeff Bonwick dbuf_write_nofill_done(zio_t *zio) 3442b24ab676SJeff Bonwick { 3443b24ab676SJeff Bonwick dbuf_write_done(zio, NULL, zio->io_private); 3444b24ab676SJeff Bonwick } 3445b24ab676SJeff Bonwick 3446b24ab676SJeff Bonwick static void 3447b24ab676SJeff Bonwick dbuf_write_override_ready(zio_t *zio) 3448b24ab676SJeff Bonwick { 3449b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3450b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3451b24ab676SJeff Bonwick 3452b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, db); 3453b24ab676SJeff Bonwick } 3454b24ab676SJeff Bonwick 3455b24ab676SJeff Bonwick static void 3456b24ab676SJeff Bonwick dbuf_write_override_done(zio_t *zio) 3457b24ab676SJeff Bonwick { 3458b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3459b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3460b24ab676SJeff Bonwick blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3461b24ab676SJeff Bonwick 3462b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3463b24ab676SJeff Bonwick if (!BP_EQUAL(zio->io_bp, obp)) { 3464b24ab676SJeff Bonwick if (!BP_IS_HOLE(obp)) 3465b24ab676SJeff Bonwick dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3466b24ab676SJeff Bonwick arc_release(dr->dt.dl.dr_data, db); 3467b24ab676SJeff Bonwick } 3468c717a561Smaybee mutex_exit(&db->db_mtx); 34694ee0199eSRobert Mustacchi dbuf_write_done(zio, NULL, db); 3470770499e1SDan Kimmel 3471770499e1SDan Kimmel if (zio->io_abd != NULL) 3472770499e1SDan Kimmel abd_put(zio->io_abd); 3473b24ab676SJeff Bonwick } 3474c717a561Smaybee 34755cabbc6bSPrashanth Sreenivasa typedef struct dbuf_remap_impl_callback_arg { 34765cabbc6bSPrashanth Sreenivasa objset_t *drica_os; 34775cabbc6bSPrashanth Sreenivasa uint64_t drica_blk_birth; 34785cabbc6bSPrashanth Sreenivasa dmu_tx_t *drica_tx; 34795cabbc6bSPrashanth Sreenivasa } dbuf_remap_impl_callback_arg_t; 34805cabbc6bSPrashanth Sreenivasa 34815cabbc6bSPrashanth Sreenivasa static void 34825cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 34835cabbc6bSPrashanth Sreenivasa void *arg) 34845cabbc6bSPrashanth Sreenivasa { 34855cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback_arg_t *drica = arg; 34865cabbc6bSPrashanth Sreenivasa objset_t *os = drica->drica_os; 34875cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(os); 34885cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = drica->drica_tx; 34895cabbc6bSPrashanth Sreenivasa 34905cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 34915cabbc6bSPrashanth Sreenivasa 34925cabbc6bSPrashanth Sreenivasa if (os == spa_meta_objset(spa)) { 34935cabbc6bSPrashanth Sreenivasa spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 34945cabbc6bSPrashanth Sreenivasa } else { 34955cabbc6bSPrashanth Sreenivasa dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 34965cabbc6bSPrashanth Sreenivasa size, drica->drica_blk_birth, tx); 34975cabbc6bSPrashanth Sreenivasa } 34985cabbc6bSPrashanth Sreenivasa } 34995cabbc6bSPrashanth Sreenivasa 35005cabbc6bSPrashanth Sreenivasa static void 35015cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 35025cabbc6bSPrashanth Sreenivasa { 35035cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = *bp; 35045cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 35055cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback_arg_t drica; 
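	/*
	 * spa_remap_blkptr() rewrites bp_copy so that any DVAs that still
	 * point into an indirect (removed) vdev refer to their new concrete
	 * locations, invoking dbuf_remap_impl_callback() (above) for each
	 * remapped range so that the old location can be accounted as
	 * obsolete.  Only if the BP actually changed is the result copied
	 * back over *bp below.
	 */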
35065cabbc6bSPrashanth Sreenivasa 35075cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 35085cabbc6bSPrashanth Sreenivasa 35095cabbc6bSPrashanth Sreenivasa drica.drica_os = dn->dn_objset; 35105cabbc6bSPrashanth Sreenivasa drica.drica_blk_birth = bp->blk_birth; 35115cabbc6bSPrashanth Sreenivasa drica.drica_tx = tx; 35125cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 35135cabbc6bSPrashanth Sreenivasa &drica)) { 35145cabbc6bSPrashanth Sreenivasa /* 35155cabbc6bSPrashanth Sreenivasa * The struct_rwlock prevents dbuf_read_impl() from 35165cabbc6bSPrashanth Sreenivasa * dereferencing the BP while we are changing it. To 35175cabbc6bSPrashanth Sreenivasa * avoid lock contention, only grab it when we are actually 35185cabbc6bSPrashanth Sreenivasa * changing the BP. 35195cabbc6bSPrashanth Sreenivasa */ 35205cabbc6bSPrashanth Sreenivasa rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 35215cabbc6bSPrashanth Sreenivasa *bp = bp_copy; 35225cabbc6bSPrashanth Sreenivasa rw_exit(&dn->dn_struct_rwlock); 35235cabbc6bSPrashanth Sreenivasa } 35245cabbc6bSPrashanth Sreenivasa } 35255cabbc6bSPrashanth Sreenivasa 35265cabbc6bSPrashanth Sreenivasa /* 35275cabbc6bSPrashanth Sreenivasa * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 35285cabbc6bSPrashanth Sreenivasa * to remap a copy of every bp in the dbuf. 35295cabbc6bSPrashanth Sreenivasa */ 35305cabbc6bSPrashanth Sreenivasa boolean_t 35315cabbc6bSPrashanth Sreenivasa dbuf_can_remap(const dmu_buf_impl_t *db) 35325cabbc6bSPrashanth Sreenivasa { 35335cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 35345cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 35355cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 35365cabbc6bSPrashanth Sreenivasa 35375cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_level, >, 0); 35385cabbc6bSPrashanth Sreenivasa ASSERT3S(db->db_state, ==, DB_CACHED); 35395cabbc6bSPrashanth Sreenivasa 35405cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 35415cabbc6bSPrashanth Sreenivasa 35425cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 35435cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 35445cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = bp[i]; 35455cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 35465cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 35475cabbc6bSPrashanth Sreenivasa break; 35485cabbc6bSPrashanth Sreenivasa } 35495cabbc6bSPrashanth Sreenivasa } 35505cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 35515cabbc6bSPrashanth Sreenivasa 35525cabbc6bSPrashanth Sreenivasa return (ret); 35535cabbc6bSPrashanth Sreenivasa } 35545cabbc6bSPrashanth Sreenivasa 35555cabbc6bSPrashanth Sreenivasa boolean_t 35565cabbc6bSPrashanth Sreenivasa dnode_needs_remap(const dnode_t *dn) 35575cabbc6bSPrashanth Sreenivasa { 35585cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 35595cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 35605cabbc6bSPrashanth Sreenivasa 35615cabbc6bSPrashanth Sreenivasa if (dn->dn_phys->dn_nlevels == 0) { 35625cabbc6bSPrashanth Sreenivasa return (B_FALSE); 35635cabbc6bSPrashanth Sreenivasa } 35645cabbc6bSPrashanth Sreenivasa 35655cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 35665cabbc6bSPrashanth Sreenivasa 35675cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, 
SCL_VDEV, FTAG, RW_READER); 35685cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 35695cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 35705cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 35715cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 35725cabbc6bSPrashanth Sreenivasa break; 35735cabbc6bSPrashanth Sreenivasa } 35745cabbc6bSPrashanth Sreenivasa } 35755cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 35765cabbc6bSPrashanth Sreenivasa 35775cabbc6bSPrashanth Sreenivasa return (ret); 35785cabbc6bSPrashanth Sreenivasa } 35795cabbc6bSPrashanth Sreenivasa 35805cabbc6bSPrashanth Sreenivasa /* 35815cabbc6bSPrashanth Sreenivasa * Remap any existing BPs to concrete vdevs, if possible. 35825cabbc6bSPrashanth Sreenivasa */ 35835cabbc6bSPrashanth Sreenivasa static void 35845cabbc6bSPrashanth Sreenivasa dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 35855cabbc6bSPrashanth Sreenivasa { 35865cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 35875cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 35885cabbc6bSPrashanth Sreenivasa 35895cabbc6bSPrashanth Sreenivasa if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 35905cabbc6bSPrashanth Sreenivasa return; 35915cabbc6bSPrashanth Sreenivasa 35925cabbc6bSPrashanth Sreenivasa if (db->db_level > 0) { 35935cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 35945cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 35955cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &bp[i], tx); 35965cabbc6bSPrashanth Sreenivasa } 35975cabbc6bSPrashanth Sreenivasa } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 35985cabbc6bSPrashanth Sreenivasa dnode_phys_t *dnp = db->db.db_data; 35995cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 36005cabbc6bSPrashanth Sreenivasa DMU_OT_DNODE); 36015cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 36025cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 36035cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 36045cabbc6bSPrashanth Sreenivasa } 36055cabbc6bSPrashanth Sreenivasa } 36065cabbc6bSPrashanth Sreenivasa } 36075cabbc6bSPrashanth Sreenivasa } 36085cabbc6bSPrashanth Sreenivasa 36095cabbc6bSPrashanth Sreenivasa 36103e30c24aSWill Andrews /* Issue I/O to commit a dirty buffer to disk.
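 *
 * The new zio is chained under the pending write of the parent
 * indirect block when one exists, and under the dnode's zio otherwise,
 * so that a parent block is not issued until the block pointers of all
 * of its children are known.  Three cases are handled below: an
 * override write whose BP was supplied in open context (by dmu_sync()
 * or dmu_buf_write_embedded()), a NOFILL write, and the common case of
 * an arc_write() of the dirty buffer.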
*/ 3611b24ab676SJeff Bonwick static void 3612b24ab676SJeff Bonwick dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 3613b24ab676SJeff Bonwick { 3614b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3615744947dcSTom Erickson dnode_t *dn; 3616744947dcSTom Erickson objset_t *os; 3617b24ab676SJeff Bonwick dmu_buf_impl_t *parent = db->db_parent; 3618b24ab676SJeff Bonwick uint64_t txg = tx->tx_txg; 36197802d7bfSMatthew Ahrens zbookmark_phys_t zb; 3620b24ab676SJeff Bonwick zio_prop_t zp; 3621b24ab676SJeff Bonwick zio_t *zio; 36220a586ceaSMark Shellenbaum int wp_flag = 0; 3623b24ab676SJeff Bonwick 362411ceac77SAlex Reece ASSERT(dmu_tx_is_syncing(tx)); 362511ceac77SAlex Reece 3626744947dcSTom Erickson DB_DNODE_ENTER(db); 3627744947dcSTom Erickson dn = DB_DNODE(db); 3628744947dcSTom Erickson os = dn->dn_objset; 3629744947dcSTom Erickson 3630b24ab676SJeff Bonwick if (db->db_state != DB_NOFILL) { 3631b24ab676SJeff Bonwick if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 3632b24ab676SJeff Bonwick /* 3633b24ab676SJeff Bonwick * Private object buffers are released here rather 3634b24ab676SJeff Bonwick * than in dbuf_dirty() since they are only modified 3635b24ab676SJeff Bonwick * in the syncing context and we don't want the 3636b24ab676SJeff Bonwick * overhead of making multiple copies of the data. 3637b24ab676SJeff Bonwick */ 3638b24ab676SJeff Bonwick if (BP_IS_HOLE(db->db_blkptr)) { 3639b24ab676SJeff Bonwick arc_buf_thaw(data); 3640b24ab676SJeff Bonwick } else { 36413f9d6ad7SLin Ling dbuf_release_bp(db); 3642b24ab676SJeff Bonwick } 36435cabbc6bSPrashanth Sreenivasa dbuf_remap(dn, db, tx); 3644b24ab676SJeff Bonwick } 3645b24ab676SJeff Bonwick } 3646b24ab676SJeff Bonwick 3647b24ab676SJeff Bonwick if (parent != dn->dn_dbuf) { 36483e30c24aSWill Andrews /* Our parent is an indirect block. */ 36493e30c24aSWill Andrews /* We have a dirty parent that has been scheduled for write. */ 3650b24ab676SJeff Bonwick ASSERT(parent && parent->db_data_pending); 36513e30c24aSWill Andrews /* Our parent's buffer is one level closer to the dnode. */ 3652b24ab676SJeff Bonwick ASSERT(db->db_level == parent->db_level-1); 36533e30c24aSWill Andrews /* 36543e30c24aSWill Andrews * We're about to modify our parent's db_data by modifying 36553e30c24aSWill Andrews * our block pointer, so the parent must be released. 36563e30c24aSWill Andrews */ 3657b24ab676SJeff Bonwick ASSERT(arc_released(parent->db_buf)); 3658b24ab676SJeff Bonwick zio = parent->db_data_pending->dr_zio; 3659b24ab676SJeff Bonwick } else { 36603e30c24aSWill Andrews /* Our parent is the dnode itself. */ 36610a586ceaSMark Shellenbaum ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 36620a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) || 36630a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 36640a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 3665b24ab676SJeff Bonwick ASSERT3P(db->db_blkptr, ==, 3666b24ab676SJeff Bonwick &dn->dn_phys->dn_blkptr[db->db_blkid]); 3667b24ab676SJeff Bonwick zio = dn->dn_zio; 3668b24ab676SJeff Bonwick } 3669b24ab676SJeff Bonwick 3670b24ab676SJeff Bonwick ASSERT(db->db_level == 0 || data == db->db_buf); 3671b24ab676SJeff Bonwick ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 3672b24ab676SJeff Bonwick ASSERT(zio); 3673b24ab676SJeff Bonwick 3674b24ab676SJeff Bonwick SET_BOOKMARK(&zb, os->os_dsl_dataset ? 
3675b24ab676SJeff Bonwick os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 3676b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 3677b24ab676SJeff Bonwick 36780a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 36790a586ceaSMark Shellenbaum wp_flag = WP_SPILL; 36800a586ceaSMark Shellenbaum wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 36810a586ceaSMark Shellenbaum 3682adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 3683744947dcSTom Erickson DB_DNODE_EXIT(db); 3684b24ab676SJeff Bonwick 368511ceac77SAlex Reece /* 368611ceac77SAlex Reece * We copy the blkptr now (rather than when we instantiate the dirty 368711ceac77SAlex Reece * record), because its value can change between open context and 368811ceac77SAlex Reece * syncing context. We do not need to hold dn_struct_rwlock to read 368911ceac77SAlex Reece * db_blkptr because we are in syncing context. 369011ceac77SAlex Reece */ 369111ceac77SAlex Reece dr->dr_bp_copy = *db->db_blkptr; 369211ceac77SAlex Reece 36935d7b4d43SMatthew Ahrens if (db->db_level == 0 && 36945d7b4d43SMatthew Ahrens dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 36955d7b4d43SMatthew Ahrens /* 36965d7b4d43SMatthew Ahrens * The BP for this block has been provided by open context 36975d7b4d43SMatthew Ahrens * (by dmu_sync() or dmu_buf_write_embedded()). 36985d7b4d43SMatthew Ahrens */ 3699770499e1SDan Kimmel abd_t *contents = (data != NULL) ? 3700770499e1SDan Kimmel abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 37015d7b4d43SMatthew Ahrens 37025602294fSDan Kimmel dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 37035602294fSDan Kimmel contents, db->db.db_size, db->db.db_size, &zp, 37048df0bcf0SPaul Dagnelie dbuf_write_override_ready, NULL, NULL, 37058df0bcf0SPaul Dagnelie dbuf_write_override_done, 370669962b56SMatthew Ahrens dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3707b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3708b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 3709b24ab676SJeff Bonwick zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 371080901aeaSGeorge Wilson dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 3711b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 3712b24ab676SJeff Bonwick } else if (db->db_state == DB_NOFILL) { 3713810e43b2SBill Pijewski ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 3714810e43b2SBill Pijewski zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 3715b24ab676SJeff Bonwick dr->dr_zio = zio_write(zio, os->os_spa, txg, 37165602294fSDan Kimmel &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 37178df0bcf0SPaul Dagnelie dbuf_write_nofill_ready, NULL, NULL, 37188df0bcf0SPaul Dagnelie dbuf_write_nofill_done, db, 3719b24ab676SJeff Bonwick ZIO_PRIORITY_ASYNC_WRITE, 3720b24ab676SJeff Bonwick ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 3721b24ab676SJeff Bonwick } else { 3722b24ab676SJeff Bonwick ASSERT(arc_released(data)); 37238df0bcf0SPaul Dagnelie 37248df0bcf0SPaul Dagnelie /* 37258df0bcf0SPaul Dagnelie * For indirect blocks, we want to set up the children 37268df0bcf0SPaul Dagnelie * ready callback so that we can properly handle an indirect 37278df0bcf0SPaul Dagnelie * block that only contains holes.
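 * Once every child write has passed its ready stage,
 * dbuf_write_children_ready() (earlier in this file) scans the
 * buffer's block pointers and, if all of them are holes, zeroes the
 * entire buffer so that the indirect block itself can be compressed
 * away to a hole.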
37288df0bcf0SPaul Dagnelie */ 37298df0bcf0SPaul Dagnelie arc_done_func_t *children_ready_cb = NULL; 37308df0bcf0SPaul Dagnelie if (db->db_level != 0) 37318df0bcf0SPaul Dagnelie children_ready_cb = dbuf_write_children_ready; 37328df0bcf0SPaul Dagnelie 3733b24ab676SJeff Bonwick dr->dr_zio = arc_write(zio, os->os_spa, txg, 373411ceac77SAlex Reece &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 3735dcbf3bd6SGeorge Wilson &zp, dbuf_write_ready, children_ready_cb, 373669962b56SMatthew Ahrens dbuf_write_physdone, dbuf_write_done, db, 373769962b56SMatthew Ahrens ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 3738b24ab676SJeff Bonwick } 3739fa9e4066Sahrens }