/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>
#include <sys/spa_impl.h>

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t *cache;
	zfs_refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
uint64_t dbuf_cache_max_bytes = 0;
uint64_t dbuf_metadata_cache_max_bytes = 0;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;

/*
 * For diagnostic purposes, this is incremented whenever we can't add
 * something to the metadata cache because it's full, and instead put
 * the data in the regular dbuf cache.
 */
uint64_t dbuf_metadata_cache_overflow;

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                      stop        signal     evict
 *                                      evicting    eviction   directly
 *                                                  thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
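
/*
 * Illustrative example (not from the original source): with a
 * dbuf_cache_max_bytes of 100 MB and the default hiwater/lowater settings
 * of 10%, callers signal the eviction thread once the cache exceeds 100 MB
 * (the mid water mark), the thread then evicts until the cache drops back
 * to the 90 MB low water mark, and callers begin evicting directly from
 * their own context once the cache exceeds the 110 MB high water mark.
 */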

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}
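
/*
 * Illustrative sketch (not part of the original file): a consumer of the
 * user-data API typically embeds a dmu_buf_user_t in its own structure,
 * initializes it with dmu_buf_init_user(), and attaches it with
 * dmu_buf_set_user().  The structure and callback names below are
 * hypothetical; only the dmu_buf_init_user()/dmu_buf_set_user() calls are
 * from the DMU interface.
 *
 *	typedef struct my_obj {
 *		dmu_buf_user_t mo_dbu;		// must allow container_of-style
 *		dmu_buf_t *mo_db;		//   recovery (first member here)
 *	} my_obj_t;
 *
 *	static void
 *	my_obj_evict_async(void *arg)
 *	{
 *		// arg is &mo->mo_dbu; mo_dbu is the first member, so the
 *		// pointers coincide in this sketch.
 *		my_obj_t *mo = arg;
 *		kmem_free(mo, sizeof (my_obj_t));
 *	}
 *
 *	dmu_buf_init_user(&mo->mo_dbu, NULL, my_obj_evict_async, &mo->mo_db);
 *	VERIFY3P(dmu_buf_set_user(mo->mo_db, &mo->mo_dbu), ==, NULL);
 */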

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			dbuf_metadata_cache_overflow++;
			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
			    dmu_buf_impl_t *, db);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size,
		    db->db.db_size, db);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}
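
	/*
	 * Illustrative sizing example (not from the original source): with
	 * 64 GB of physical memory, the sizing loop above grows hsize from
	 * 2^16 to 2^24 buckets (since 2^24 * 4096 >= 2^36), so the table
	 * occupies 2^24 * 8 bytes = 128 MB, matching the 2MB/GB estimate
	 * in the comment above.
	 */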

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Setup the parameters for the dbuf caches. We set the sizes of the
	 * dbuf cache and the metadata cache to 1/32nd and 1/16th (default)
	 * of the size of the ARC, respectively. If the values are set in
	 * /etc/system and they're not greater than the size of the ARC, then
	 * we honor that value.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_max_bytes()) {
		dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift;
	}
	if (dbuf_metadata_cache_max_bytes == 0 ||
	    dbuf_metadata_cache_max_bytes >= arc_max_bytes()) {
		dbuf_metadata_cache_max_bytes =
		    arc_max_bytes() >> dbuf_metadata_cache_shift;
	}

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		dbuf_caches[dcs].cache =
		    multilist_create(sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(dbuf_caches[dcs].cache);
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
		return (offset >> (dn->dn_datablkshift + level *
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT)));
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
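
/*
 * Illustrative example (not from the original source): with 128K data blocks
 * (datablkshift = 17) and 128K indirect blocks (indblkshift = 17, i.e. 1024
 * block pointers per indirect block, since SPA_BLKPTRSHIFT is 7), the level 1
 * blkid for byte offset 1 GiB is (1 << 30) >> (17 + 1 * (17 - 7)) = 8:
 * level 0 blkid 8192, divided by 1024 level 0 blocks per level 1 block.
 */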
Indirect blocks are 994eb633035STom Caputi * always verified against their secure checksum-of-MACs assuming that the 995eb633035STom Caputi * dnode containing them is correct. Now that we are doing a decrypting read, 996eb633035STom Caputi * we can be sure that the key is loaded and verify that assumption. This is 997eb633035STom Caputi * especially important considering that we always read encrypted dnode 998eb633035STom Caputi * blocks as raw data (without verifying their MACs) to start, and 999eb633035STom Caputi * decrypt / authenticate them when we need to read an encrypted bonus buffer. 1000eb633035STom Caputi */ 1001eb633035STom Caputi static int 1002eb633035STom Caputi dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags) 1003eb633035STom Caputi { 1004eb633035STom Caputi int err = 0; 1005eb633035STom Caputi objset_t *os = db->db_objset; 1006eb633035STom Caputi arc_buf_t *dnode_abuf; 1007eb633035STom Caputi dnode_t *dn; 1008eb633035STom Caputi zbookmark_phys_t zb; 1009eb633035STom Caputi 1010eb633035STom Caputi ASSERT(MUTEX_HELD(&db->db_mtx)); 1011eb633035STom Caputi 1012eb633035STom Caputi if (!os->os_encrypted || os->os_raw_receive || 1013eb633035STom Caputi (flags & DB_RF_NO_DECRYPT) != 0) 1014eb633035STom Caputi return (0); 1015eb633035STom Caputi 1016eb633035STom Caputi DB_DNODE_ENTER(db); 1017eb633035STom Caputi dn = DB_DNODE(db); 1018eb633035STom Caputi dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL; 1019eb633035STom Caputi 1020eb633035STom Caputi if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) { 1021eb633035STom Caputi DB_DNODE_EXIT(db); 1022eb633035STom Caputi return (0); 1023eb633035STom Caputi } 1024eb633035STom Caputi 1025eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(os), 1026eb633035STom Caputi DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid); 1027eb633035STom Caputi err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE); 1028eb633035STom Caputi 1029eb633035STom Caputi /* 1030eb633035STom Caputi * An error code of EACCES tells us that the key is still not 1031eb633035STom Caputi * available. This is ok if we are only reading authenticated 1032eb633035STom Caputi * (and therefore non-encrypted) blocks. 1033eb633035STom Caputi */ 1034eb633035STom Caputi if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID && 1035eb633035STom Caputi !DMU_OT_IS_ENCRYPTED(dn->dn_type)) || 1036eb633035STom Caputi (db->db_blkid == DMU_BONUS_BLKID && 1037eb633035STom Caputi !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype)))) 1038eb633035STom Caputi err = 0; 1039eb633035STom Caputi 1040eb633035STom Caputi DB_DNODE_EXIT(db); 1041eb633035STom Caputi 1042eb633035STom Caputi return (err); 1043eb633035STom Caputi } 1044eb633035STom Caputi 1045eb633035STom Caputi static int 1046cf6106c8SMatthew Ahrens dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1047fa9e4066Sahrens { 1048744947dcSTom Erickson dnode_t *dn; 10497802d7bfSMatthew Ahrens zbookmark_phys_t zb; 10507adb730bSGeorge Wilson arc_flags_t aflags = ARC_FLAG_NOWAIT; 1051eb633035STom Caputi int err, zio_flags = 0; 1052fa9e4066Sahrens 1053744947dcSTom Erickson DB_DNODE_ENTER(db); 1054744947dcSTom Erickson dn = DB_DNODE(db); 1055e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1056fa9e4066Sahrens /* We need the struct_rwlock to prevent db_blkptr from changing. 
*/ 1057088f3894Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1058ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 1059ea8dc4b6Seschrock ASSERT(db->db_state == DB_UNCACHED); 1060ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1061fa9e4066Sahrens 10620a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 106354811da5SToomas Soome /* 106454811da5SToomas Soome * The bonus length stored in the dnode may be less than 106554811da5SToomas Soome * the maximum available space in the bonus buffer. 106654811da5SToomas Soome */ 1067cf04dda1SMark Maybee int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 106854811da5SToomas Soome int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 10691934e92fSmaybee 1070eb633035STom Caputi /* if the underlying dnode block is encrypted, decrypt it */ 1071eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1072eb633035STom Caputi if (err != 0) { 1073eb633035STom Caputi DB_DNODE_EXIT(db); 1074eb633035STom Caputi mutex_exit(&db->db_mtx); 1075eb633035STom Caputi return (err); 1076eb633035STom Caputi } 1077eb633035STom Caputi 10781934e92fSmaybee ASSERT3U(bonuslen, <=, db->db.db_size); 107954811da5SToomas Soome db->db.db_data = zio_buf_alloc(max_bonuslen); 108054811da5SToomas Soome arc_space_consume(max_bonuslen, ARC_SPACE_BONUS); 108154811da5SToomas Soome if (bonuslen < max_bonuslen) 108254811da5SToomas Soome bzero(db->db.db_data, max_bonuslen); 1083cf04dda1SMark Maybee if (bonuslen) 1084cf04dda1SMark Maybee bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 1085744947dcSTom Erickson DB_DNODE_EXIT(db); 1086fa9e4066Sahrens db->db_state = DB_CACHED; 1087fa9e4066Sahrens mutex_exit(&db->db_mtx); 1088eb633035STom Caputi return (0); 1089fa9e4066Sahrens } 1090fa9e4066Sahrens 10911c8564a7SMark Maybee /* 10921c8564a7SMark Maybee * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 10931c8564a7SMark Maybee * processes the delete record and clears the bp while we are waiting 10941c8564a7SMark Maybee * for the dn_mtx (resulting in a "no" from block_freed). 10951c8564a7SMark Maybee */ 1096088f3894Sahrens if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 10971c8564a7SMark Maybee (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 10981c8564a7SMark Maybee BP_IS_HOLE(db->db_blkptr)))) { 1099ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1100ad23a2dbSjohansen 11015602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 11025602294fSDan Kimmel db->db.db_size)); 1103fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 11048df0bcf0SPaul Dagnelie 11058df0bcf0SPaul Dagnelie if (db->db_blkptr != NULL && db->db_level > 0 && 11068df0bcf0SPaul Dagnelie BP_IS_HOLE(db->db_blkptr) && 11078df0bcf0SPaul Dagnelie db->db_blkptr->blk_birth != 0) { 11088df0bcf0SPaul Dagnelie blkptr_t *bps = db->db.db_data; 11098df0bcf0SPaul Dagnelie for (int i = 0; i < ((1 << 11108df0bcf0SPaul Dagnelie DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 11118df0bcf0SPaul Dagnelie i++) { 11128df0bcf0SPaul Dagnelie blkptr_t *bp = &bps[i]; 11138df0bcf0SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 11148df0bcf0SPaul Dagnelie 1 << dn->dn_indblkshift); 11158df0bcf0SPaul Dagnelie BP_SET_LSIZE(bp, 11168df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) == 1 ? 
11178df0bcf0SPaul Dagnelie dn->dn_datablksz : 11188df0bcf0SPaul Dagnelie BP_GET_LSIZE(db->db_blkptr)); 11198df0bcf0SPaul Dagnelie BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 11208df0bcf0SPaul Dagnelie BP_SET_LEVEL(bp, 11218df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) - 1); 11228df0bcf0SPaul Dagnelie BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 11238df0bcf0SPaul Dagnelie } 11248df0bcf0SPaul Dagnelie } 11258df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 1126fa9e4066Sahrens db->db_state = DB_CACHED; 1127fa9e4066Sahrens mutex_exit(&db->db_mtx); 1128eb633035STom Caputi return (0); 1129eb633035STom Caputi } 1130eb633035STom Caputi 1131eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1132eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 1133eb633035STom Caputi 1134eb633035STom Caputi /* 1135eb633035STom Caputi * All bps of an encrypted os should have the encryption bit set. 1136eb633035STom Caputi * If this is not true it indicates tampering and we report an error. 1137eb633035STom Caputi */ 1138eb633035STom Caputi if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) { 1139eb633035STom Caputi spa_log_error(db->db_objset->os_spa, &zb); 1140eb633035STom Caputi zfs_panic_recover("unencrypted block in encrypted " 1141eb633035STom Caputi "object set %llu", dmu_objset_id(db->db_objset)); 1142eb633035STom Caputi DB_DNODE_EXIT(db); 1143eb633035STom Caputi mutex_exit(&db->db_mtx); 1144eb633035STom Caputi return (SET_ERROR(EIO)); 1145eb633035STom Caputi } 1146eb633035STom Caputi 1147eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1148eb633035STom Caputi if (err != 0) { 1149eb633035STom Caputi DB_DNODE_EXIT(db); 1150eb633035STom Caputi mutex_exit(&db->db_mtx); 1151eb633035STom Caputi return (err); 1152fa9e4066Sahrens } 1153fa9e4066Sahrens 1154744947dcSTom Erickson DB_DNODE_EXIT(db); 1155744947dcSTom Erickson 1156fa9e4066Sahrens db->db_state = DB_READ; 1157fa9e4066Sahrens mutex_exit(&db->db_mtx); 1158fa9e4066Sahrens 11593baa08fcSek110237 if (DBUF_IS_L2CACHEABLE(db)) 11607adb730bSGeorge Wilson aflags |= ARC_FLAG_L2CACHE; 11613baa08fcSek110237 1162ea8dc4b6Seschrock dbuf_add_ref(db, NULL); 1163088f3894Sahrens 1164eb633035STom Caputi zio_flags = (flags & DB_RF_CANFAIL) ? 1165eb633035STom Caputi ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED; 1166eb633035STom Caputi 1167eb633035STom Caputi if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr)) 1168eb633035STom Caputi zio_flags |= ZIO_FLAG_RAW; 1169eb633035STom Caputi 1170eb633035STom Caputi err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1171eb633035STom Caputi dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags, 117213506d1eSmaybee &aflags, &zb); 1173eb633035STom Caputi 1174eb633035STom Caputi return (err); 1175fa9e4066Sahrens } 1176fa9e4066Sahrens 11775602294fSDan Kimmel /* 11785602294fSDan Kimmel * This is our just-in-time copy function. It makes a copy of buffers that 11795602294fSDan Kimmel * have been modified in a previous transaction group before we access them in 11805602294fSDan Kimmel * the current active group. 11815602294fSDan Kimmel * 11825602294fSDan Kimmel * This function is used in three places: when we are dirtying a buffer for the 11835602294fSDan Kimmel * first time in a txg, when we are freeing a range in a dnode that includes 11845602294fSDan Kimmel * this buffer, and when we are accessing a buffer which was received compressed 11855602294fSDan Kimmel * and later referenced in a WRITE_BYREF record. 
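 *
 * In outline, the logic below (a summary, not a separate code path):
 * if the most recent dirty record has not synced yet and still points
 * at the dbuf's current data, then either copy that data into a newly
 * allocated buffer for the dirty record (when other holders may still
 * be using it), or simply drop db_buf / db.db_data and let the dirty
 * record keep the only reference (when nobody else holds the dbuf).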
11865602294fSDan Kimmel * 11875602294fSDan Kimmel * Note that when we are called from dbuf_free_range() we do not put a hold on 11885602294fSDan Kimmel * the buffer, we just traverse the active dbuf list for the dnode. 11895602294fSDan Kimmel */ 11905602294fSDan Kimmel static void 11915602294fSDan Kimmel dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 11925602294fSDan Kimmel { 11935602294fSDan Kimmel dbuf_dirty_record_t *dr = db->db_last_dirty; 11945602294fSDan Kimmel 11955602294fSDan Kimmel ASSERT(MUTEX_HELD(&db->db_mtx)); 11965602294fSDan Kimmel ASSERT(db->db.db_data != NULL); 11975602294fSDan Kimmel ASSERT(db->db_level == 0); 11985602294fSDan Kimmel ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 11995602294fSDan Kimmel 12005602294fSDan Kimmel if (dr == NULL || 12015602294fSDan Kimmel (dr->dt.dl.dr_data != 12025602294fSDan Kimmel ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) 12035602294fSDan Kimmel return; 12045602294fSDan Kimmel 12055602294fSDan Kimmel /* 12065602294fSDan Kimmel * If the last dirty record for this dbuf has not yet synced 12075602294fSDan Kimmel * and its referencing the dbuf data, either: 12085602294fSDan Kimmel * reset the reference to point to a new copy, 12095602294fSDan Kimmel * or (if there a no active holders) 12105602294fSDan Kimmel * just null out the current db_data pointer. 12115602294fSDan Kimmel */ 1212eb633035STom Caputi ASSERT3U(dr->dr_txg, >=, txg - 2); 12135602294fSDan Kimmel if (db->db_blkid == DMU_BONUS_BLKID) { 12145602294fSDan Kimmel /* Note that the data bufs here are zio_bufs */ 121554811da5SToomas Soome dnode_t *dn = DB_DNODE(db); 121654811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots); 121754811da5SToomas Soome dr->dt.dl.dr_data = zio_buf_alloc(bonuslen); 121854811da5SToomas Soome arc_space_consume(bonuslen, ARC_SPACE_BONUS); 121954811da5SToomas Soome bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen); 1220e914ace2STim Schumacher } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) { 1221eb633035STom Caputi dnode_t *dn = DB_DNODE(db); 12225602294fSDan Kimmel int size = arc_buf_size(db->db_buf); 12235602294fSDan Kimmel arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 12245602294fSDan Kimmel spa_t *spa = db->db_objset->os_spa; 12255602294fSDan Kimmel enum zio_compress compress_type = 12265602294fSDan Kimmel arc_get_compression(db->db_buf); 12275602294fSDan Kimmel 1228eb633035STom Caputi if (arc_is_encrypted(db->db_buf)) { 1229eb633035STom Caputi boolean_t byteorder; 1230eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 1231eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 1232eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 1233eb633035STom Caputi 1234eb633035STom Caputi arc_get_raw_params(db->db_buf, &byteorder, salt, 1235eb633035STom Caputi iv, mac); 1236eb633035STom Caputi dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db, 1237eb633035STom Caputi dmu_objset_id(dn->dn_objset), byteorder, salt, iv, 1238eb633035STom Caputi mac, dn->dn_type, size, arc_buf_lsize(db->db_buf), 1239eb633035STom Caputi compress_type); 1240eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) { 12415602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 12425602294fSDan Kimmel dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 12435602294fSDan Kimmel size, arc_buf_lsize(db->db_buf), compress_type); 1244eb633035STom Caputi } else { 1245eb633035STom Caputi dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 12465602294fSDan Kimmel } 12475602294fSDan Kimmel bcopy(db->db.db_data, 
dr->dt.dl.dr_data->b_data, size); 12485602294fSDan Kimmel } else { 12495602294fSDan Kimmel db->db_buf = NULL; 12505602294fSDan Kimmel dbuf_clear_data(db); 12515602294fSDan Kimmel } 12525602294fSDan Kimmel } 12535602294fSDan Kimmel 1254ea8dc4b6Seschrock int 1255ea8dc4b6Seschrock dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1256fa9e4066Sahrens { 1257ea8dc4b6Seschrock int err = 0; 125843466aaeSMax Grossman boolean_t prefetch; 1259744947dcSTom Erickson dnode_t *dn; 1260fa9e4066Sahrens 1261fa9e4066Sahrens /* 1262fa9e4066Sahrens * We don't have to hold the mutex to check db_state because it 1263fa9e4066Sahrens * can't be freed while we have a hold on the buffer. 1264fa9e4066Sahrens */ 1265e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 1266fa9e4066Sahrens 126782c9918fSTim Haley if (db->db_state == DB_NOFILL) 1268be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 126982c9918fSTim Haley 1270744947dcSTom Erickson DB_DNODE_ENTER(db); 1271744947dcSTom Erickson dn = DB_DNODE(db); 1272fa9e4066Sahrens if ((flags & DB_RF_HAVESTRUCT) == 0) 1273744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_READER); 1274fa9e4066Sahrens 12750a586ceaSMark Shellenbaum prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1276744947dcSTom Erickson (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 12773baa08fcSek110237 DBUF_IS_CACHEABLE(db); 127813506d1eSmaybee 1279fa9e4066Sahrens mutex_enter(&db->db_mtx); 1280ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1281eb633035STom Caputi spa_t *spa = dn->dn_objset->os_spa; 1282eb633035STom Caputi 12835602294fSDan Kimmel /* 1284eb633035STom Caputi * Ensure that this block's dnode has been decrypted if 1285eb633035STom Caputi * the caller has requested decrypted data. 12865602294fSDan Kimmel */ 1287eb633035STom Caputi err = dbuf_read_verify_dnode_crypt(db, flags); 1288eb633035STom Caputi 1289eb633035STom Caputi /* 1290eb633035STom Caputi * If the arc buf is compressed or encrypted and the caller 1291eb633035STom Caputi * requested uncompressed data, we need to untransform it 1292eb633035STom Caputi * before returning. We also call arc_untransform() on any 1293eb633035STom Caputi * unauthenticated blocks, which will verify their MAC if 1294eb633035STom Caputi * the key is now available. 
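 *
 * Concretely, this mirrors the test a few lines below: untransform is
 * attempted only when the caller did not pass DB_RF_NO_DECRYPT and the
 * cached arc buf is still encrypted, unauthenticated, or compressed
 * (arc_get_compression() != ZIO_COMPRESS_OFF).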
1295eb633035STom Caputi */ 1296eb633035STom Caputi if (err == 0 && db->db_buf != NULL && 1297eb633035STom Caputi (flags & DB_RF_NO_DECRYPT) == 0 && 1298eb633035STom Caputi (arc_is_encrypted(db->db_buf) || 1299eb633035STom Caputi arc_is_unauthenticated(db->db_buf) || 1300eb633035STom Caputi arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) { 1301eb633035STom Caputi zbookmark_phys_t zb; 1302eb633035STom Caputi 1303eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 1304eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 1305eb633035STom Caputi dbuf_fix_old_data(db, spa_syncing_txg(spa)); 1306eb633035STom Caputi err = arc_untransform(db->db_buf, spa, &zb, B_FALSE); 13075602294fSDan Kimmel dbuf_set_data(db, db->db_buf); 13085602294fSDan Kimmel } 1309ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 1310eb633035STom Caputi if (err == 0 && prefetch) 1311cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1312ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1313744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1314744947dcSTom Erickson DB_DNODE_EXIT(db); 1315ea8dc4b6Seschrock } else if (db->db_state == DB_UNCACHED) { 1316744947dcSTom Erickson spa_t *spa = dn->dn_objset->os_spa; 1317def4fac5SMatthew Ahrens boolean_t need_wait = B_FALSE; 1318744947dcSTom Erickson 1319def4fac5SMatthew Ahrens if (zio == NULL && 1320def4fac5SMatthew Ahrens db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1321744947dcSTom Erickson zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1322def4fac5SMatthew Ahrens need_wait = B_TRUE; 1323def4fac5SMatthew Ahrens } 1324eb633035STom Caputi err = dbuf_read_impl(db, zio, flags); 132513506d1eSmaybee 1326ea8dc4b6Seschrock /* dbuf_read_impl has dropped db_mtx for us */ 1327ea8dc4b6Seschrock 1328eb633035STom Caputi if (!err && prefetch) 1329cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1330ea8dc4b6Seschrock 1331ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1332744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1333744947dcSTom Erickson DB_DNODE_EXIT(db); 1334ea8dc4b6Seschrock 1335eb633035STom Caputi if (!err && need_wait) 1336ea8dc4b6Seschrock err = zio_wait(zio); 1337ea8dc4b6Seschrock } else { 13383e30c24aSWill Andrews /* 13393e30c24aSWill Andrews * Another reader came in while the dbuf was in flight 13403e30c24aSWill Andrews * between UNCACHED and CACHED. Either a writer will finish 13413e30c24aSWill Andrews * writing the buffer (sending the dbuf to CACHED) or the 13423e30c24aSWill Andrews * first reader's request will reach the read_done callback 13433e30c24aSWill Andrews * and send the dbuf to CACHED. Otherwise, a failure 13443e30c24aSWill Andrews * occurred and the dbuf went to UNCACHED. 13453e30c24aSWill Andrews */ 134613506d1eSmaybee mutex_exit(&db->db_mtx); 134713506d1eSmaybee if (prefetch) 1348cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1349ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1350744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1351744947dcSTom Erickson DB_DNODE_EXIT(db); 135213506d1eSmaybee 13533e30c24aSWill Andrews /* Skip the wait per the caller's request. */ 135413506d1eSmaybee mutex_enter(&db->db_mtx); 1355ea8dc4b6Seschrock if ((flags & DB_RF_NEVERWAIT) == 0) { 1356ea8dc4b6Seschrock while (db->db_state == DB_READ || 1357ea8dc4b6Seschrock db->db_state == DB_FILL) { 1358fa9e4066Sahrens ASSERT(db->db_state == DB_READ || 1359fa9e4066Sahrens (flags & DB_RF_HAVESTRUCT) == 0); 1360f6164ad6SAdam H. 
Leventhal DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1361f6164ad6SAdam H. Leventhal db, zio_t *, zio); 1362fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1363fa9e4066Sahrens } 1364ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 1365be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 1366ea8dc4b6Seschrock } 1367fa9e4066Sahrens mutex_exit(&db->db_mtx); 1368fa9e4066Sahrens } 1369fa9e4066Sahrens 1370ea8dc4b6Seschrock return (err); 1371fa9e4066Sahrens } 1372fa9e4066Sahrens 1373fa9e4066Sahrens static void 1374fa9e4066Sahrens dbuf_noread(dmu_buf_impl_t *db) 1375fa9e4066Sahrens { 1376e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 13770a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1378fa9e4066Sahrens mutex_enter(&db->db_mtx); 1379fa9e4066Sahrens while (db->db_state == DB_READ || db->db_state == DB_FILL) 1380fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1381fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 1382ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 138343466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 1384ad23a2dbSjohansen 1385ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1386fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 13875602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1388fa9e4066Sahrens db->db_state = DB_FILL; 138982c9918fSTim Haley } else if (db->db_state == DB_NOFILL) { 1390bc9014e6SJustin Gibbs dbuf_clear_data(db); 1391fa9e4066Sahrens } else { 1392fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_CACHED); 1393fa9e4066Sahrens } 1394fa9e4066Sahrens mutex_exit(&db->db_mtx); 1395fa9e4066Sahrens } 1396fa9e4066Sahrens 1397fa9e4066Sahrens void 1398c717a561Smaybee dbuf_unoverride(dbuf_dirty_record_t *dr) 1399fa9e4066Sahrens { 1400c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1401b24ab676SJeff Bonwick blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1402c717a561Smaybee uint64_t txg = dr->dr_txg; 1403c5c6ffa0Smaybee 1404c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 140540713f2bSAlan Somers /* 140640713f2bSAlan Somers * This assert is valid because dmu_sync() expects to be called by 140740713f2bSAlan Somers * a zilog's get_data while holding a range lock. This call only 140840713f2bSAlan Somers * comes from dbuf_dirty() callers who must also hold a range lock. 140940713f2bSAlan Somers */ 1410c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1411c717a561Smaybee ASSERT(db->db_level == 0); 1412c717a561Smaybee 14130a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 1414c717a561Smaybee dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1415c717a561Smaybee return; 1416c717a561Smaybee 1417b24ab676SJeff Bonwick ASSERT(db->db_data_pending != dr); 1418b24ab676SJeff Bonwick 1419fa9e4066Sahrens /* free this block */ 142043466aaeSMax Grossman if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 142143466aaeSMax Grossman zio_free(db->db_objset->os_spa, txg, bp); 1422b24ab676SJeff Bonwick 1423c717a561Smaybee dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 142480901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = B_FALSE; 1425eb633035STom Caputi dr->dt.dl.dr_has_raw_params = B_FALSE; 142680901aeaSGeorge Wilson 14276b4acc8bSahrens /* 14286b4acc8bSahrens * Release the already-written buffer, so we leave it in 14296b4acc8bSahrens * a consistent dirty state. Note that all callers are 14306b4acc8bSahrens * modifying the buffer, so they will immediately do 14316b4acc8bSahrens * another (redundant) arc_release(). 
Therefore, leave 14326b4acc8bSahrens * the buf thawed to save the effort of freezing & 14336b4acc8bSahrens * immediately re-thawing it. 14346b4acc8bSahrens */ 1435c717a561Smaybee arc_release(dr->dt.dl.dr_data, db); 1436fa9e4066Sahrens } 1437fa9e4066Sahrens 1438cdb0ab79Smaybee /* 1439cdb0ab79Smaybee * Evict (if its unreferenced) or clear (if its referenced) any level-0 1440cdb0ab79Smaybee * data blocks in the free range, so that any future readers will find 144143466aaeSMax Grossman * empty blocks. 1442cdb0ab79Smaybee */ 1443fa9e4066Sahrens void 14440f6d88adSAlex Reece dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 14450f6d88adSAlex Reece dmu_tx_t *tx) 1446fa9e4066Sahrens { 1447bc9014e6SJustin Gibbs dmu_buf_impl_t db_search; 1448bc9014e6SJustin Gibbs dmu_buf_impl_t *db, *db_next; 1449fa9e4066Sahrens uint64_t txg = tx->tx_txg; 14500f6d88adSAlex Reece avl_index_t where; 1451fa9e4066Sahrens 1452653af1b8SStephen Blinick if (end_blkid > dn->dn_maxblkid && 1453653af1b8SStephen Blinick !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 14540f6d88adSAlex Reece end_blkid = dn->dn_maxblkid; 14550f6d88adSAlex Reece dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 14560f6d88adSAlex Reece 14570f6d88adSAlex Reece db_search.db_level = 0; 14580f6d88adSAlex Reece db_search.db_blkid = start_blkid; 145986bb58aeSAlex Reece db_search.db_state = DB_SEARCH; 14602f3d8780SMatthew Ahrens 1461713d6c20SMatthew Ahrens mutex_enter(&dn->dn_dbufs_mtx); 14620f6d88adSAlex Reece db = avl_find(&dn->dn_dbufs, &db_search, &where); 14630f6d88adSAlex Reece ASSERT3P(db, ==, NULL); 14642f3d8780SMatthew Ahrens 14650f6d88adSAlex Reece db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 14660f6d88adSAlex Reece 14670f6d88adSAlex Reece for (; db != NULL; db = db_next) { 14680f6d88adSAlex Reece db_next = AVL_NEXT(&dn->dn_dbufs, db); 14690a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1470cdb0ab79Smaybee 14710f6d88adSAlex Reece if (db->db_level != 0 || db->db_blkid > end_blkid) { 14720f6d88adSAlex Reece break; 14730f6d88adSAlex Reece } 14740f6d88adSAlex Reece ASSERT3U(db->db_blkid, >=, start_blkid); 1475fa9e4066Sahrens 1476fa9e4066Sahrens /* found a level 0 buffer in the range */ 1477fa9e4066Sahrens mutex_enter(&db->db_mtx); 14783b2aab18SMatthew Ahrens if (dbuf_undirty(db, tx)) { 14793b2aab18SMatthew Ahrens /* mutex has been dropped and dbuf destroyed */ 14803b2aab18SMatthew Ahrens continue; 14813b2aab18SMatthew Ahrens } 14823b2aab18SMatthew Ahrens 1483ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED || 148482c9918fSTim Haley db->db_state == DB_NOFILL || 1485ea8dc4b6Seschrock db->db_state == DB_EVICTING) { 1486fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 1487fa9e4066Sahrens mutex_exit(&db->db_mtx); 1488fa9e4066Sahrens continue; 1489fa9e4066Sahrens } 1490c543ec06Sahrens if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1491c543ec06Sahrens /* will be handled in dbuf_read_done or dbuf_rele */ 1492c717a561Smaybee db->db_freed_in_flight = TRUE; 1493fa9e4066Sahrens mutex_exit(&db->db_mtx); 1494fa9e4066Sahrens continue; 1495fa9e4066Sahrens } 1496e914ace2STim Schumacher if (zfs_refcount_count(&db->db_holds) == 0) { 1497ea8dc4b6Seschrock ASSERT(db->db_buf); 1498dcbf3bd6SGeorge Wilson dbuf_destroy(db); 1499ea8dc4b6Seschrock continue; 1500ea8dc4b6Seschrock } 1501c717a561Smaybee /* The dbuf is referenced */ 1502fa9e4066Sahrens 1503c717a561Smaybee if (db->db_last_dirty != NULL) { 1504c717a561Smaybee dbuf_dirty_record_t *dr = db->db_last_dirty; 1505c717a561Smaybee 
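/*
 * Two cases follow: the dirty record belongs to this txg (the buffer
 * is in active use, so keep its data and just cancel any override), or
 * it belongs to an older txg (preserve the old data for that txg via
 * dbuf_fix_old_data() before the cached copy is zeroed below).
 */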
1506c717a561Smaybee if (dr->dr_txg == txg) { 1507ea8dc4b6Seschrock /* 1508c717a561Smaybee * This buffer is "in-use", re-adjust the file 1509c717a561Smaybee * size to reflect that this buffer may 1510c717a561Smaybee * contain new data when we sync. 1511ea8dc4b6Seschrock */ 151206e0070dSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID && 151306e0070dSMark Shellenbaum db->db_blkid > dn->dn_maxblkid) 151444eda4d7Smaybee dn->dn_maxblkid = db->db_blkid; 1515c717a561Smaybee dbuf_unoverride(dr); 1516c717a561Smaybee } else { 1517c717a561Smaybee /* 1518c717a561Smaybee * This dbuf is not dirty in the open context. 1519c717a561Smaybee * Either uncache it (if its not referenced in 1520c717a561Smaybee * the open context) or reset its contents to 1521c717a561Smaybee * empty. 1522c717a561Smaybee */ 1523c717a561Smaybee dbuf_fix_old_data(db, txg); 152444eda4d7Smaybee } 1525c717a561Smaybee } 1526c717a561Smaybee /* clear the contents if its cached */ 1527ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1528ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL); 1529fa9e4066Sahrens arc_release(db->db_buf, db); 1530fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 15316b4acc8bSahrens arc_buf_freeze(db->db_buf); 1532fa9e4066Sahrens } 1533ea8dc4b6Seschrock 1534fa9e4066Sahrens mutex_exit(&db->db_mtx); 1535fa9e4066Sahrens } 1536fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 1537fa9e4066Sahrens } 1538fa9e4066Sahrens 1539fa9e4066Sahrens void 1540fa9e4066Sahrens dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1541fa9e4066Sahrens { 1542fa9e4066Sahrens arc_buf_t *buf, *obuf; 1543fa9e4066Sahrens int osize = db->db.db_size; 1544ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1545744947dcSTom Erickson dnode_t *dn; 1546fa9e4066Sahrens 15470a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1548ea8dc4b6Seschrock 1549744947dcSTom Erickson DB_DNODE_ENTER(db); 1550744947dcSTom Erickson dn = DB_DNODE(db); 1551744947dcSTom Erickson 1552fa9e4066Sahrens /* XXX does *this* func really need the lock? */ 1553744947dcSTom Erickson ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1554fa9e4066Sahrens 1555fa9e4066Sahrens /* 155643466aaeSMax Grossman * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1557fa9e4066Sahrens * is OK, because there can be no other references to the db 1558fa9e4066Sahrens * when we are changing its size, so no concurrent DB_FILL can 1559fa9e4066Sahrens * be happening. 
1560fa9e4066Sahrens */ 1561ea8dc4b6Seschrock /* 1562ea8dc4b6Seschrock * XXX we should be doing a dbuf_read, checking the return 1563ea8dc4b6Seschrock * value and returning that up to our callers 1564ea8dc4b6Seschrock */ 156543466aaeSMax Grossman dmu_buf_will_dirty(&db->db, tx); 1566fa9e4066Sahrens 1567fa9e4066Sahrens /* create the data buffer for the new block */ 15685602294fSDan Kimmel buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1569fa9e4066Sahrens 1570fa9e4066Sahrens /* copy old block data to the new block */ 1571fa9e4066Sahrens obuf = db->db_buf; 1572f65e61c0Sahrens bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1573fa9e4066Sahrens /* zero the remainder */ 1574f65e61c0Sahrens if (size > osize) 1575fa9e4066Sahrens bzero((uint8_t *)buf->b_data + osize, size - osize); 1576fa9e4066Sahrens 1577fa9e4066Sahrens mutex_enter(&db->db_mtx); 1578fa9e4066Sahrens dbuf_set_data(db, buf); 1579dcbf3bd6SGeorge Wilson arc_buf_destroy(obuf, db); 1580fa9e4066Sahrens db->db.db_size = size; 1581fa9e4066Sahrens 1582c717a561Smaybee if (db->db_level == 0) { 1583c717a561Smaybee ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1584c717a561Smaybee db->db_last_dirty->dt.dl.dr_data = buf; 1585c717a561Smaybee } 1586fa9e4066Sahrens mutex_exit(&db->db_mtx); 1587fa9e4066Sahrens 158861e255ceSMatthew Ahrens dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1589744947dcSTom Erickson DB_DNODE_EXIT(db); 1590fa9e4066Sahrens } 1591fa9e4066Sahrens 15923f9d6ad7SLin Ling void 15933f9d6ad7SLin Ling dbuf_release_bp(dmu_buf_impl_t *db) 15943f9d6ad7SLin Ling { 159543466aaeSMax Grossman objset_t *os = db->db_objset; 15963f9d6ad7SLin Ling 15973f9d6ad7SLin Ling ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 15983f9d6ad7SLin Ling ASSERT(arc_released(os->os_phys_buf) || 15993f9d6ad7SLin Ling list_link_active(&os->os_dsl_dataset->ds_synced_link)); 16003f9d6ad7SLin Ling ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 16013f9d6ad7SLin Ling 16021b912ec7SGeorge Wilson (void) arc_release(db->db_buf, db); 16033f9d6ad7SLin Ling } 16043f9d6ad7SLin Ling 16050f2e7d03SMatthew Ahrens /* 16060f2e7d03SMatthew Ahrens * We already have a dirty record for this TXG, and we are being 16070f2e7d03SMatthew Ahrens * dirtied again. 16080f2e7d03SMatthew Ahrens */ 16090f2e7d03SMatthew Ahrens static void 16100f2e7d03SMatthew Ahrens dbuf_redirty(dbuf_dirty_record_t *dr) 16110f2e7d03SMatthew Ahrens { 16120f2e7d03SMatthew Ahrens dmu_buf_impl_t *db = dr->dr_dbuf; 16130f2e7d03SMatthew Ahrens 16140f2e7d03SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 16150f2e7d03SMatthew Ahrens 16160f2e7d03SMatthew Ahrens if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 16170f2e7d03SMatthew Ahrens /* 16180f2e7d03SMatthew Ahrens * If this buffer has already been written out, 16190f2e7d03SMatthew Ahrens * we now need to reset its state. 16200f2e7d03SMatthew Ahrens */ 16210f2e7d03SMatthew Ahrens dbuf_unoverride(dr); 16220f2e7d03SMatthew Ahrens if (db->db.db_object != DMU_META_DNODE_OBJECT && 16230f2e7d03SMatthew Ahrens db->db_state != DB_NOFILL) { 16240f2e7d03SMatthew Ahrens /* Already released on initial dirty, so just thaw. 
*/ 16250f2e7d03SMatthew Ahrens ASSERT(arc_released(db->db_buf)); 16260f2e7d03SMatthew Ahrens arc_buf_thaw(db->db_buf); 16270f2e7d03SMatthew Ahrens } 16280f2e7d03SMatthew Ahrens } 16290f2e7d03SMatthew Ahrens } 16300f2e7d03SMatthew Ahrens 1631c717a561Smaybee dbuf_dirty_record_t * 1632fa9e4066Sahrens dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1633fa9e4066Sahrens { 1634744947dcSTom Erickson dnode_t *dn; 1635744947dcSTom Erickson objset_t *os; 1636c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 1637fa9e4066Sahrens int drop_struct_lock = FALSE; 1638fa9e4066Sahrens int txgoff = tx->tx_txg & TXG_MASK; 1639fa9e4066Sahrens 1640fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1641e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 16429c9dc39aSek110237 DMU_TX_DIRTY_BUF(tx, db); 1643fa9e4066Sahrens 1644744947dcSTom Erickson DB_DNODE_ENTER(db); 1645744947dcSTom Erickson dn = DB_DNODE(db); 1646fa9e4066Sahrens /* 1647fa9e4066Sahrens * Shouldn't dirty a regular buffer in syncing context. Private 1648fa9e4066Sahrens * objects may be dirtied in syncing context, but only if they 1649fa9e4066Sahrens * were already pre-dirtied in open context. 1650fa9e4066Sahrens */ 1651c166b69dSPaul Dagnelie #ifdef DEBUG 1652c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1653c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1654c166b69dSPaul Dagnelie RW_READER, FTAG); 1655c166b69dSPaul Dagnelie } 1656c717a561Smaybee ASSERT(!dmu_tx_is_syncing(tx) || 1657c717a561Smaybee BP_IS_HOLE(dn->dn_objset->os_rootbp) || 165814843421SMatthew Ahrens DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 165914843421SMatthew Ahrens dn->dn_objset->os_dsl_dataset == NULL); 1660c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1661c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1662c166b69dSPaul Dagnelie #endif 1663fa9e4066Sahrens /* 1664fa9e4066Sahrens * We make this assert for private objects as well, but after we 1665fa9e4066Sahrens * check if we're already dirty. They are allowed to re-dirty 1666fa9e4066Sahrens * in syncing context. 1667fa9e4066Sahrens */ 1668ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1669c717a561Smaybee dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1670fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1671fa9e4066Sahrens 1672fa9e4066Sahrens mutex_enter(&db->db_mtx); 1673fa9e4066Sahrens /* 1674c717a561Smaybee * XXX make this true for indirects too? The problem is that 1675c717a561Smaybee * transactions created with dmu_tx_create_assigned() from 1676c717a561Smaybee * syncing context don't bother holding ahead. 1677fa9e4066Sahrens */ 1678c717a561Smaybee ASSERT(db->db_level != 0 || 167982c9918fSTim Haley db->db_state == DB_CACHED || db->db_state == DB_FILL || 168082c9918fSTim Haley db->db_state == DB_NOFILL); 1681fa9e4066Sahrens 1682fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1683fa9e4066Sahrens /* 1684fa9e4066Sahrens * Don't set dirtyctx to SYNC if we're just modifying this as we 1685fa9e4066Sahrens * initialize the objset. 1686fa9e4066Sahrens */ 1687c166b69dSPaul Dagnelie if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1688c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1689c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1690c166b69dSPaul Dagnelie RW_READER, FTAG); 1691c166b69dSPaul Dagnelie } 1692c166b69dSPaul Dagnelie if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1693c166b69dSPaul Dagnelie dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 
1694c166b69dSPaul Dagnelie DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1695fa9e4066Sahrens ASSERT(dn->dn_dirtyctx_firstset == NULL); 1696fa9e4066Sahrens dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1697fa9e4066Sahrens } 1698c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1699c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1700c166b69dSPaul Dagnelie FTAG); 1701c166b69dSPaul Dagnelie } 1702c166b69dSPaul Dagnelie } 1703aa02ea01STom Caputi 1704aa02ea01STom Caputi if (tx->tx_txg > dn->dn_dirty_txg) 1705aa02ea01STom Caputi dn->dn_dirty_txg = tx->tx_txg; 1706fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1707fa9e4066Sahrens 17080a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 17090a586ceaSMark Shellenbaum dn->dn_have_spill = B_TRUE; 17100a586ceaSMark Shellenbaum 1711fa9e4066Sahrens /* 1712fa9e4066Sahrens * If this buffer is already dirty, we're done. 1713fa9e4066Sahrens */ 1714c717a561Smaybee drp = &db->db_last_dirty; 1715c717a561Smaybee ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1716c717a561Smaybee db->db.db_object == DMU_META_DNODE_OBJECT); 17177e2186e3Sbonwick while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 17187e2186e3Sbonwick drp = &dr->dr_next; 17197e2186e3Sbonwick if (dr && dr->dr_txg == tx->tx_txg) { 1720744947dcSTom Erickson DB_DNODE_EXIT(db); 1721744947dcSTom Erickson 17220f2e7d03SMatthew Ahrens dbuf_redirty(dr); 1723fa9e4066Sahrens mutex_exit(&db->db_mtx); 17247e2186e3Sbonwick return (dr); 1725fa9e4066Sahrens } 1726fa9e4066Sahrens 1727fa9e4066Sahrens /* 1728fa9e4066Sahrens * Only valid if not already dirty. 1729fa9e4066Sahrens */ 173014843421SMatthew Ahrens ASSERT(dn->dn_object == 0 || 173114843421SMatthew Ahrens dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1732fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1733fa9e4066Sahrens 1734fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, db->db_level); 1735fa9e4066Sahrens 1736fa9e4066Sahrens /* 1737fa9e4066Sahrens * We should only be dirtying in syncing context if it's the 173814843421SMatthew Ahrens * mos or we're initializing the os or it's a special object. 173914843421SMatthew Ahrens * However, we are allowed to dirty in syncing context provided 174014843421SMatthew Ahrens * we already dirtied it in open context. Hence we must make 174114843421SMatthew Ahrens * this assertion only if we're not already dirty. 
1742fa9e4066Sahrens */ 1743744947dcSTom Erickson os = dn->dn_objset; 17443991b535SGeorge Wilson VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1745c166b69dSPaul Dagnelie #ifdef DEBUG 1746c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1747c166b69dSPaul Dagnelie rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 174814843421SMatthew Ahrens ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 174914843421SMatthew Ahrens os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1750c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1751c166b69dSPaul Dagnelie rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1752c166b69dSPaul Dagnelie #endif 1753fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1754fa9e4066Sahrens 1755fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1756fa9e4066Sahrens 17570a586ceaSMark Shellenbaum if (db->db_blkid != DMU_BONUS_BLKID) { 175861e255ceSMatthew Ahrens dmu_objset_willuse_space(os, db->db.db_size, tx); 17591934e92fSmaybee } 17601934e92fSmaybee 1761ea8dc4b6Seschrock /* 1762ea8dc4b6Seschrock * If this buffer is dirty in an old transaction group we need 1763ea8dc4b6Seschrock * to make a copy of it so that the changes we make in this 1764ea8dc4b6Seschrock * transaction group won't leak out when we sync the older txg. 1765ea8dc4b6Seschrock */ 1766c717a561Smaybee dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1767c717a561Smaybee if (db->db_level == 0) { 1768c717a561Smaybee void *data_old = db->db_buf; 1769c717a561Smaybee 177082c9918fSTim Haley if (db->db_state != DB_NOFILL) { 17710a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 1772c717a561Smaybee dbuf_fix_old_data(db, tx->tx_txg); 1773c717a561Smaybee data_old = db->db.db_data; 1774c717a561Smaybee } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1775fa9e4066Sahrens /* 177682c9918fSTim Haley * Release the data buffer from the cache so 177782c9918fSTim Haley * that we can modify it without impacting 177882c9918fSTim Haley * possible other users of this cached data 177982c9918fSTim Haley * block. Note that indirect blocks and 178082c9918fSTim Haley * private objects are not released until the 178182c9918fSTim Haley * syncing state (since they are only modified 178282c9918fSTim Haley * then). 1783fa9e4066Sahrens */ 1784fa9e4066Sahrens arc_release(db->db_buf, db); 1785fa9e4066Sahrens dbuf_fix_old_data(db, tx->tx_txg); 1786c717a561Smaybee data_old = db->db_buf; 1787fa9e4066Sahrens } 1788c717a561Smaybee ASSERT(data_old != NULL); 178982c9918fSTim Haley } 1790c717a561Smaybee dr->dt.dl.dr_data = data_old; 1791c717a561Smaybee } else { 1792c717a561Smaybee mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1793c717a561Smaybee list_create(&dr->dt.di.dr_children, 1794c717a561Smaybee sizeof (dbuf_dirty_record_t), 1795c717a561Smaybee offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1796fa9e4066Sahrens } 179769962b56SMatthew Ahrens if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 179869962b56SMatthew Ahrens dr->dr_accounted = db->db.db_size; 1799c717a561Smaybee dr->dr_dbuf = db; 1800c717a561Smaybee dr->dr_txg = tx->tx_txg; 1801c717a561Smaybee dr->dr_next = *drp; 1802c717a561Smaybee *drp = dr; 1803fa9e4066Sahrens 1804fa9e4066Sahrens /* 1805fa9e4066Sahrens * We could have been freed_in_flight between the dbuf_noread 1806fa9e4066Sahrens * and dbuf_dirty. We win, as though the dbuf_noread() had 1807fa9e4066Sahrens * happened after the free. 
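 * Concretely, the block below drops this blkid from
 * dn_free_ranges[txgoff], so the pending free no longer covers the
 * block we are dirtying and the new data survives sync of this txg.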
1808fa9e4066Sahrens */ 18090a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 18100a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) { 1811c717a561Smaybee mutex_enter(&dn->dn_mtx); 1812bf16b11eSMatthew Ahrens if (dn->dn_free_ranges[txgoff] != NULL) { 1813bf16b11eSMatthew Ahrens range_tree_clear(dn->dn_free_ranges[txgoff], 1814bf16b11eSMatthew Ahrens db->db_blkid, 1); 1815bf16b11eSMatthew Ahrens } 1816fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1817c717a561Smaybee db->db_freed_in_flight = FALSE; 1818c717a561Smaybee } 1819fa9e4066Sahrens 1820fa9e4066Sahrens /* 1821fa9e4066Sahrens * This buffer is now part of this txg 1822fa9e4066Sahrens */ 1823fa9e4066Sahrens dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1824fa9e4066Sahrens db->db_dirtycnt += 1; 1825fa9e4066Sahrens ASSERT3U(db->db_dirtycnt, <=, 3); 1826fa9e4066Sahrens 1827fa9e4066Sahrens mutex_exit(&db->db_mtx); 1828fa9e4066Sahrens 18290a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 18300a586ceaSMark Shellenbaum db->db_blkid == DMU_SPILL_BLKID) { 1831c717a561Smaybee mutex_enter(&dn->dn_mtx); 1832c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1833c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1834c717a561Smaybee mutex_exit(&dn->dn_mtx); 1835fa9e4066Sahrens dnode_setdirty(dn, tx); 1836744947dcSTom Erickson DB_DNODE_EXIT(db); 1837c717a561Smaybee return (dr); 183892654925SMatthew Ahrens } 183992654925SMatthew Ahrens 184092654925SMatthew Ahrens /* 184192654925SMatthew Ahrens * The dn_struct_rwlock prevents db_blkptr from changing 184292654925SMatthew Ahrens * due to a write from syncing context completing 184392654925SMatthew Ahrens * while we are running, so we want to acquire it before 184492654925SMatthew Ahrens * looking at db_blkptr. 184592654925SMatthew Ahrens */ 184692654925SMatthew Ahrens if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 184792654925SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 184892654925SMatthew Ahrens drop_struct_lock = TRUE; 184992654925SMatthew Ahrens } 185092654925SMatthew Ahrens 1851d3469faaSMark Maybee /* 1852dcb6872cSMatthew Ahrens * We need to hold the dn_struct_rwlock to make this assertion, 1853dcb6872cSMatthew Ahrens * because it protects dn_phys / dn_next_nlevels from changing. 1854dcb6872cSMatthew Ahrens */ 1855dcb6872cSMatthew Ahrens ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1856dcb6872cSMatthew Ahrens dn->dn_phys->dn_nlevels > db->db_level || 1857dcb6872cSMatthew Ahrens dn->dn_next_nlevels[txgoff] > db->db_level || 1858dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1859dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1860dcb6872cSMatthew Ahrens 1861dcb6872cSMatthew Ahrens /* 186261e255ceSMatthew Ahrens * If we are overwriting a dedup BP, then unless it is snapshotted, 186361e255ceSMatthew Ahrens * when we get to syncing context we will need to decrement its 186461e255ceSMatthew Ahrens * refcount in the DDT. Prefetch the relevant DDT block so that 186561e255ceSMatthew Ahrens * syncing context won't have to wait for the i/o. 
1866d3469faaSMark Maybee */ 186761e255ceSMatthew Ahrens ddt_prefetch(os->os_spa, db->db_blkptr); 1868fa9e4066Sahrens 18698346f03fSJonathan W Adams if (db->db_level == 0) { 1870eb633035STom Caputi ASSERT(!db->db_objset->os_raw_receive || 1871eb633035STom Caputi dn->dn_maxblkid >= db->db_blkid); 1872eb633035STom Caputi dnode_new_blkid(dn, db->db_blkid, tx, 1873eb633035STom Caputi drop_struct_lock, B_FALSE); 18748346f03fSJonathan W Adams ASSERT(dn->dn_maxblkid >= db->db_blkid); 18758346f03fSJonathan W Adams } 18768346f03fSJonathan W Adams 187744eda4d7Smaybee if (db->db_level+1 < dn->dn_nlevels) { 1878c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 1879c717a561Smaybee dbuf_dirty_record_t *di; 1880c717a561Smaybee int parent_held = FALSE; 1881c717a561Smaybee 1882c717a561Smaybee if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1883fa9e4066Sahrens int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1884c717a561Smaybee 1885fa9e4066Sahrens parent = dbuf_hold_level(dn, db->db_level+1, 1886fa9e4066Sahrens db->db_blkid >> epbs, FTAG); 188701025c89SJohn Harres ASSERT(parent != NULL); 1888c717a561Smaybee parent_held = TRUE; 1889c717a561Smaybee } 1890fa9e4066Sahrens if (drop_struct_lock) 1891fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1892c717a561Smaybee ASSERT3U(db->db_level+1, ==, parent->db_level); 1893c717a561Smaybee di = dbuf_dirty(parent, tx); 1894c717a561Smaybee if (parent_held) 1895ea8dc4b6Seschrock dbuf_rele(parent, FTAG); 1896c717a561Smaybee 1897c717a561Smaybee mutex_enter(&db->db_mtx); 189869962b56SMatthew Ahrens /* 189969962b56SMatthew Ahrens * Since we've dropped the mutex, it's possible that 190069962b56SMatthew Ahrens * dbuf_undirty() might have changed this out from under us. 190169962b56SMatthew Ahrens */ 1902c717a561Smaybee if (db->db_last_dirty == dr || 1903c717a561Smaybee dn->dn_object == DMU_META_DNODE_OBJECT) { 1904c717a561Smaybee mutex_enter(&di->dt.di.dr_mtx); 1905c717a561Smaybee ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1906c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1907c717a561Smaybee list_insert_tail(&di->dt.di.dr_children, dr); 1908c717a561Smaybee mutex_exit(&di->dt.di.dr_mtx); 1909c717a561Smaybee dr->dr_parent = di; 1910c717a561Smaybee } 1911c717a561Smaybee mutex_exit(&db->db_mtx); 1912fa9e4066Sahrens } else { 1913c717a561Smaybee ASSERT(db->db_level+1 == dn->dn_nlevels); 1914c717a561Smaybee ASSERT(db->db_blkid < dn->dn_nblkptr); 1915744947dcSTom Erickson ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1916c717a561Smaybee mutex_enter(&dn->dn_mtx); 1917c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1918c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1919c717a561Smaybee mutex_exit(&dn->dn_mtx); 1920fa9e4066Sahrens if (drop_struct_lock) 1921fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1922fa9e4066Sahrens } 1923fa9e4066Sahrens 1924fa9e4066Sahrens dnode_setdirty(dn, tx); 1925744947dcSTom Erickson DB_DNODE_EXIT(db); 1926c717a561Smaybee return (dr); 1927fa9e4066Sahrens } 1928fa9e4066Sahrens 19293b2aab18SMatthew Ahrens /* 19303e30c24aSWill Andrews * Undirty a buffer in the transaction group referenced by the given 19313e30c24aSWill Andrews * transaction. Return whether this evicted the dbuf. 
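 *
 * A sketch of the expected caller pattern (this mirrors how
 * dbuf_free_range() above uses it):
 *
 *	mutex_enter(&db->db_mtx);
 *	if (dbuf_undirty(db, tx))
 *		continue;	(db_mtx was dropped and the dbuf was
 *				 destroyed; do not touch db again)
 *	...			(otherwise db_mtx is still held)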
19323b2aab18SMatthew Ahrens */ 19333b2aab18SMatthew Ahrens static boolean_t 1934fa9e4066Sahrens dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1935fa9e4066Sahrens { 1936744947dcSTom Erickson dnode_t *dn; 1937c717a561Smaybee uint64_t txg = tx->tx_txg; 193817f17c2dSbonwick dbuf_dirty_record_t *dr, **drp; 1939fa9e4066Sahrens 1940c717a561Smaybee ASSERT(txg != 0); 194146e1baa6SMatthew Ahrens 194246e1baa6SMatthew Ahrens /* 194346e1baa6SMatthew Ahrens * Due to our use of dn_nlevels below, this can only be called 194446e1baa6SMatthew Ahrens * in open context, unless we are operating on the MOS. 194546e1baa6SMatthew Ahrens * From syncing context, dn_nlevels may be different from the 194646e1baa6SMatthew Ahrens * dn_nlevels used when dbuf was dirtied. 194746e1baa6SMatthew Ahrens */ 194846e1baa6SMatthew Ahrens ASSERT(db->db_objset == 194946e1baa6SMatthew Ahrens dmu_objset_pool(db->db_objset)->dp_meta_objset || 195046e1baa6SMatthew Ahrens txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 19510a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 19523b2aab18SMatthew Ahrens ASSERT0(db->db_level); 19533b2aab18SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 1954fa9e4066Sahrens 1955fa9e4066Sahrens /* 1956fa9e4066Sahrens * If this buffer is not dirty, we're done. 1957fa9e4066Sahrens */ 195817f17c2dSbonwick for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1959c717a561Smaybee if (dr->dr_txg <= txg) 1960c717a561Smaybee break; 19613b2aab18SMatthew Ahrens if (dr == NULL || dr->dr_txg < txg) 19623b2aab18SMatthew Ahrens return (B_FALSE); 1963c717a561Smaybee ASSERT(dr->dr_txg == txg); 1964b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 1965fa9e4066Sahrens 1966744947dcSTom Erickson DB_DNODE_ENTER(db); 1967744947dcSTom Erickson dn = DB_DNODE(db); 1968744947dcSTom Erickson 1969fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1970fa9e4066Sahrens 1971fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1972fa9e4066Sahrens 197346e1baa6SMatthew Ahrens dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 197446e1baa6SMatthew Ahrens dr->dr_accounted, txg); 1975fa9e4066Sahrens 197617f17c2dSbonwick *drp = dr->dr_next; 1977c717a561Smaybee 19783f2366c2SGordon Ross /* 19793f2366c2SGordon Ross * Note that there are three places in dbuf_dirty() 19803f2366c2SGordon Ross * where this dirty record may be put on a list. 19813f2366c2SGordon Ross * Make sure to do a list_remove corresponding to 19823f2366c2SGordon Ross * every one of those list_insert calls. 
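 * For reference, the three inserts in dbuf_dirty() are: onto
 * dn_dirty_records[] for bonus and spill blocks, onto the parent dirty
 * record's dt.di.dr_children list when a dirty parent indirect exists,
 * and onto dn_dirty_records[] when the dbuf is at the top level
 * (db_level + 1 == dn_nlevels).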
19833f2366c2SGordon Ross */ 1984c717a561Smaybee if (dr->dr_parent) { 1985c717a561Smaybee mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1986c717a561Smaybee list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1987c717a561Smaybee mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 19883f2366c2SGordon Ross } else if (db->db_blkid == DMU_SPILL_BLKID || 19893f2366c2SGordon Ross db->db_level + 1 == dn->dn_nlevels) { 1990cdb0ab79Smaybee ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1991fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1992c717a561Smaybee list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1993fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1994c717a561Smaybee } 1995744947dcSTom Erickson DB_DNODE_EXIT(db); 1996c717a561Smaybee 199782c9918fSTim Haley if (db->db_state != DB_NOFILL) { 1998c717a561Smaybee dbuf_unoverride(dr); 1999c717a561Smaybee 2000c717a561Smaybee ASSERT(db->db_buf != NULL); 2001c717a561Smaybee ASSERT(dr->dt.dl.dr_data != NULL); 2002c717a561Smaybee if (dr->dt.dl.dr_data != db->db_buf) 2003dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 2004c717a561Smaybee } 2005d2b3cbbdSJorgen Lundman 2006c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 2007fa9e4066Sahrens 2008fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 2009fa9e4066Sahrens db->db_dirtycnt -= 1; 2010fa9e4066Sahrens 2011e914ace2STim Schumacher if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 2012dcbf3bd6SGeorge Wilson ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 2013dcbf3bd6SGeorge Wilson dbuf_destroy(db); 20143b2aab18SMatthew Ahrens return (B_TRUE); 2015fa9e4066Sahrens } 2016fa9e4066Sahrens 20173b2aab18SMatthew Ahrens return (B_FALSE); 2018fa9e4066Sahrens } 2019fa9e4066Sahrens 2020eb633035STom Caputi static void 2021eb633035STom Caputi dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx) 2022fa9e4066Sahrens { 202343466aaeSMax Grossman dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2024fa9e4066Sahrens 2025fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 2026e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2027fa9e4066Sahrens 20280f2e7d03SMatthew Ahrens /* 20290f2e7d03SMatthew Ahrens * Quick check for dirtyness. For already dirty blocks, this 20300f2e7d03SMatthew Ahrens * reduces runtime of this function by >90%, and overall performance 20310f2e7d03SMatthew Ahrens * by 50% for some workloads (e.g. file deletion with indirect blocks 20320f2e7d03SMatthew Ahrens * cached). 20330f2e7d03SMatthew Ahrens */ 20340f2e7d03SMatthew Ahrens mutex_enter(&db->db_mtx); 20350f2e7d03SMatthew Ahrens dbuf_dirty_record_t *dr; 20360f2e7d03SMatthew Ahrens for (dr = db->db_last_dirty; 20370f2e7d03SMatthew Ahrens dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 20380f2e7d03SMatthew Ahrens /* 20390f2e7d03SMatthew Ahrens * It's possible that it is already dirty but not cached, 20400f2e7d03SMatthew Ahrens * because there are some calls to dbuf_dirty() that don't 20410f2e7d03SMatthew Ahrens * go through dmu_buf_will_dirty(). 20420f2e7d03SMatthew Ahrens */ 20430f2e7d03SMatthew Ahrens if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 20440f2e7d03SMatthew Ahrens /* This dbuf is already dirty and cached. 
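 * Redirtying here lets us return immediately and skip the
 * dbuf_read()/dbuf_dirty() calls at the bottom of this function.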
*/ 20450f2e7d03SMatthew Ahrens dbuf_redirty(dr); 20460f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 20470f2e7d03SMatthew Ahrens return; 20480f2e7d03SMatthew Ahrens } 20490f2e7d03SMatthew Ahrens } 20500f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 20510f2e7d03SMatthew Ahrens 2052744947dcSTom Erickson DB_DNODE_ENTER(db); 2053744947dcSTom Erickson if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 2054eb633035STom Caputi flags |= DB_RF_HAVESTRUCT; 2055744947dcSTom Erickson DB_DNODE_EXIT(db); 2056eb633035STom Caputi (void) dbuf_read(db, NULL, flags); 2057c717a561Smaybee (void) dbuf_dirty(db, tx); 2058fa9e4066Sahrens } 2059fa9e4066Sahrens 2060fa9e4066Sahrens void 2061eb633035STom Caputi dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 2062eb633035STom Caputi { 2063eb633035STom Caputi dmu_buf_will_dirty_impl(db_fake, 2064eb633035STom Caputi DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx); 2065eb633035STom Caputi } 2066eb633035STom Caputi 2067eb633035STom Caputi void 206882c9918fSTim Haley dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 206982c9918fSTim Haley { 207082c9918fSTim Haley dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 207182c9918fSTim Haley 207282c9918fSTim Haley db->db_state = DB_NOFILL; 207382c9918fSTim Haley 207482c9918fSTim Haley dmu_buf_will_fill(db_fake, tx); 207582c9918fSTim Haley } 207682c9918fSTim Haley 207782c9918fSTim Haley void 2078ea8dc4b6Seschrock dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 2079fa9e4066Sahrens { 2080ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2081ea8dc4b6Seschrock 20820a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2083fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 2084fa9e4066Sahrens ASSERT(db->db_level == 0); 2085e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 2086fa9e4066Sahrens 2087ea8dc4b6Seschrock ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 2088fa9e4066Sahrens dmu_tx_private_ok(tx)); 2089fa9e4066Sahrens 2090fa9e4066Sahrens dbuf_noread(db); 2091c717a561Smaybee (void) dbuf_dirty(db, tx); 2092fa9e4066Sahrens } 2093fa9e4066Sahrens 2094eb633035STom Caputi /* 2095eb633035STom Caputi * This function is effectively the same as dmu_buf_will_dirty(), but 2096eb633035STom Caputi * indicates the caller expects raw encrypted data in the db, and provides 2097eb633035STom Caputi * the crypt params (byteorder, salt, iv, mac) which should be stored in the 2098eb633035STom Caputi * blkptr_t when this dbuf is written. This is only used for blocks of 2099eb633035STom Caputi * dnodes during a raw receive. 2100eb633035STom Caputi */ 2101eb633035STom Caputi void 2102eb633035STom Caputi dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder, 2103eb633035STom Caputi const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx) 2104eb633035STom Caputi { 2105eb633035STom Caputi dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2106eb633035STom Caputi dbuf_dirty_record_t *dr; 2107eb633035STom Caputi 2108eb633035STom Caputi /* 2109eb633035STom Caputi * dr_has_raw_params is only processed for blocks of dnodes 2110eb633035STom Caputi * (see dbuf_sync_dnode_leaf_crypt()). 
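 *
 * A minimal sketch of the intended call, assuming the caller (the raw
 * receive path, which is not in this file) holds a level-0 dbuf of the
 * meta-dnode object and an assigned tx, with salt/iv/mac being the
 * ZIO_DATA_SALT_LEN/ZIO_DATA_IV_LEN/ZIO_DATA_MAC_LEN byte arrays taken
 * from the send stream:
 *
 *	dmu_buf_set_crypt_params(db, byteorder, salt, iv, mac, tx);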
2111eb633035STom Caputi */ 2112eb633035STom Caputi ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 2113eb633035STom Caputi ASSERT3U(db->db_level, ==, 0); 2114eb633035STom Caputi 2115eb633035STom Caputi dmu_buf_will_dirty_impl(db_fake, 2116eb633035STom Caputi DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx); 2117eb633035STom Caputi 2118eb633035STom Caputi dr = db->db_last_dirty; 2119eb633035STom Caputi while (dr != NULL && dr->dr_txg > tx->tx_txg) 2120eb633035STom Caputi dr = dr->dr_next; 2121eb633035STom Caputi 2122eb633035STom Caputi ASSERT3P(dr, !=, NULL); 2123eb633035STom Caputi ASSERT3U(dr->dr_txg, ==, tx->tx_txg); 2124eb633035STom Caputi 2125eb633035STom Caputi dr->dt.dl.dr_has_raw_params = B_TRUE; 2126eb633035STom Caputi dr->dt.dl.dr_byteorder = byteorder; 2127eb633035STom Caputi bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN); 2128eb633035STom Caputi bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN); 2129eb633035STom Caputi bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN); 2130eb633035STom Caputi } 2131eb633035STom Caputi 2132fa9e4066Sahrens #pragma weak dmu_buf_fill_done = dbuf_fill_done 2133fa9e4066Sahrens /* ARGSUSED */ 2134fa9e4066Sahrens void 2135fa9e4066Sahrens dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 2136fa9e4066Sahrens { 2137fa9e4066Sahrens mutex_enter(&db->db_mtx); 21389c9dc39aSek110237 DBUF_VERIFY(db); 2139fa9e4066Sahrens 2140fa9e4066Sahrens if (db->db_state == DB_FILL) { 2141c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 21420a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 2143fa9e4066Sahrens /* we were freed while filling */ 2144fa9e4066Sahrens /* XXX dbuf_undirty? */ 2145fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 2146c717a561Smaybee db->db_freed_in_flight = FALSE; 2147fa9e4066Sahrens } 2148fa9e4066Sahrens db->db_state = DB_CACHED; 2149fa9e4066Sahrens cv_broadcast(&db->db_changed); 2150fa9e4066Sahrens } 2151fa9e4066Sahrens mutex_exit(&db->db_mtx); 2152fa9e4066Sahrens } 2153fa9e4066Sahrens 21545d7b4d43SMatthew Ahrens void 21555d7b4d43SMatthew Ahrens dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 21565d7b4d43SMatthew Ahrens bp_embedded_type_t etype, enum zio_compress comp, 21575d7b4d43SMatthew Ahrens int uncompressed_size, int compressed_size, int byteorder, 21585d7b4d43SMatthew Ahrens dmu_tx_t *tx) 21595d7b4d43SMatthew Ahrens { 21605d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 21615d7b4d43SMatthew Ahrens struct dirty_leaf *dl; 21625d7b4d43SMatthew Ahrens dmu_object_type_t type; 21635d7b4d43SMatthew Ahrens 2164ca0cc391SMatthew Ahrens if (etype == BP_EMBEDDED_TYPE_DATA) { 2165ca0cc391SMatthew Ahrens ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2166ca0cc391SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 2167ca0cc391SMatthew Ahrens } 2168ca0cc391SMatthew Ahrens 21695d7b4d43SMatthew Ahrens DB_DNODE_ENTER(db); 21705d7b4d43SMatthew Ahrens type = DB_DNODE(db)->dn_type; 21715d7b4d43SMatthew Ahrens DB_DNODE_EXIT(db); 21725d7b4d43SMatthew Ahrens 21735d7b4d43SMatthew Ahrens ASSERT0(db->db_level); 21745d7b4d43SMatthew Ahrens ASSERT(db->db_blkid != DMU_BONUS_BLKID); 21755d7b4d43SMatthew Ahrens 21765d7b4d43SMatthew Ahrens dmu_buf_will_not_fill(dbuf, tx); 21775d7b4d43SMatthew Ahrens 21785d7b4d43SMatthew Ahrens ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 21795d7b4d43SMatthew Ahrens dl = &db->db_last_dirty->dt.dl; 21805d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(&dl->dr_overridden_by, 21815d7b4d43SMatthew Ahrens data, comp, uncompressed_size, compressed_size); 
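/*
 * The payload was packed into dr_overridden_by above; the remaining BP
 * fields are filled in by hand below, and marking the record
 * DR_OVERRIDDEN tells sync context to use this embedded BP directly
 * instead of issuing a new write.
 */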
21825d7b4d43SMatthew Ahrens BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 21835d7b4d43SMatthew Ahrens BP_SET_TYPE(&dl->dr_overridden_by, type); 21845d7b4d43SMatthew Ahrens BP_SET_LEVEL(&dl->dr_overridden_by, 0); 21855d7b4d43SMatthew Ahrens BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 21865d7b4d43SMatthew Ahrens 21875d7b4d43SMatthew Ahrens dl->dr_override_state = DR_OVERRIDDEN; 21885d7b4d43SMatthew Ahrens dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 21895d7b4d43SMatthew Ahrens } 21905d7b4d43SMatthew Ahrens 2191ea8dc4b6Seschrock /* 21922fdbea25SAleksandr Guzovskiy * Directly assign a provided arc buf to a given dbuf if it's not referenced 21932fdbea25SAleksandr Guzovskiy * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 21942fdbea25SAleksandr Guzovskiy */ 21952fdbea25SAleksandr Guzovskiy void 21962fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 21972fdbea25SAleksandr Guzovskiy { 2198e914ace2STim Schumacher ASSERT(!zfs_refcount_is_zero(&db->db_holds)); 21990a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 22002fdbea25SAleksandr Guzovskiy ASSERT(db->db_level == 0); 22015602294fSDan Kimmel ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 22022fdbea25SAleksandr Guzovskiy ASSERT(buf != NULL); 22036ccda740Sloli10K ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size); 22042fdbea25SAleksandr Guzovskiy ASSERT(tx->tx_txg != 0); 22052fdbea25SAleksandr Guzovskiy 22062fdbea25SAleksandr Guzovskiy arc_return_buf(buf, db); 22072fdbea25SAleksandr Guzovskiy ASSERT(arc_released(buf)); 22082fdbea25SAleksandr Guzovskiy 22092fdbea25SAleksandr Guzovskiy mutex_enter(&db->db_mtx); 22102fdbea25SAleksandr Guzovskiy 22112fdbea25SAleksandr Guzovskiy while (db->db_state == DB_READ || db->db_state == DB_FILL) 22122fdbea25SAleksandr Guzovskiy cv_wait(&db->db_changed, &db->db_mtx); 22132fdbea25SAleksandr Guzovskiy 22142fdbea25SAleksandr Guzovskiy ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 22152fdbea25SAleksandr Guzovskiy 22162fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED && 2217e914ace2STim Schumacher zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 2218eb633035STom Caputi /* 2219eb633035STom Caputi * In practice, we will never have a case where we have an 2220eb633035STom Caputi * encrypted arc buffer while additional holds exist on the 2221eb633035STom Caputi * dbuf. We don't handle this here so we simply assert that 2222eb633035STom Caputi * fact instead. 
2223eb633035STom Caputi */ 2224eb633035STom Caputi ASSERT(!arc_is_encrypted(buf)); 22252fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 22262fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 22272fdbea25SAleksandr Guzovskiy bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2228dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 2229c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_copied(); 22302fdbea25SAleksandr Guzovskiy return; 22312fdbea25SAleksandr Guzovskiy } 22322fdbea25SAleksandr Guzovskiy 2233c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_nocopy(); 22342fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED) { 22352fdbea25SAleksandr Guzovskiy dbuf_dirty_record_t *dr = db->db_last_dirty; 22362fdbea25SAleksandr Guzovskiy 22372fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf != NULL); 22382fdbea25SAleksandr Guzovskiy if (dr != NULL && dr->dr_txg == tx->tx_txg) { 22392fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_data == db->db_buf); 2240eb633035STom Caputi 22412fdbea25SAleksandr Guzovskiy if (!arc_released(db->db_buf)) { 22422fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_override_state == 22432fdbea25SAleksandr Guzovskiy DR_OVERRIDDEN); 22442fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 22452fdbea25SAleksandr Guzovskiy } 22462fdbea25SAleksandr Guzovskiy dr->dt.dl.dr_data = buf; 2247dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 22482fdbea25SAleksandr Guzovskiy } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 22492fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 2250dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 22512fdbea25SAleksandr Guzovskiy } 22522fdbea25SAleksandr Guzovskiy db->db_buf = NULL; 22532fdbea25SAleksandr Guzovskiy } 22542fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf == NULL); 22552fdbea25SAleksandr Guzovskiy dbuf_set_data(db, buf); 22562fdbea25SAleksandr Guzovskiy db->db_state = DB_FILL; 22572fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 22582fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 225943466aaeSMax Grossman dmu_buf_fill_done(&db->db, tx); 22602fdbea25SAleksandr Guzovskiy } 22612fdbea25SAleksandr Guzovskiy 2262ea8dc4b6Seschrock void 2263dcbf3bd6SGeorge Wilson dbuf_destroy(dmu_buf_impl_t *db) 2264fa9e4066Sahrens { 2265744947dcSTom Erickson dnode_t *dn; 2266ea8dc4b6Seschrock dmu_buf_impl_t *parent = db->db_parent; 2267744947dcSTom Erickson dmu_buf_impl_t *dndb; 2268fa9e4066Sahrens 2269fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2270e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2271fa9e4066Sahrens 2272dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) { 2273dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 2274dcbf3bd6SGeorge Wilson db->db_buf = NULL; 2275dcbf3bd6SGeorge Wilson } 2276ea8dc4b6Seschrock 22770a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 227854811da5SToomas Soome int slots = DB_DNODE(db)->dn_num_slots; 227954811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 228054811da5SToomas Soome if (db->db.db_data != NULL) { 228154811da5SToomas Soome zio_buf_free(db->db.db_data, bonuslen); 228254811da5SToomas Soome arc_space_return(bonuslen, ARC_SPACE_BONUS); 2283fa9e4066Sahrens db->db_state = DB_UNCACHED; 2284fa9e4066Sahrens } 228554811da5SToomas Soome } 2286fa9e4066Sahrens 2287dcbf3bd6SGeorge Wilson dbuf_clear_data(db); 2288dcbf3bd6SGeorge Wilson 2289dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2290adb52d92SMatthew Ahrens 
ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2291adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2292adb52d92SMatthew Ahrens 2293adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2294e914ace2STim Schumacher (void) zfs_refcount_remove_many( 2295adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2296dcbf3bd6SGeorge Wilson db->db.db_size, db); 2297adb52d92SMatthew Ahrens 2298adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2299dcbf3bd6SGeorge Wilson } 2300dcbf3bd6SGeorge Wilson 230182c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2302fa9e4066Sahrens ASSERT(db->db_data_pending == NULL); 2303fa9e4066Sahrens 2304ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2305ea8dc4b6Seschrock db->db_blkptr = NULL; 2306ea8dc4b6Seschrock 2307dcbf3bd6SGeorge Wilson /* 2308dcbf3bd6SGeorge Wilson * Now that db_state is DB_EVICTING, nobody else can find this via 2309dcbf3bd6SGeorge Wilson * the hash table. We can now drop db_mtx, which allows us to 2310dcbf3bd6SGeorge Wilson * acquire the dn_dbufs_mtx. 2311dcbf3bd6SGeorge Wilson */ 2312dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2313dcbf3bd6SGeorge Wilson 2314744947dcSTom Erickson DB_DNODE_ENTER(db); 2315744947dcSTom Erickson dn = DB_DNODE(db); 2316744947dcSTom Erickson dndb = dn->dn_dbuf; 2317dcbf3bd6SGeorge Wilson if (db->db_blkid != DMU_BONUS_BLKID) { 2318dcbf3bd6SGeorge Wilson boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2319dcbf3bd6SGeorge Wilson if (needlock) 2320dcbf3bd6SGeorge Wilson mutex_enter(&dn->dn_dbufs_mtx); 23210f6d88adSAlex Reece avl_remove(&dn->dn_dbufs, db); 2322640c1670SJosef 'Jeff' Sipek atomic_dec_32(&dn->dn_dbufs_count); 2323744947dcSTom Erickson membar_producer(); 2324744947dcSTom Erickson DB_DNODE_EXIT(db); 2325dcbf3bd6SGeorge Wilson if (needlock) 2326dcbf3bd6SGeorge Wilson mutex_exit(&dn->dn_dbufs_mtx); 2327744947dcSTom Erickson /* 2328744947dcSTom Erickson * Decrementing the dbuf count means that the hold corresponding 2329744947dcSTom Erickson * to the removed dbuf is no longer discounted in dnode_move(), 2330744947dcSTom Erickson * so the dnode cannot be moved until after we release the hold. 2331744947dcSTom Erickson * The membar_producer() ensures visibility of the decremented 2332744947dcSTom Erickson * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2333744947dcSTom Erickson * release any lock. 
2334744947dcSTom Erickson */ 2335c2919acbSMatthew Ahrens mutex_enter(&dn->dn_mtx); 2336c2919acbSMatthew Ahrens dnode_rele_and_unlock(dn, db, B_TRUE); 2337744947dcSTom Erickson db->db_dnode_handle = NULL; 2338dcbf3bd6SGeorge Wilson 2339dcbf3bd6SGeorge Wilson dbuf_hash_remove(db); 2340744947dcSTom Erickson } else { 2341744947dcSTom Erickson DB_DNODE_EXIT(db); 2342ea8dc4b6Seschrock } 2343ea8dc4b6Seschrock 2344e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2345ea8dc4b6Seschrock 2346dcbf3bd6SGeorge Wilson db->db_parent = NULL; 2347dcbf3bd6SGeorge Wilson 2348dcbf3bd6SGeorge Wilson ASSERT(db->db_buf == NULL); 2349dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data == NULL); 2350dcbf3bd6SGeorge Wilson ASSERT(db->db_hash_next == NULL); 2351dcbf3bd6SGeorge Wilson ASSERT(db->db_blkptr == NULL); 2352dcbf3bd6SGeorge Wilson ASSERT(db->db_data_pending == NULL); 2353adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2354dcbf3bd6SGeorge Wilson ASSERT(!multilist_link_active(&db->db_cache_link)); 2355dcbf3bd6SGeorge Wilson 2356dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2357dcbf3bd6SGeorge Wilson arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2358fa9e4066Sahrens 2359fa9e4066Sahrens /* 2360744947dcSTom Erickson * If this dbuf is referenced from an indirect dbuf, 2361fa9e4066Sahrens * decrement the ref count on the indirect dbuf. 2362fa9e4066Sahrens */ 2363c2919acbSMatthew Ahrens if (parent && parent != dndb) { 2364c2919acbSMatthew Ahrens mutex_enter(&parent->db_mtx); 2365c2919acbSMatthew Ahrens dbuf_rele_and_unlock(parent, db, B_TRUE); 2366c2919acbSMatthew Ahrens } 2367fa9e4066Sahrens } 2368fa9e4066Sahrens 2369a2cdcdd2SPaul Dagnelie /* 2370a2cdcdd2SPaul Dagnelie * Note: While bpp will always be updated if the function returns success, 2371a2cdcdd2SPaul Dagnelie * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2372*f67950b2SNasf-Fan * this happens when the dnode is the meta-dnode, or {user|group|project}used 2373a2cdcdd2SPaul Dagnelie * object. 2374a2cdcdd2SPaul Dagnelie */ 2375fa9e4066Sahrens static int 2376fa9e4066Sahrens dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2377fa9e4066Sahrens dmu_buf_impl_t **parentp, blkptr_t **bpp) 2378fa9e4066Sahrens { 23790b69c2f0Sahrens *parentp = NULL; 23800b69c2f0Sahrens *bpp = NULL; 23810b69c2f0Sahrens 23820a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 23830a586ceaSMark Shellenbaum 23840a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) { 23850a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 238606e0070dSMark Shellenbaum if (dn->dn_have_spill && 238706e0070dSMark Shellenbaum (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 238854811da5SToomas Soome *bpp = DN_SPILL_BLKPTR(dn->dn_phys); 23890a586ceaSMark Shellenbaum else 23900a586ceaSMark Shellenbaum *bpp = NULL; 23910a586ceaSMark Shellenbaum dbuf_add_ref(dn->dn_dbuf, NULL); 23920a586ceaSMark Shellenbaum *parentp = dn->dn_dbuf; 23930a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 23940a586ceaSMark Shellenbaum return (0); 23950a586ceaSMark Shellenbaum } 2396ea8dc4b6Seschrock 23977de35a3eSPaul Dagnelie int nlevels = 23987de35a3eSPaul Dagnelie (dn->dn_phys->dn_nlevels == 0) ? 
1 : dn->dn_phys->dn_nlevels; 23997de35a3eSPaul Dagnelie int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2400fa9e4066Sahrens 2401fa9e4066Sahrens ASSERT3U(level * epbs, <, 64); 2402fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 24037de35a3eSPaul Dagnelie /* 24047de35a3eSPaul Dagnelie * This assertion shouldn't trip as long as the max indirect block size 24057de35a3eSPaul Dagnelie * is less than 1M. The reason for this is that up to that point, 24067de35a3eSPaul Dagnelie * the number of levels required to address an entire object with blocks 24077de35a3eSPaul Dagnelie * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 24087de35a3eSPaul Dagnelie * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 24097de35a3eSPaul Dagnelie * (i.e. we can address the entire object), objects will all use at most 24107de35a3eSPaul Dagnelie * N-1 levels and the assertion won't overflow. However, once epbs is 24117de35a3eSPaul Dagnelie * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 24127de35a3eSPaul Dagnelie * enough to address an entire object, so objects will have 5 levels, 24137de35a3eSPaul Dagnelie * but then this assertion will overflow. 24147de35a3eSPaul Dagnelie * 24157de35a3eSPaul Dagnelie * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 24167de35a3eSPaul Dagnelie * need to redo this logic to handle overflows. 24177de35a3eSPaul Dagnelie */ 24187de35a3eSPaul Dagnelie ASSERT(level >= nlevels || 24197de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs) + 24207de35a3eSPaul Dagnelie highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2421ea8dc4b6Seschrock if (level >= nlevels || 24227de35a3eSPaul Dagnelie blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 24237de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs)) || 24247de35a3eSPaul Dagnelie (fail_sparse && 24257de35a3eSPaul Dagnelie blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2426fa9e4066Sahrens /* the buffer has no parent yet */ 2427be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2428fa9e4066Sahrens } else if (level < nlevels-1) { 2429fa9e4066Sahrens /* this block is referenced from an indirect block */ 2430fa9e4066Sahrens int err = dbuf_hold_impl(dn, level+1, 2431a2cdcdd2SPaul Dagnelie blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2432fa9e4066Sahrens if (err) 2433fa9e4066Sahrens return (err); 2434ea8dc4b6Seschrock err = dbuf_read(*parentp, NULL, 2435ea8dc4b6Seschrock (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2436c543ec06Sahrens if (err) { 2437c543ec06Sahrens dbuf_rele(*parentp, NULL); 2438c543ec06Sahrens *parentp = NULL; 2439c543ec06Sahrens return (err); 2440c543ec06Sahrens } 2441fa9e4066Sahrens *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2442fa9e4066Sahrens (blkid & ((1ULL << epbs) - 1)); 24437de35a3eSPaul Dagnelie if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 24447de35a3eSPaul Dagnelie ASSERT(BP_IS_HOLE(*bpp)); 2445c543ec06Sahrens return (0); 2446fa9e4066Sahrens } else { 2447fa9e4066Sahrens /* the block is referenced from the dnode */ 2448fa9e4066Sahrens ASSERT3U(level, ==, nlevels-1); 2449fa9e4066Sahrens ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2450fa9e4066Sahrens blkid < dn->dn_phys->dn_nblkptr); 2451c543ec06Sahrens if (dn->dn_dbuf) { 2452c543ec06Sahrens dbuf_add_ref(dn->dn_dbuf, NULL); 2453fa9e4066Sahrens *parentp = dn->dn_dbuf; 2454c543ec06Sahrens } 2455fa9e4066Sahrens *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2456fa9e4066Sahrens return (0); 2457fa9e4066Sahrens } 2458fa9e4066Sahrens } 2459fa9e4066Sahrens 2460fa9e4066Sahrens static 
dmu_buf_impl_t * 2461fa9e4066Sahrens dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2462fa9e4066Sahrens dmu_buf_impl_t *parent, blkptr_t *blkptr) 2463fa9e4066Sahrens { 2464503ad85cSMatthew Ahrens objset_t *os = dn->dn_objset; 2465fa9e4066Sahrens dmu_buf_impl_t *db, *odb; 2466fa9e4066Sahrens 2467fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2468fa9e4066Sahrens ASSERT(dn->dn_type != DMU_OT_NONE); 2469fa9e4066Sahrens 2470dcbf3bd6SGeorge Wilson db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2471fa9e4066Sahrens 2472fa9e4066Sahrens db->db_objset = os; 2473fa9e4066Sahrens db->db.db_object = dn->dn_object; 2474fa9e4066Sahrens db->db_level = level; 2475fa9e4066Sahrens db->db_blkid = blkid; 2476c717a561Smaybee db->db_last_dirty = NULL; 2477ea8dc4b6Seschrock db->db_dirtycnt = 0; 2478744947dcSTom Erickson db->db_dnode_handle = dn->dn_handle; 2479ea8dc4b6Seschrock db->db_parent = parent; 2480ea8dc4b6Seschrock db->db_blkptr = blkptr; 2481fa9e4066Sahrens 2482bc9014e6SJustin Gibbs db->db_user = NULL; 2483d2058105SJustin T. Gibbs db->db_user_immediate_evict = FALSE; 2484d2058105SJustin T. Gibbs db->db_freed_in_flight = FALSE; 2485d2058105SJustin T. Gibbs db->db_pending_evict = FALSE; 2486ea8dc4b6Seschrock 24870a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) { 2488ea8dc4b6Seschrock ASSERT3P(parent, ==, dn->dn_dbuf); 248954811da5SToomas Soome db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) - 24901934e92fSmaybee (dn->dn_nblkptr-1) * sizeof (blkptr_t); 24911934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 24920a586ceaSMark Shellenbaum db->db.db_offset = DMU_BONUS_BLKID; 2493ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2494adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2495ea8dc4b6Seschrock /* the bonus dbuf is not placed in the hash table */ 24965a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2497ea8dc4b6Seschrock return (db); 24980a586ceaSMark Shellenbaum } else if (blkid == DMU_SPILL_BLKID) { 24990a586ceaSMark Shellenbaum db->db.db_size = (blkptr != NULL) ? 25000a586ceaSMark Shellenbaum BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 25010a586ceaSMark Shellenbaum db->db.db_offset = 0; 2502fa9e4066Sahrens } else { 2503fa9e4066Sahrens int blocksize = 2504fa9e4066Sahrens db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2505fa9e4066Sahrens db->db.db_size = blocksize; 2506fa9e4066Sahrens db->db.db_offset = db->db_blkid * blocksize; 2507fa9e4066Sahrens } 2508fa9e4066Sahrens 2509fa9e4066Sahrens /* 2510fa9e4066Sahrens * Hold the dn_dbufs_mtx while we get the new dbuf 2511fa9e4066Sahrens * in the hash table *and* added to the dbufs list. 2512fa9e4066Sahrens * This prevents a possible deadlock with someone 2513fa9e4066Sahrens * trying to look up this dbuf before its added to the 2514fa9e4066Sahrens * dn_dbufs list. 
2515fa9e4066Sahrens */ 2516fa9e4066Sahrens mutex_enter(&dn->dn_dbufs_mtx); 2517ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2518fa9e4066Sahrens if ((odb = dbuf_hash_insert(db)) != NULL) { 2519fa9e4066Sahrens /* someone else inserted it first */ 2520dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2521fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 2522fa9e4066Sahrens return (odb); 2523fa9e4066Sahrens } 25240f6d88adSAlex Reece avl_add(&dn->dn_dbufs, db); 2525653af1b8SStephen Blinick 2526ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2527adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2528fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 25295a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2530fa9e4066Sahrens 2531fa9e4066Sahrens if (parent && parent != dn->dn_dbuf) 2532fa9e4066Sahrens dbuf_add_ref(parent, db); 2533fa9e4066Sahrens 2534ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2535e914ace2STim Schumacher zfs_refcount_count(&dn->dn_holds) > 0); 2536e914ace2STim Schumacher (void) zfs_refcount_add(&dn->dn_holds, db); 2537640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 2538fa9e4066Sahrens 2539fa9e4066Sahrens dprintf_dbuf(db, "db=%p\n", db); 2540fa9e4066Sahrens 2541fa9e4066Sahrens return (db); 2542fa9e4066Sahrens } 2543fa9e4066Sahrens 2544a2cdcdd2SPaul Dagnelie typedef struct dbuf_prefetch_arg { 2545a2cdcdd2SPaul Dagnelie spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2546a2cdcdd2SPaul Dagnelie zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2547a2cdcdd2SPaul Dagnelie int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2548a2cdcdd2SPaul Dagnelie int dpa_curlevel; /* The current level that we're reading */ 2549dcbf3bd6SGeorge Wilson dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2550a2cdcdd2SPaul Dagnelie zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2551a2cdcdd2SPaul Dagnelie zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2552a2cdcdd2SPaul Dagnelie arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2553a2cdcdd2SPaul Dagnelie } dbuf_prefetch_arg_t; 2554a2cdcdd2SPaul Dagnelie 2555a2cdcdd2SPaul Dagnelie /* 2556a2cdcdd2SPaul Dagnelie * Actually issue the prefetch read for the block given. 
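 * A hole or an embedded block pointer carries no separate on-disk data
 * to read, so those cases are simply skipped.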
2557a2cdcdd2SPaul Dagnelie */ 2558a2cdcdd2SPaul Dagnelie static void 2559a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2560fa9e4066Sahrens { 2561a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2562a2cdcdd2SPaul Dagnelie return; 2563a2cdcdd2SPaul Dagnelie 2564eb633035STom Caputi int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; 2565a2cdcdd2SPaul Dagnelie arc_flags_t aflags = 2566a2cdcdd2SPaul Dagnelie dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2567a2cdcdd2SPaul Dagnelie 2568eb633035STom Caputi /* dnodes are always read as raw and then converted later */ 2569eb633035STom Caputi if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) && 2570eb633035STom Caputi dpa->dpa_curlevel == 0) 2571eb633035STom Caputi zio_flags |= ZIO_FLAG_RAW; 2572eb633035STom Caputi 2573a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2574a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2575a2cdcdd2SPaul Dagnelie ASSERT(dpa->dpa_zio != NULL); 2576a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2577eb633035STom Caputi dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb); 2578a2cdcdd2SPaul Dagnelie } 2579a2cdcdd2SPaul Dagnelie 2580a2cdcdd2SPaul Dagnelie /* 2581a2cdcdd2SPaul Dagnelie * Called when an indirect block above our prefetch target is read in. This 2582a2cdcdd2SPaul Dagnelie * will either read in the next indirect block down the tree or issue the actual 2583a2cdcdd2SPaul Dagnelie * prefetch if the next block down is our target. 2584a2cdcdd2SPaul Dagnelie */ 2585eb633035STom Caputi /* ARGSUSED */ 2586a2cdcdd2SPaul Dagnelie static void 2587a3874b8bSToomas Soome dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, 2588a3874b8bSToomas Soome const blkptr_t *iobp, arc_buf_t *abuf, void *private) 2589a2cdcdd2SPaul Dagnelie { 2590a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = private; 2591a2cdcdd2SPaul Dagnelie 2592a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2593a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_curlevel, >, 0); 2594dcbf3bd6SGeorge Wilson 2595fa98e487SMatthew Ahrens if (abuf == NULL) { 2596fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error != 0); 2597fa98e487SMatthew Ahrens kmem_free(dpa, sizeof (*dpa)); 2598fa98e487SMatthew Ahrens return; 2599fa98e487SMatthew Ahrens } 2600fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error == 0); 2601fa98e487SMatthew Ahrens 2602dcbf3bd6SGeorge Wilson /* 2603dcbf3bd6SGeorge Wilson * The dpa_dnode is only valid if we are called with a NULL 2604dcbf3bd6SGeorge Wilson * zio. This indicates that the arc_read() returned without 2605dcbf3bd6SGeorge Wilson * first calling zio_read() to issue a physical read. Once 2606dcbf3bd6SGeorge Wilson * a physical read is made the dpa_dnode must be invalidated 2607dcbf3bd6SGeorge Wilson * as the locks guarding it may have been dropped. If the 2608dcbf3bd6SGeorge Wilson * dpa_dnode is still valid, then we want to add it to the dbuf 2609dcbf3bd6SGeorge Wilson * cache. To do so, we must hold the dbuf associated with the block 2610dcbf3bd6SGeorge Wilson * we just prefetched, read its contents so that we associate it 2611dcbf3bd6SGeorge Wilson * with an arc_buf_t, and then release it. 
2612dcbf3bd6SGeorge Wilson */ 2613a2cdcdd2SPaul Dagnelie if (zio != NULL) { 2614a2cdcdd2SPaul Dagnelie ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2615eb633035STom Caputi if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) { 2616dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2617dcbf3bd6SGeorge Wilson } else { 2618a2cdcdd2SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2619dcbf3bd6SGeorge Wilson } 2620a2cdcdd2SPaul Dagnelie ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2621dcbf3bd6SGeorge Wilson 2622dcbf3bd6SGeorge Wilson dpa->dpa_dnode = NULL; 2623dcbf3bd6SGeorge Wilson } else if (dpa->dpa_dnode != NULL) { 2624dcbf3bd6SGeorge Wilson uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2625dcbf3bd6SGeorge Wilson (dpa->dpa_epbs * (dpa->dpa_curlevel - 2626dcbf3bd6SGeorge Wilson dpa->dpa_zb.zb_level)); 2627dcbf3bd6SGeorge Wilson dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2628dcbf3bd6SGeorge Wilson dpa->dpa_curlevel, curblkid, FTAG); 2629dcbf3bd6SGeorge Wilson (void) dbuf_read(db, NULL, 2630dcbf3bd6SGeorge Wilson DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2631dcbf3bd6SGeorge Wilson dbuf_rele(db, FTAG); 2632a2cdcdd2SPaul Dagnelie } 2633a2cdcdd2SPaul Dagnelie 2634a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel--; 2635a2cdcdd2SPaul Dagnelie uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2636a2cdcdd2SPaul Dagnelie (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2637a2cdcdd2SPaul Dagnelie blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2638a2cdcdd2SPaul Dagnelie P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2639a3874b8bSToomas Soome 2640fa98e487SMatthew Ahrens if (BP_IS_HOLE(bp)) { 2641a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2642a2cdcdd2SPaul Dagnelie } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2643a2cdcdd2SPaul Dagnelie ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2644a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, bp); 2645a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2646a2cdcdd2SPaul Dagnelie } else { 2647a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2648a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2649a2cdcdd2SPaul Dagnelie 265027295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 265127295216Sbenrubson if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 265227295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 265327295216Sbenrubson 2654a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2655a2cdcdd2SPaul Dagnelie 2656a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2657a2cdcdd2SPaul Dagnelie dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2658a2cdcdd2SPaul Dagnelie 2659a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2660a2cdcdd2SPaul Dagnelie bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2661a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2662a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2663a2cdcdd2SPaul Dagnelie } 2664dcbf3bd6SGeorge Wilson 2665dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, private); 2666a2cdcdd2SPaul Dagnelie } 2667a2cdcdd2SPaul Dagnelie 2668a2cdcdd2SPaul Dagnelie /* 2669a2cdcdd2SPaul Dagnelie * Issue prefetch reads for the given block on the given level. If the indirect 2670a2cdcdd2SPaul Dagnelie * blocks above that block are not in memory, we will read them in 2671a2cdcdd2SPaul Dagnelie * asynchronously. As a result, this call never blocks waiting for a read to 2672eb633035STom Caputi * complete. 
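 * For example, a caller that has a hold on the dnode could warm up
 * level-0 block 'blkid' roughly like this (an illustrative sketch only;
 * the priority and flags shown are merely plausible choices):
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 *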
Note that the prefetch might fail if the dataset is encrypted and 2673eb633035STom Caputi * the encryption key is unmapped before the IO completes. 2674a2cdcdd2SPaul Dagnelie */ 2675a2cdcdd2SPaul Dagnelie void 2676a2cdcdd2SPaul Dagnelie dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2677a2cdcdd2SPaul Dagnelie arc_flags_t aflags) 2678a2cdcdd2SPaul Dagnelie { 2679a2cdcdd2SPaul Dagnelie blkptr_t bp; 2680a2cdcdd2SPaul Dagnelie int epbs, nlevels, curlevel; 2681a2cdcdd2SPaul Dagnelie uint64_t curblkid; 2682fa9e4066Sahrens 26830a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2684fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2685fa9e4066Sahrens 2686cf6106c8SMatthew Ahrens if (blkid > dn->dn_maxblkid) 2687cf6106c8SMatthew Ahrens return; 2688cf6106c8SMatthew Ahrens 2689fa9e4066Sahrens if (dnode_block_freed(dn, blkid)) 2690fa9e4066Sahrens return; 2691fa9e4066Sahrens 2692fa9e4066Sahrens /* 2693a2cdcdd2SPaul Dagnelie * This dnode hasn't been written to disk yet, so there's nothing to 2694a2cdcdd2SPaul Dagnelie * prefetch. 2695fa9e4066Sahrens */ 2696a2cdcdd2SPaul Dagnelie nlevels = dn->dn_phys->dn_nlevels; 2697a2cdcdd2SPaul Dagnelie if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2698a2cdcdd2SPaul Dagnelie return; 2699a2cdcdd2SPaul Dagnelie 2700a2cdcdd2SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2701a2cdcdd2SPaul Dagnelie if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2702a2cdcdd2SPaul Dagnelie return; 2703a2cdcdd2SPaul Dagnelie 2704a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2705a2cdcdd2SPaul Dagnelie level, blkid); 2706a2cdcdd2SPaul Dagnelie if (db != NULL) { 2707fa9e4066Sahrens mutex_exit(&db->db_mtx); 2708a2cdcdd2SPaul Dagnelie /* 2709a2cdcdd2SPaul Dagnelie * This dbuf already exists. It is either CACHED, or 2710a2cdcdd2SPaul Dagnelie * (we assume) about to be read or filled. 2711a2cdcdd2SPaul Dagnelie */ 2712fa9e4066Sahrens return; 2713fa9e4066Sahrens } 2714fa9e4066Sahrens 2715a2cdcdd2SPaul Dagnelie /* 2716a2cdcdd2SPaul Dagnelie * Find the closest ancestor (indirect block) of the target block 2717a2cdcdd2SPaul Dagnelie * that is present in the cache. In this indirect block, we will 2718a2cdcdd2SPaul Dagnelie * find the bp that is at curlevel, curblkid. 2719a2cdcdd2SPaul Dagnelie */ 2720a2cdcdd2SPaul Dagnelie curlevel = level; 2721a2cdcdd2SPaul Dagnelie curblkid = blkid; 2722a2cdcdd2SPaul Dagnelie while (curlevel < nlevels - 1) { 2723a2cdcdd2SPaul Dagnelie int parent_level = curlevel + 1; 2724a2cdcdd2SPaul Dagnelie uint64_t parent_blkid = curblkid >> epbs; 2725a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db; 2726a2cdcdd2SPaul Dagnelie 2727a2cdcdd2SPaul Dagnelie if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2728a2cdcdd2SPaul Dagnelie FALSE, TRUE, FTAG, &db) == 0) { 2729a2cdcdd2SPaul Dagnelie blkptr_t *bpp = db->db_buf->b_data; 2730a2cdcdd2SPaul Dagnelie bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2731a2cdcdd2SPaul Dagnelie dbuf_rele(db, FTAG); 2732a2cdcdd2SPaul Dagnelie break; 2733a2cdcdd2SPaul Dagnelie } 2734a2cdcdd2SPaul Dagnelie 2735a2cdcdd2SPaul Dagnelie curlevel = parent_level; 2736a2cdcdd2SPaul Dagnelie curblkid = parent_blkid; 2737a2cdcdd2SPaul Dagnelie } 2738a2cdcdd2SPaul Dagnelie 2739a2cdcdd2SPaul Dagnelie if (curlevel == nlevels - 1) { 2740a2cdcdd2SPaul Dagnelie /* No cached indirect blocks found. 
*/ 2741a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2742a2cdcdd2SPaul Dagnelie bp = dn->dn_phys->dn_blkptr[curblkid]; 2743a2cdcdd2SPaul Dagnelie } 2744a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(&bp)) 2745a2cdcdd2SPaul Dagnelie return; 2746a2cdcdd2SPaul Dagnelie 2747a2cdcdd2SPaul Dagnelie ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2748a2cdcdd2SPaul Dagnelie 2749a2cdcdd2SPaul Dagnelie zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2750a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL); 2751a2cdcdd2SPaul Dagnelie 2752a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2753b24ab676SJeff Bonwick dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2754a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2755a2cdcdd2SPaul Dagnelie dn->dn_object, level, blkid); 2756a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel = curlevel; 2757a2cdcdd2SPaul Dagnelie dpa->dpa_prio = prio; 2758a2cdcdd2SPaul Dagnelie dpa->dpa_aflags = aflags; 2759a2cdcdd2SPaul Dagnelie dpa->dpa_spa = dn->dn_objset->os_spa; 2760dcbf3bd6SGeorge Wilson dpa->dpa_dnode = dn; 2761a2cdcdd2SPaul Dagnelie dpa->dpa_epbs = epbs; 2762a2cdcdd2SPaul Dagnelie dpa->dpa_zio = pio; 2763a2cdcdd2SPaul Dagnelie 276427295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 276527295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 276627295216Sbenrubson dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 276727295216Sbenrubson 2768a2cdcdd2SPaul Dagnelie /* 2769a2cdcdd2SPaul Dagnelie * If we have the indirect just above us, no need to do the asynchronous 2770a2cdcdd2SPaul Dagnelie * prefetch chain; we'll just run the last step ourselves. If we're at 2771a2cdcdd2SPaul Dagnelie * a higher level, though, we want to issue the prefetches for all the 2772a2cdcdd2SPaul Dagnelie * indirect blocks asynchronously, so we can go on with whatever we were 2773a2cdcdd2SPaul Dagnelie * doing. 2774a2cdcdd2SPaul Dagnelie */ 2775a2cdcdd2SPaul Dagnelie if (curlevel == level) { 2776a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, ==, blkid); 2777a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, &bp); 2778a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2779a2cdcdd2SPaul Dagnelie } else { 2780a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 27817802d7bfSMatthew Ahrens zbookmark_phys_t zb; 2782b24ab676SJeff Bonwick 278327295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 278427295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 278527295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 278627295216Sbenrubson 2787a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2788a2cdcdd2SPaul Dagnelie dn->dn_object, curlevel, curblkid); 2789a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2790a2cdcdd2SPaul Dagnelie &bp, dbuf_prefetch_indirect_done, dpa, prio, 2791fa9e4066Sahrens ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2792a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2793fa9e4066Sahrens } 2794a2cdcdd2SPaul Dagnelie /* 2795a2cdcdd2SPaul Dagnelie * We use pio here instead of dpa_zio since it's possible that 2796a2cdcdd2SPaul Dagnelie * dpa may have already been freed. 2797a2cdcdd2SPaul Dagnelie */ 2798a2cdcdd2SPaul Dagnelie zio_nowait(pio); 2799fa9e4066Sahrens } 2800fa9e4066Sahrens 2801fa9e4066Sahrens /* 2802eb633035STom Caputi * Helper function for __dbuf_hold_impl() to copy a buffer. 
Handles 2803eb633035STom Caputi * the case of encrypted, compressed and uncompressed buffers by 2804eb633035STom Caputi * allocating the new buffer, respectively, with arc_alloc_raw_buf(), 2805eb633035STom Caputi * arc_alloc_compressed_buf() or arc_alloc_buf().* 2806eb633035STom Caputi * 2807eb633035STom Caputi * NOTE: Declared noinline to avoid stack bloat in __dbuf_hold_impl(). 2808eb633035STom Caputi */ 2809eb633035STom Caputi static void 2810eb633035STom Caputi dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db, dbuf_dirty_record_t *dr) 2811eb633035STom Caputi { 2812eb633035STom Caputi arc_buf_t *data = dr->dt.dl.dr_data; 2813eb633035STom Caputi enum zio_compress compress_type = arc_get_compression(data); 2814eb633035STom Caputi 2815eb633035STom Caputi if (arc_is_encrypted(data)) { 2816eb633035STom Caputi boolean_t byteorder; 2817eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 2818eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 2819eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 2820eb633035STom Caputi 2821eb633035STom Caputi arc_get_raw_params(data, &byteorder, salt, iv, mac); 2822eb633035STom Caputi dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db, 2823eb633035STom Caputi dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac, 2824eb633035STom Caputi dn->dn_type, arc_buf_size(data), arc_buf_lsize(data), 2825eb633035STom Caputi compress_type)); 2826eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) { 2827eb633035STom Caputi dbuf_set_data(db, arc_alloc_compressed_buf( 2828eb633035STom Caputi dn->dn_objset->os_spa, db, arc_buf_size(data), 2829eb633035STom Caputi arc_buf_lsize(data), compress_type)); 2830eb633035STom Caputi } else { 2831eb633035STom Caputi dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db, 2832eb633035STom Caputi DBUF_GET_BUFC_TYPE(db), db->db.db_size)); 2833eb633035STom Caputi } 2834eb633035STom Caputi 2835eb633035STom Caputi bcopy(data->b_data, db->db.db_data, arc_buf_size(data)); 2836eb633035STom Caputi } 2837eb633035STom Caputi 2838eb633035STom Caputi /* 2839fa9e4066Sahrens * Returns with db_holds incremented, and db_mtx not held. 2840fa9e4066Sahrens * Note: dn_struct_rwlock must be held. 2841fa9e4066Sahrens */ 2842fa9e4066Sahrens int 2843a2cdcdd2SPaul Dagnelie dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2844a2cdcdd2SPaul Dagnelie boolean_t fail_sparse, boolean_t fail_uncached, 2845fa9e4066Sahrens void *tag, dmu_buf_impl_t **dbp) 2846fa9e4066Sahrens { 2847fa9e4066Sahrens dmu_buf_impl_t *db, *parent = NULL; 2848fa9e4066Sahrens 28490a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2850fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2851fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, level); 2852fa9e4066Sahrens 2853fa9e4066Sahrens *dbp = NULL; 2854ea8dc4b6Seschrock top: 2855fa9e4066Sahrens /* dbuf_find() returns with db_mtx held */ 2856e57a022bSJustin T. 
Gibbs db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2857fa9e4066Sahrens 2858fa9e4066Sahrens if (db == NULL) { 2859fa9e4066Sahrens blkptr_t *bp = NULL; 2860fa9e4066Sahrens int err; 2861fa9e4066Sahrens 2862a2cdcdd2SPaul Dagnelie if (fail_uncached) 2863a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2864a2cdcdd2SPaul Dagnelie 2865c543ec06Sahrens ASSERT3P(parent, ==, NULL); 2866fa9e4066Sahrens err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2867fa9e4066Sahrens if (fail_sparse) { 2868fa9e4066Sahrens if (err == 0 && bp && BP_IS_HOLE(bp)) 2869be6fd75aSMatthew Ahrens err = SET_ERROR(ENOENT); 2870fa9e4066Sahrens if (err) { 2871c543ec06Sahrens if (parent) 2872ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2873fa9e4066Sahrens return (err); 2874fa9e4066Sahrens } 2875fa9e4066Sahrens } 2876ea8dc4b6Seschrock if (err && err != ENOENT) 2877ea8dc4b6Seschrock return (err); 2878fa9e4066Sahrens db = dbuf_create(dn, level, blkid, parent, bp); 2879fa9e4066Sahrens } 2880fa9e4066Sahrens 2881a2cdcdd2SPaul Dagnelie if (fail_uncached && db->db_state != DB_CACHED) { 2882a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx); 2883a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2884a2cdcdd2SPaul Dagnelie } 2885a2cdcdd2SPaul Dagnelie 28867b38fab6SAlexander Motin if (db->db_buf != NULL) { 28877b38fab6SAlexander Motin arc_buf_access(db->db_buf); 2888ea8dc4b6Seschrock ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 28897b38fab6SAlexander Motin } 2890ea8dc4b6Seschrock 2891ea8dc4b6Seschrock ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2892ea8dc4b6Seschrock 2893fa9e4066Sahrens /* 2894c717a561Smaybee * If this buffer is currently syncing out, and we are 2895c717a561Smaybee * still referencing it from db_data, we need to make a copy 2896c717a561Smaybee * of it in case we decide we want to dirty it again in this txg. 
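 * For example (an illustrative interleaving): the dirty record being
 * synced still points at this very arc buf; if we handed that same buf
 * back to the caller and it were then dirtied and modified in the open
 * txg, the in-flight sync write could pick up the new contents.
 * Copying up front keeps the syncing data stable.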
2897fa9e4066Sahrens */ 28980a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2899ea8dc4b6Seschrock dn->dn_object != DMU_META_DNODE_OBJECT && 2900c717a561Smaybee db->db_state == DB_CACHED && db->db_data_pending) { 2901c717a561Smaybee dbuf_dirty_record_t *dr = db->db_data_pending; 2902eb633035STom Caputi if (dr->dt.dl.dr_data == db->db_buf) 2903eb633035STom Caputi dbuf_hold_copy(dn, db, dr); 2904c717a561Smaybee } 2905fa9e4066Sahrens 2906dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2907e914ace2STim Schumacher ASSERT(zfs_refcount_is_zero(&db->db_holds)); 2908adb52d92SMatthew Ahrens ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2909adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2910adb52d92SMatthew Ahrens 2911adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2912e914ace2STim Schumacher (void) zfs_refcount_remove_many( 2913adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2914dcbf3bd6SGeorge Wilson db->db.db_size, db); 2915adb52d92SMatthew Ahrens 2916adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2917dcbf3bd6SGeorge Wilson } 2918e914ace2STim Schumacher (void) zfs_refcount_add(&db->db_holds, tag); 29199c9dc39aSek110237 DBUF_VERIFY(db); 2920fa9e4066Sahrens mutex_exit(&db->db_mtx); 2921fa9e4066Sahrens 2922fa9e4066Sahrens /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2923c543ec06Sahrens if (parent) 2924ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2925fa9e4066Sahrens 2926744947dcSTom Erickson ASSERT3P(DB_DNODE(db), ==, dn); 2927fa9e4066Sahrens ASSERT3U(db->db_blkid, ==, blkid); 2928fa9e4066Sahrens ASSERT3U(db->db_level, ==, level); 2929fa9e4066Sahrens *dbp = db; 2930fa9e4066Sahrens 2931fa9e4066Sahrens return (0); 2932fa9e4066Sahrens } 2933fa9e4066Sahrens 2934fa9e4066Sahrens dmu_buf_impl_t * 2935ea8dc4b6Seschrock dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2936fa9e4066Sahrens { 2937a2cdcdd2SPaul Dagnelie return (dbuf_hold_level(dn, 0, blkid, tag)); 2938fa9e4066Sahrens } 2939fa9e4066Sahrens 2940fa9e4066Sahrens dmu_buf_impl_t * 2941fa9e4066Sahrens dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2942fa9e4066Sahrens { 2943fa9e4066Sahrens dmu_buf_impl_t *db; 2944a2cdcdd2SPaul Dagnelie int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2945ea8dc4b6Seschrock return (err ? 
NULL : db); 2946fa9e4066Sahrens } 2947fa9e4066Sahrens 29481934e92fSmaybee void 2949ea8dc4b6Seschrock dbuf_create_bonus(dnode_t *dn) 2950fa9e4066Sahrens { 2951ea8dc4b6Seschrock ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2952ea8dc4b6Seschrock 2953ea8dc4b6Seschrock ASSERT(dn->dn_bonus == NULL); 29540a586ceaSMark Shellenbaum dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 29550a586ceaSMark Shellenbaum } 29560a586ceaSMark Shellenbaum 29570a586ceaSMark Shellenbaum int 29580a586ceaSMark Shellenbaum dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 29590a586ceaSMark Shellenbaum { 29600a586ceaSMark Shellenbaum dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2961744947dcSTom Erickson dnode_t *dn; 2962744947dcSTom Erickson 29630a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 2964be6fd75aSMatthew Ahrens return (SET_ERROR(ENOTSUP)); 29650a586ceaSMark Shellenbaum if (blksz == 0) 29660a586ceaSMark Shellenbaum blksz = SPA_MINBLOCKSIZE; 2967b5152584SMatthew Ahrens ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 29680a586ceaSMark Shellenbaum blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 29690a586ceaSMark Shellenbaum 2970744947dcSTom Erickson DB_DNODE_ENTER(db); 2971744947dcSTom Erickson dn = DB_DNODE(db); 2972744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 29730a586ceaSMark Shellenbaum dbuf_new_size(db, blksz, tx); 2974744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 2975744947dcSTom Erickson DB_DNODE_EXIT(db); 29760a586ceaSMark Shellenbaum 29770a586ceaSMark Shellenbaum return (0); 29780a586ceaSMark Shellenbaum } 29790a586ceaSMark Shellenbaum 29800a586ceaSMark Shellenbaum void 29810a586ceaSMark Shellenbaum dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 29820a586ceaSMark Shellenbaum { 29830a586ceaSMark Shellenbaum dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2984fa9e4066Sahrens } 2985fa9e4066Sahrens 2986ea8dc4b6Seschrock #pragma weak dmu_buf_add_ref = dbuf_add_ref 2987fa9e4066Sahrens void 2988fa9e4066Sahrens dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2989fa9e4066Sahrens { 2990e914ace2STim Schumacher int64_t holds = zfs_refcount_add(&db->db_holds, tag); 2991dcbf3bd6SGeorge Wilson ASSERT3S(holds, >, 1); 2992fa9e4066Sahrens } 2993fa9e4066Sahrens 2994e57a022bSJustin T. Gibbs #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2995e57a022bSJustin T. Gibbs boolean_t 2996e57a022bSJustin T. Gibbs dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2997e57a022bSJustin T. Gibbs void *tag) 2998e57a022bSJustin T. Gibbs { 2999e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3000e57a022bSJustin T. Gibbs dmu_buf_impl_t *found_db; 3001e57a022bSJustin T. Gibbs boolean_t result = B_FALSE; 3002e57a022bSJustin T. Gibbs 3003e57a022bSJustin T. Gibbs if (db->db_blkid == DMU_BONUS_BLKID) 3004e57a022bSJustin T. Gibbs found_db = dbuf_find_bonus(os, obj); 3005e57a022bSJustin T. Gibbs else 3006e57a022bSJustin T. Gibbs found_db = dbuf_find(os, obj, 0, blkid); 3007e57a022bSJustin T. Gibbs 3008e57a022bSJustin T. Gibbs if (found_db != NULL) { 3009e57a022bSJustin T. Gibbs if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 3010e914ace2STim Schumacher (void) zfs_refcount_add(&db->db_holds, tag); 3011e57a022bSJustin T. Gibbs result = B_TRUE; 3012e57a022bSJustin T. Gibbs } 3013e57a022bSJustin T. Gibbs mutex_exit(&db->db_mtx); 3014e57a022bSJustin T. Gibbs } 3015e57a022bSJustin T. Gibbs return (result); 3016e57a022bSJustin T. Gibbs } 3017e57a022bSJustin T. 
Gibbs 3018744947dcSTom Erickson /* 3019744947dcSTom Erickson * If you call dbuf_rele() you had better not be referencing the dnode handle 3020744947dcSTom Erickson * unless you have some other direct or indirect hold on the dnode. (An indirect 3021744947dcSTom Erickson * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 3022744947dcSTom Erickson * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 3023744947dcSTom Erickson * dnode's parent dbuf evicting its dnode handles. 3024744947dcSTom Erickson */ 3025fa9e4066Sahrens void 3026ea8dc4b6Seschrock dbuf_rele(dmu_buf_impl_t *db, void *tag) 3027fa9e4066Sahrens { 3028b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3029c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, tag, B_FALSE); 3030b24ab676SJeff Bonwick } 3031b24ab676SJeff Bonwick 303243466aaeSMax Grossman void 303343466aaeSMax Grossman dmu_buf_rele(dmu_buf_t *db, void *tag) 303443466aaeSMax Grossman { 303543466aaeSMax Grossman dbuf_rele((dmu_buf_impl_t *)db, tag); 303643466aaeSMax Grossman } 303743466aaeSMax Grossman 3038b24ab676SJeff Bonwick /* 3039b24ab676SJeff Bonwick * dbuf_rele() for an already-locked dbuf. This is necessary to allow 3040c2919acbSMatthew Ahrens * db_dirtycnt and db_holds to be updated atomically. The 'evicting' 3041c2919acbSMatthew Ahrens * argument should be set if we are already in the dbuf-evicting code 3042c2919acbSMatthew Ahrens * path, in which case we don't want to recursively evict. This allows us to 3043c2919acbSMatthew Ahrens * avoid deeply nested stacks that would have a call flow similar to this: 3044c2919acbSMatthew Ahrens * 3045c2919acbSMatthew Ahrens * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify() 3046c2919acbSMatthew Ahrens * ^ | 3047c2919acbSMatthew Ahrens * | | 3048c2919acbSMatthew Ahrens * +-----dbuf_destroy()<--dbuf_evict_one()<--------+ 3049c2919acbSMatthew Ahrens * 3050b24ab676SJeff Bonwick */ 3051b24ab676SJeff Bonwick void 3052c2919acbSMatthew Ahrens dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting) 3053b24ab676SJeff Bonwick { 3054fa9e4066Sahrens int64_t holds; 3055fa9e4066Sahrens 3056b24ab676SJeff Bonwick ASSERT(MUTEX_HELD(&db->db_mtx)); 30579c9dc39aSek110237 DBUF_VERIFY(db); 3058fa9e4066Sahrens 3059744947dcSTom Erickson /* 3060744947dcSTom Erickson * Remove the reference to the dbuf before removing its hold on the 3061744947dcSTom Erickson * dnode so we can guarantee in dnode_move() that a referenced bonus 3062744947dcSTom Erickson * buffer has a corresponding dnode hold. 3063744947dcSTom Erickson */ 3064e914ace2STim Schumacher holds = zfs_refcount_remove(&db->db_holds, tag); 3065ea8dc4b6Seschrock ASSERT(holds >= 0); 3066fa9e4066Sahrens 3067c717a561Smaybee /* 3068c717a561Smaybee * We can't freeze indirects if there is a possibility that they 3069c717a561Smaybee * may be modified in the current syncing context. 3070c717a561Smaybee */ 3071dcbf3bd6SGeorge Wilson if (db->db_buf != NULL && 3072dcbf3bd6SGeorge Wilson holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 30736b4acc8bSahrens arc_buf_freeze(db->db_buf); 3074dcbf3bd6SGeorge Wilson } 30756b4acc8bSahrens 3076fa9e4066Sahrens if (holds == db->db_dirtycnt && 3077d2058105SJustin T. Gibbs db->db_level == 0 && db->db_user_immediate_evict) 3078fa9e4066Sahrens dbuf_evict_user(db); 3079ea8dc4b6Seschrock 3080ea8dc4b6Seschrock if (holds == 0) { 30810a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3082cd485b49SJustin T. Gibbs dnode_t *dn; 3083d2058105SJustin T. 
Gibbs boolean_t evict_dbuf = db->db_pending_evict; 3084cd485b49SJustin T. Gibbs 3085cd485b49SJustin T. Gibbs /* 3086cd485b49SJustin T. Gibbs * If the dnode moves here, we cannot cross this 3087cd485b49SJustin T. Gibbs * barrier until the move completes. 3088cd485b49SJustin T. Gibbs */ 3089cd485b49SJustin T. Gibbs DB_DNODE_ENTER(db); 3090cd485b49SJustin T. Gibbs 3091cd485b49SJustin T. Gibbs dn = DB_DNODE(db); 3092cd485b49SJustin T. Gibbs atomic_dec_32(&dn->dn_dbufs_count); 3093cd485b49SJustin T. Gibbs 3094cd485b49SJustin T. Gibbs /* 3095cd485b49SJustin T. Gibbs * Decrementing the dbuf count means that the bonus 3096cd485b49SJustin T. Gibbs * buffer's dnode hold is no longer discounted in 3097cd485b49SJustin T. Gibbs * dnode_move(). The dnode cannot move until after 3098d2058105SJustin T. Gibbs * the dnode_rele() below. 3099cd485b49SJustin T. Gibbs */ 3100cd485b49SJustin T. Gibbs DB_DNODE_EXIT(db); 3101cd485b49SJustin T. Gibbs 3102cd485b49SJustin T. Gibbs /* 3103cd485b49SJustin T. Gibbs * Do not reference db after its lock is dropped. 3104cd485b49SJustin T. Gibbs * Another thread may evict it. 3105cd485b49SJustin T. Gibbs */ 3106ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 3107744947dcSTom Erickson 3108d2058105SJustin T. Gibbs if (evict_dbuf) 3109cd485b49SJustin T. Gibbs dnode_evict_bonus(dn); 3110d2058105SJustin T. Gibbs 3111d2058105SJustin T. Gibbs dnode_rele(dn, db); 3112ea8dc4b6Seschrock } else if (db->db_buf == NULL) { 3113ea8dc4b6Seschrock /* 3114ea8dc4b6Seschrock * This is a special case: we never associated this 3115ea8dc4b6Seschrock * dbuf with any data allocated from the ARC. 3116ea8dc4b6Seschrock */ 311782c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || 311882c9918fSTim Haley db->db_state == DB_NOFILL); 3119dcbf3bd6SGeorge Wilson dbuf_destroy(db); 3120ea8dc4b6Seschrock } else if (arc_released(db->db_buf)) { 3121ea8dc4b6Seschrock /* 3122ea8dc4b6Seschrock * This dbuf has anonymous data associated with it. 3123ea8dc4b6Seschrock */ 3124dcbf3bd6SGeorge Wilson dbuf_destroy(db); 3125ea8dc4b6Seschrock } else { 3126dcbf3bd6SGeorge Wilson boolean_t do_arc_evict = B_FALSE; 3127dcbf3bd6SGeorge Wilson blkptr_t bp; 3128dcbf3bd6SGeorge Wilson spa_t *spa = dmu_objset_spa(db->db_objset); 31299253d63dSGeorge Wilson 3130dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) && 3131dcbf3bd6SGeorge Wilson db->db_blkptr != NULL && 3132bbfa8ea8SMatthew Ahrens !BP_IS_HOLE(db->db_blkptr) && 3133bbfa8ea8SMatthew Ahrens !BP_IS_EMBEDDED(db->db_blkptr)) { 3134dcbf3bd6SGeorge Wilson do_arc_evict = B_TRUE; 3135dcbf3bd6SGeorge Wilson bp = *db->db_blkptr; 3136dcbf3bd6SGeorge Wilson } 3137dcbf3bd6SGeorge Wilson 3138dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) || 3139dcbf3bd6SGeorge Wilson db->db_pending_evict) { 3140dcbf3bd6SGeorge Wilson dbuf_destroy(db); 3141dcbf3bd6SGeorge Wilson } else if (!multilist_link_active(&db->db_cache_link)) { 3142adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, 3143adb52d92SMatthew Ahrens DB_NO_CACHE); 3144adb52d92SMatthew Ahrens 3145adb52d92SMatthew Ahrens dbuf_cached_state_t dcs = 3146adb52d92SMatthew Ahrens dbuf_include_in_metadata_cache(db) ? 
3147adb52d92SMatthew Ahrens DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 3148adb52d92SMatthew Ahrens db->db_caching_status = dcs; 3149adb52d92SMatthew Ahrens 3150adb52d92SMatthew Ahrens multilist_insert(dbuf_caches[dcs].cache, db); 3151e914ace2STim Schumacher (void) zfs_refcount_add_many( 3152e914ace2STim Schumacher &dbuf_caches[dcs].size, db->db.db_size, db); 3153dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 3154dcbf3bd6SGeorge Wilson 3155c2919acbSMatthew Ahrens if (db->db_caching_status == DB_DBUF_CACHE && 3156c2919acbSMatthew Ahrens !evicting) { 3157dcbf3bd6SGeorge Wilson dbuf_evict_notify(); 3158dcbf3bd6SGeorge Wilson } 3159adb52d92SMatthew Ahrens } 3160dcbf3bd6SGeorge Wilson 3161dcbf3bd6SGeorge Wilson if (do_arc_evict) 3162bbfa8ea8SMatthew Ahrens arc_freed(spa, &bp); 3163bbfa8ea8SMatthew Ahrens } 3164ea8dc4b6Seschrock } else { 3165ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 3166fa9e4066Sahrens } 3167dcbf3bd6SGeorge Wilson 3168fa9e4066Sahrens } 3169fa9e4066Sahrens 3170fa9e4066Sahrens #pragma weak dmu_buf_refcount = dbuf_refcount 3171fa9e4066Sahrens uint64_t 3172fa9e4066Sahrens dbuf_refcount(dmu_buf_impl_t *db) 3173fa9e4066Sahrens { 3174e914ace2STim Schumacher return (zfs_refcount_count(&db->db_holds)); 3175fa9e4066Sahrens } 3176fa9e4066Sahrens 3177eb633035STom Caputi uint64_t 3178eb633035STom Caputi dmu_buf_user_refcount(dmu_buf_t *db_fake) 3179eb633035STom Caputi { 3180eb633035STom Caputi uint64_t holds; 3181eb633035STom Caputi dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3182eb633035STom Caputi 3183eb633035STom Caputi mutex_enter(&db->db_mtx); 3184eb633035STom Caputi ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt); 3185eb633035STom Caputi holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt; 3186eb633035STom Caputi mutex_exit(&db->db_mtx); 3187eb633035STom Caputi 3188eb633035STom Caputi return (holds); 3189eb633035STom Caputi } 3190eb633035STom Caputi 3191fa9e4066Sahrens void * 3192bc9014e6SJustin Gibbs dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 3193bc9014e6SJustin Gibbs dmu_buf_user_t *new_user) 3194fa9e4066Sahrens { 3195bc9014e6SJustin Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3196bc9014e6SJustin Gibbs 3197bc9014e6SJustin Gibbs mutex_enter(&db->db_mtx); 3198bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3199bc9014e6SJustin Gibbs if (db->db_user == old_user) 3200bc9014e6SJustin Gibbs db->db_user = new_user; 3201bc9014e6SJustin Gibbs else 3202bc9014e6SJustin Gibbs old_user = db->db_user; 3203bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3204bc9014e6SJustin Gibbs mutex_exit(&db->db_mtx); 3205bc9014e6SJustin Gibbs 3206bc9014e6SJustin Gibbs return (old_user); 3207fa9e4066Sahrens } 3208fa9e4066Sahrens 3209fa9e4066Sahrens void * 3210bc9014e6SJustin Gibbs dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3211bc9014e6SJustin Gibbs { 3212bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, NULL, user)); 3213bc9014e6SJustin Gibbs } 3214bc9014e6SJustin Gibbs 3215bc9014e6SJustin Gibbs void * 3216bc9014e6SJustin Gibbs dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3217fa9e4066Sahrens { 3218fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3219fa9e4066Sahrens 3220d2058105SJustin T. 
Gibbs db->db_user_immediate_evict = TRUE; 3221bc9014e6SJustin Gibbs return (dmu_buf_set_user(db_fake, user)); 3222fa9e4066Sahrens } 3223fa9e4066Sahrens 3224fa9e4066Sahrens void * 3225bc9014e6SJustin Gibbs dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 3226fa9e4066Sahrens { 3227bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, user, NULL)); 3228fa9e4066Sahrens } 3229fa9e4066Sahrens 3230fa9e4066Sahrens void * 3231fa9e4066Sahrens dmu_buf_get_user(dmu_buf_t *db_fake) 3232fa9e4066Sahrens { 3233fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3234fa9e4066Sahrens 3235bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3236bc9014e6SJustin Gibbs return (db->db_user); 3237bc9014e6SJustin Gibbs } 3238bc9014e6SJustin Gibbs 3239bc9014e6SJustin Gibbs void 3240bc9014e6SJustin Gibbs dmu_buf_user_evict_wait() 3241bc9014e6SJustin Gibbs { 3242bc9014e6SJustin Gibbs taskq_wait(dbu_evict_taskq); 3243fa9e4066Sahrens } 3244fa9e4066Sahrens 324580901aeaSGeorge Wilson blkptr_t * 324680901aeaSGeorge Wilson dmu_buf_get_blkptr(dmu_buf_t *db) 324780901aeaSGeorge Wilson { 324880901aeaSGeorge Wilson dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 324980901aeaSGeorge Wilson return (dbi->db_blkptr); 325080901aeaSGeorge Wilson } 325180901aeaSGeorge Wilson 3252ae972795SMatthew Ahrens objset_t * 3253ae972795SMatthew Ahrens dmu_buf_get_objset(dmu_buf_t *db) 3254ae972795SMatthew Ahrens { 3255ae972795SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3256ae972795SMatthew Ahrens return (dbi->db_objset); 3257ae972795SMatthew Ahrens } 3258ae972795SMatthew Ahrens 325979d72832SMatthew Ahrens dnode_t * 326079d72832SMatthew Ahrens dmu_buf_dnode_enter(dmu_buf_t *db) 326179d72832SMatthew Ahrens { 326279d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 326379d72832SMatthew Ahrens DB_DNODE_ENTER(dbi); 326479d72832SMatthew Ahrens return (DB_DNODE(dbi)); 326579d72832SMatthew Ahrens } 326679d72832SMatthew Ahrens 326779d72832SMatthew Ahrens void 326879d72832SMatthew Ahrens dmu_buf_dnode_exit(dmu_buf_t *db) 326979d72832SMatthew Ahrens { 327079d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 327179d72832SMatthew Ahrens DB_DNODE_EXIT(dbi); 327279d72832SMatthew Ahrens } 327379d72832SMatthew Ahrens 3274c717a561Smaybee static void 3275c717a561Smaybee dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3276fa9e4066Sahrens { 3277c717a561Smaybee /* ASSERT(dmu_tx_is_syncing(tx) */ 3278c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 3279c717a561Smaybee 3280c717a561Smaybee if (db->db_blkptr != NULL) 3281c717a561Smaybee return; 3282c717a561Smaybee 32830a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 328454811da5SToomas Soome db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys); 32850a586ceaSMark Shellenbaum BP_ZERO(db->db_blkptr); 32860a586ceaSMark Shellenbaum return; 32870a586ceaSMark Shellenbaum } 3288c717a561Smaybee if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3289c717a561Smaybee /* 3290c717a561Smaybee * This buffer was allocated at a time when there was 3291c717a561Smaybee * no available blkptrs from the dnode, or it was 3292c717a561Smaybee * inappropriate to hook it in (i.e., nlevels mis-match). 
3293c717a561Smaybee */ 3294c717a561Smaybee ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3295c717a561Smaybee ASSERT(db->db_parent == NULL); 3296c717a561Smaybee db->db_parent = dn->dn_dbuf; 3297c717a561Smaybee db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3298c717a561Smaybee DBUF_VERIFY(db); 3299c717a561Smaybee } else { 3300c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 3301c717a561Smaybee int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3302c717a561Smaybee 3303c717a561Smaybee ASSERT(dn->dn_phys->dn_nlevels > 1); 3304c717a561Smaybee if (parent == NULL) { 3305c717a561Smaybee mutex_exit(&db->db_mtx); 3306c717a561Smaybee rw_enter(&dn->dn_struct_rwlock, RW_READER); 3307a2cdcdd2SPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 3308a2cdcdd2SPaul Dagnelie db->db_blkid >> epbs, db); 3309c717a561Smaybee rw_exit(&dn->dn_struct_rwlock); 3310c717a561Smaybee mutex_enter(&db->db_mtx); 3311c717a561Smaybee db->db_parent = parent; 3312c717a561Smaybee } 3313c717a561Smaybee db->db_blkptr = (blkptr_t *)parent->db.db_data + 3314c717a561Smaybee (db->db_blkid & ((1ULL << epbs) - 1)); 3315c717a561Smaybee DBUF_VERIFY(db); 3316c717a561Smaybee } 3317c717a561Smaybee } 3318c717a561Smaybee 3319eb633035STom Caputi /* 3320eb633035STom Caputi * When syncing out blocks of dnodes, adjust the block to deal with 3321eb633035STom Caputi * encryption. Normally, we make sure the block is decrypted before writing 3322eb633035STom Caputi * it. If we have crypt params, then we are writing a raw (encrypted) block, 3323eb633035STom Caputi * from a raw receive. In this case, set the ARC buf's crypt params so 3324eb633035STom Caputi * that the BP will be filled with the correct byteorder, salt, iv, and mac. 3325eb633035STom Caputi * 3326eb633035STom Caputi * XXX we should handle decrypting the dnode block in dbuf_dirty(). 3327eb633035STom Caputi */ 3328eb633035STom Caputi static void 3329eb633035STom Caputi dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr) 3330eb633035STom Caputi { 3331eb633035STom Caputi int err; 3332eb633035STom Caputi dmu_buf_impl_t *db = dr->dr_dbuf; 3333eb633035STom Caputi 3334eb633035STom Caputi ASSERT(MUTEX_HELD(&db->db_mtx)); 3335eb633035STom Caputi ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); 3336eb633035STom Caputi ASSERT3U(db->db_level, ==, 0); 3337eb633035STom Caputi 3338eb633035STom Caputi if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { 3339eb633035STom Caputi zbookmark_phys_t zb; 3340eb633035STom Caputi 3341eb633035STom Caputi /* 3342eb633035STom Caputi * Unfortunately, there is currently no mechanism for 3343eb633035STom Caputi * syncing context to handle decryption errors. An error 3344eb633035STom Caputi * here is only possible if an attacker maliciously 3345eb633035STom Caputi * changed a dnode block and updated the associated 3346eb633035STom Caputi * checksums going up the block tree. 
3347eb633035STom Caputi */ 3348eb633035STom Caputi SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset), 3349eb633035STom Caputi db->db.db_object, db->db_level, db->db_blkid); 3350eb633035STom Caputi err = arc_untransform(db->db_buf, db->db_objset->os_spa, 3351eb633035STom Caputi &zb, B_TRUE); 3352eb633035STom Caputi if (err) 3353eb633035STom Caputi panic("Invalid dnode block MAC"); 3354eb633035STom Caputi } else if (dr->dt.dl.dr_has_raw_params) { 3355eb633035STom Caputi (void) arc_release(dr->dt.dl.dr_data, db); 3356eb633035STom Caputi arc_convert_to_raw(dr->dt.dl.dr_data, 3357eb633035STom Caputi dmu_objset_id(db->db_objset), 3358eb633035STom Caputi dr->dt.dl.dr_byteorder, DMU_OT_DNODE, 3359eb633035STom Caputi dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac); 3360eb633035STom Caputi } 3361eb633035STom Caputi } 3362eb633035STom Caputi 3363c717a561Smaybee static void 3364c717a561Smaybee dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3365c717a561Smaybee { 3366c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3367744947dcSTom Erickson dnode_t *dn; 3368c717a561Smaybee zio_t *zio; 3369c717a561Smaybee 3370c717a561Smaybee ASSERT(dmu_tx_is_syncing(tx)); 3371c717a561Smaybee 3372c717a561Smaybee dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3373c717a561Smaybee 3374c717a561Smaybee mutex_enter(&db->db_mtx); 3375c717a561Smaybee 3376c717a561Smaybee ASSERT(db->db_level > 0); 3377c717a561Smaybee DBUF_VERIFY(db); 3378c717a561Smaybee 33793e30c24aSWill Andrews /* Read the block if it hasn't been read yet. */ 3380c717a561Smaybee if (db->db_buf == NULL) { 3381c717a561Smaybee mutex_exit(&db->db_mtx); 3382c717a561Smaybee (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 3383c717a561Smaybee mutex_enter(&db->db_mtx); 3384c717a561Smaybee } 3385c717a561Smaybee ASSERT3U(db->db_state, ==, DB_CACHED); 3386c717a561Smaybee ASSERT(db->db_buf != NULL); 3387c717a561Smaybee 3388744947dcSTom Erickson DB_DNODE_ENTER(db); 3389744947dcSTom Erickson dn = DB_DNODE(db); 33903e30c24aSWill Andrews /* Indirect block size must match what the dnode thinks it is. 
*/ 3391744947dcSTom Erickson ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3392c717a561Smaybee dbuf_check_blkptr(dn, db); 3393744947dcSTom Erickson DB_DNODE_EXIT(db); 3394c717a561Smaybee 33953e30c24aSWill Andrews /* Provide the pending dirty record to child dbufs */ 3396c717a561Smaybee db->db_data_pending = dr; 3397c717a561Smaybee 3398af2c4821Smaybee mutex_exit(&db->db_mtx); 33995cabbc6bSPrashanth Sreenivasa 3400088f3894Sahrens dbuf_write(dr, db->db_buf, tx); 3401c717a561Smaybee 3402c717a561Smaybee zio = dr->dr_zio; 3403c717a561Smaybee mutex_enter(&dr->dt.di.dr_mtx); 340446e1baa6SMatthew Ahrens dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3405c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3406c717a561Smaybee mutex_exit(&dr->dt.di.dr_mtx); 3407c717a561Smaybee zio_nowait(zio); 3408c717a561Smaybee } 3409c717a561Smaybee 3410c717a561Smaybee static void 3411c717a561Smaybee dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3412c717a561Smaybee { 3413c717a561Smaybee arc_buf_t **datap = &dr->dt.dl.dr_data; 3414c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3415744947dcSTom Erickson dnode_t *dn; 3416744947dcSTom Erickson objset_t *os; 3417c717a561Smaybee uint64_t txg = tx->tx_txg; 3418fa9e4066Sahrens 3419fa9e4066Sahrens ASSERT(dmu_tx_is_syncing(tx)); 3420fa9e4066Sahrens 3421fa9e4066Sahrens dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3422fa9e4066Sahrens 3423fa9e4066Sahrens mutex_enter(&db->db_mtx); 3424fa9e4066Sahrens /* 3425fa9e4066Sahrens * To be synced, we must be dirtied. But we 3426fa9e4066Sahrens * might have been freed after the dirty. 3427fa9e4066Sahrens */ 3428fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 3429fa9e4066Sahrens /* This buffer has been freed since it was dirtied */ 3430fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 3431fa9e4066Sahrens } else if (db->db_state == DB_FILL) { 3432fa9e4066Sahrens /* This buffer was freed and is now being re-filled */ 3433c717a561Smaybee ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3434fa9e4066Sahrens } else { 343582c9918fSTim Haley ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3436fa9e4066Sahrens } 34379c9dc39aSek110237 DBUF_VERIFY(db); 3438fa9e4066Sahrens 3439744947dcSTom Erickson DB_DNODE_ENTER(db); 3440744947dcSTom Erickson dn = DB_DNODE(db); 3441744947dcSTom Erickson 34420a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 34430a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 34440a586ceaSMark Shellenbaum dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 34450a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 34460a586ceaSMark Shellenbaum } 34470a586ceaSMark Shellenbaum 3448fa9e4066Sahrens /* 3449c717a561Smaybee * If this is a bonus buffer, simply copy the bonus data into the 3450c717a561Smaybee * dnode. It will be written out when the dnode is synced (and it 3451c717a561Smaybee * will be synced, since it must have been dirty for dbuf_sync to 3452c717a561Smaybee * be called). 
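 * (Callers typically reached this data via dmu_bonus_hold() and dirtied
 * it with dmu_buf_will_dirty(); there is no separate block pointer or
 * I/O for the bonus buffer, only this copy into the dnode_phys_t.)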
3453fa9e4066Sahrens */ 34540a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3455c717a561Smaybee dbuf_dirty_record_t **drp; 34561934e92fSmaybee 3457ea8dc4b6Seschrock ASSERT(*datap != NULL); 3458fb09f5aaSMadhav Suresh ASSERT0(db->db_level); 345954811da5SToomas Soome ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=, 346054811da5SToomas Soome DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1)); 346154811da5SToomas Soome bcopy(*datap, DN_BONUS(dn->dn_phys), 346254811da5SToomas Soome DN_MAX_BONUS_LEN(dn->dn_phys)); 3463744947dcSTom Erickson DB_DNODE_EXIT(db); 3464744947dcSTom Erickson 34650e8c6158Smaybee if (*datap != db->db.db_data) { 346654811da5SToomas Soome int slots = DB_DNODE(db)->dn_num_slots; 346754811da5SToomas Soome int bonuslen = DN_SLOTS_TO_BONUSLEN(slots); 346854811da5SToomas Soome zio_buf_free(*datap, bonuslen); 346954811da5SToomas Soome arc_space_return(bonuslen, ARC_SPACE_BONUS); 34700e8c6158Smaybee } 3471ea8dc4b6Seschrock db->db_data_pending = NULL; 3472c717a561Smaybee drp = &db->db_last_dirty; 3473c717a561Smaybee while (*drp != dr) 3474c717a561Smaybee drp = &(*drp)->dr_next; 347517f17c2dSbonwick ASSERT(dr->dr_next == NULL); 3476b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 347717f17c2dSbonwick *drp = dr->dr_next; 3478c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3479ea8dc4b6Seschrock ASSERT(db->db_dirtycnt > 0); 3480ea8dc4b6Seschrock db->db_dirtycnt -= 1; 3481c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg, B_FALSE); 3482ea8dc4b6Seschrock return; 3483ea8dc4b6Seschrock } 3484ea8dc4b6Seschrock 3485744947dcSTom Erickson os = dn->dn_objset; 3486744947dcSTom Erickson 3487c5c6ffa0Smaybee /* 3488f82bfe17Sgw25295 * This function may have dropped the db_mtx lock allowing a dmu_sync 3489f82bfe17Sgw25295 * operation to sneak in. As a result, we need to ensure that we 3490f82bfe17Sgw25295 * don't check the dr_override_state until we have returned from 3491f82bfe17Sgw25295 * dbuf_check_blkptr. 3492f82bfe17Sgw25295 */ 3493f82bfe17Sgw25295 dbuf_check_blkptr(dn, db); 3494f82bfe17Sgw25295 3495f82bfe17Sgw25295 /* 3496744947dcSTom Erickson * If this buffer is in the middle of an immediate write, 3497c717a561Smaybee * wait for the synchronous IO to complete. 3498c5c6ffa0Smaybee */ 3499c717a561Smaybee while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3500c5c6ffa0Smaybee ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3501c5c6ffa0Smaybee cv_wait(&db->db_changed, &db->db_mtx); 3502c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3503c5c6ffa0Smaybee } 3504c717a561Smaybee 3505eb633035STom Caputi /* 3506eb633035STom Caputi * If this is a dnode block, ensure it is appropriately encrypted 3507eb633035STom Caputi * or decrypted, depending on what we are writing to it this txg. 
3508eb633035STom Caputi */ 3509eb633035STom Caputi if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT) 3510eb633035STom Caputi dbuf_prepare_encrypted_dnode_leaf(dr); 3511eb633035STom Caputi 3512ab69d62fSMatthew Ahrens if (db->db_state != DB_NOFILL && 3513ab69d62fSMatthew Ahrens dn->dn_object != DMU_META_DNODE_OBJECT && 3514e914ace2STim Schumacher zfs_refcount_count(&db->db_holds) > 1 && 3515b24ab676SJeff Bonwick dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3516ab69d62fSMatthew Ahrens *datap == db->db_buf) { 3517fa9e4066Sahrens /* 351882c9918fSTim Haley * If this buffer is currently "in use" (i.e., there 351982c9918fSTim Haley * are active holds and db_data still references it), 352082c9918fSTim Haley * then make a copy before we start the write so that 352182c9918fSTim Haley * any modifications from the open txg will not leak 352282c9918fSTim Haley * into this write. 3523fa9e4066Sahrens * 352482c9918fSTim Haley * NOTE: this copy does not need to be made for 352582c9918fSTim Haley * objects only modified in the syncing context (e.g. 352682c9918fSTim Haley * DNONE_DNODE blocks). 3527fa9e4066Sahrens */ 35285602294fSDan Kimmel int psize = arc_buf_size(*datap); 3529eb633035STom Caputi int lsize = arc_buf_lsize(*datap); 3530ab69d62fSMatthew Ahrens arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 35315602294fSDan Kimmel enum zio_compress compress_type = arc_get_compression(*datap); 35325602294fSDan Kimmel 3533eb633035STom Caputi if (arc_is_encrypted(*datap)) { 3534eb633035STom Caputi boolean_t byteorder; 3535eb633035STom Caputi uint8_t salt[ZIO_DATA_SALT_LEN]; 3536eb633035STom Caputi uint8_t iv[ZIO_DATA_IV_LEN]; 3537eb633035STom Caputi uint8_t mac[ZIO_DATA_MAC_LEN]; 3538eb633035STom Caputi 3539eb633035STom Caputi arc_get_raw_params(*datap, &byteorder, salt, iv, mac); 3540eb633035STom Caputi *datap = arc_alloc_raw_buf(os->os_spa, db, 3541eb633035STom Caputi dmu_objset_id(os), byteorder, salt, iv, mac, 3542eb633035STom Caputi dn->dn_type, psize, lsize, compress_type); 3543eb633035STom Caputi } else if (compress_type != ZIO_COMPRESS_OFF) { 35445602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 35455602294fSDan Kimmel *datap = arc_alloc_compressed_buf(os->os_spa, db, 35465602294fSDan Kimmel psize, lsize, compress_type); 3547eb633035STom Caputi } else { 3548eb633035STom Caputi *datap = arc_alloc_buf(os->os_spa, db, type, psize); 35495602294fSDan Kimmel } 35505602294fSDan Kimmel bcopy(db->db.db_data, (*datap)->b_data, psize); 3551fa9e4066Sahrens } 3552c717a561Smaybee db->db_data_pending = dr; 3553fa9e4066Sahrens 3554fa9e4066Sahrens mutex_exit(&db->db_mtx); 3555fa9e4066Sahrens 3556088f3894Sahrens dbuf_write(dr, *datap, tx); 3557c717a561Smaybee 3558c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 3559744947dcSTom Erickson if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3560c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3561744947dcSTom Erickson DB_DNODE_EXIT(db); 3562744947dcSTom Erickson } else { 3563744947dcSTom Erickson /* 3564744947dcSTom Erickson * Although zio_nowait() does not "wait for an IO", it does 3565744947dcSTom Erickson * initiate the IO. If this is an empty write it seems plausible 3566744947dcSTom Erickson * that the IO could actually be completed before the nowait 3567744947dcSTom Erickson * returns. We need to DB_DNODE_EXIT() first in case 3568744947dcSTom Erickson * zio_nowait() invalidates the dbuf. 
3569744947dcSTom Erickson */ 3570744947dcSTom Erickson DB_DNODE_EXIT(db); 3571c717a561Smaybee zio_nowait(dr->dr_zio); 3572fa9e4066Sahrens } 3573744947dcSTom Erickson } 3574c717a561Smaybee 3575c717a561Smaybee void 357646e1baa6SMatthew Ahrens dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3577c717a561Smaybee { 3578c717a561Smaybee dbuf_dirty_record_t *dr; 3579c717a561Smaybee 3580c717a561Smaybee while (dr = list_head(list)) { 3581c717a561Smaybee if (dr->dr_zio != NULL) { 3582c717a561Smaybee /* 3583c717a561Smaybee * If we find an already initialized zio then we 3584c717a561Smaybee * are processing the meta-dnode, and we have finished. 3585c717a561Smaybee * The dbufs for all dnodes are put back on the list 3586c717a561Smaybee * during processing, so that we can zio_wait() 3587c717a561Smaybee * these IOs after initiating all child IOs. 3588c717a561Smaybee */ 3589c717a561Smaybee ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3590c717a561Smaybee DMU_META_DNODE_OBJECT); 3591c717a561Smaybee break; 3592fa9e4066Sahrens } 359346e1baa6SMatthew Ahrens if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 359446e1baa6SMatthew Ahrens dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 359546e1baa6SMatthew Ahrens VERIFY3U(dr->dr_dbuf->db_level, ==, level); 359646e1baa6SMatthew Ahrens } 3597c717a561Smaybee list_remove(list, dr); 3598c717a561Smaybee if (dr->dr_dbuf->db_level > 0) 3599c717a561Smaybee dbuf_sync_indirect(dr, tx); 3600c717a561Smaybee else 3601c717a561Smaybee dbuf_sync_leaf(dr, tx); 3602c717a561Smaybee } 3603c717a561Smaybee } 3604c717a561Smaybee 3605fa9e4066Sahrens /* ARGSUSED */ 3606fa9e4066Sahrens static void 3607c717a561Smaybee dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3608fa9e4066Sahrens { 3609fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 3610744947dcSTom Erickson dnode_t *dn; 3611e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3612c717a561Smaybee blkptr_t *bp_orig = &zio->io_bp_orig; 3613b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3614b24ab676SJeff Bonwick int64_t delta; 3615fa9e4066Sahrens uint64_t fill = 0; 3616b24ab676SJeff Bonwick int i; 3617fa9e4066Sahrens 361811ceac77SAlex Reece ASSERT3P(db->db_blkptr, !=, NULL); 361911ceac77SAlex Reece ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3620e14bb325SJeff Bonwick 3621744947dcSTom Erickson DB_DNODE_ENTER(db); 3622744947dcSTom Erickson dn = DB_DNODE(db); 3623b24ab676SJeff Bonwick delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3624b24ab676SJeff Bonwick dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3625b24ab676SJeff Bonwick zio->io_prev_space_delta = delta; 3626fa9e4066Sahrens 362743466aaeSMax Grossman if (bp->blk_birth != 0) { 36280a586ceaSMark Shellenbaum ASSERT((db->db_blkid != DMU_SPILL_BLKID && 36290a586ceaSMark Shellenbaum BP_GET_TYPE(bp) == dn->dn_type) || 36300a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && 36315d7b4d43SMatthew Ahrens BP_GET_TYPE(bp) == dn->dn_bonustype) || 36325d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp)); 3633e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == db->db_level); 363443466aaeSMax Grossman } 3635e14bb325SJeff Bonwick 3636fa9e4066Sahrens mutex_enter(&db->db_mtx); 3637fa9e4066Sahrens 36380a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 36390a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 36400a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 364111ceac77SAlex Reece ASSERT(!(BP_IS_HOLE(bp)) && 364254811da5SToomas Soome db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 36430a586ceaSMark Shellenbaum } 36440a586ceaSMark 
Shellenbaum #endif 36450a586ceaSMark Shellenbaum 3646fa9e4066Sahrens if (db->db_level == 0) { 3647fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 36480a586ceaSMark Shellenbaum if (db->db_blkid > dn->dn_phys->dn_maxblkid && 3649eb633035STom Caputi db->db_blkid != DMU_SPILL_BLKID) { 3650eb633035STom Caputi ASSERT0(db->db_objset->os_raw_receive); 3651fa9e4066Sahrens dn->dn_phys->dn_maxblkid = db->db_blkid; 3652eb633035STom Caputi } 3653fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 3654fa9e4066Sahrens 3655fa9e4066Sahrens if (dn->dn_type == DMU_OT_DNODE) { 365654811da5SToomas Soome i = 0; 365754811da5SToomas Soome while (i < db->db.db_size) { 365854811da5SToomas Soome dnode_phys_t *dnp = 365954811da5SToomas Soome (void *)(((char *)db->db.db_data) + i); 366054811da5SToomas Soome 366154811da5SToomas Soome i += DNODE_MIN_SIZE; 366254811da5SToomas Soome if (dnp->dn_type != DMU_OT_NONE) { 3663fa9e4066Sahrens fill++; 366454811da5SToomas Soome i += dnp->dn_extra_slots * 366554811da5SToomas Soome DNODE_MIN_SIZE; 366654811da5SToomas Soome } 3667fa9e4066Sahrens } 3668fa9e4066Sahrens } else { 366943466aaeSMax Grossman if (BP_IS_HOLE(bp)) { 367043466aaeSMax Grossman fill = 0; 367143466aaeSMax Grossman } else { 3672fa9e4066Sahrens fill = 1; 3673fa9e4066Sahrens } 367443466aaeSMax Grossman } 3675fa9e4066Sahrens } else { 3676e14bb325SJeff Bonwick blkptr_t *ibp = db->db.db_data; 3677fa9e4066Sahrens ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3678e14bb325SJeff Bonwick for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) { 3679e14bb325SJeff Bonwick if (BP_IS_HOLE(ibp)) 3680fa9e4066Sahrens continue; 36815d7b4d43SMatthew Ahrens fill += BP_GET_FILL(ibp); 3682fa9e4066Sahrens } 3683fa9e4066Sahrens } 3684744947dcSTom Erickson DB_DNODE_EXIT(db); 3685fa9e4066Sahrens 36865d7b4d43SMatthew Ahrens if (!BP_IS_EMBEDDED(bp)) 3687eb633035STom Caputi BP_SET_FILL(bp, fill); 3688fa9e4066Sahrens 3689fa9e4066Sahrens mutex_exit(&db->db_mtx); 369011ceac77SAlex Reece 369111ceac77SAlex Reece rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 369211ceac77SAlex Reece *db->db_blkptr = *bp; 369311ceac77SAlex Reece rw_exit(&dn->dn_struct_rwlock); 3694fa9e4066Sahrens } 3695fa9e4066Sahrens 36968df0bcf0SPaul Dagnelie /* ARGSUSED */ 36978df0bcf0SPaul Dagnelie /* 36988df0bcf0SPaul Dagnelie * This function gets called just prior to running through the compression 36998df0bcf0SPaul Dagnelie * stage of the zio pipeline. If we're an indirect block comprised of only 37008df0bcf0SPaul Dagnelie * holes, then we want this indirect to be compressed away to a hole. In 37018df0bcf0SPaul Dagnelie * order to do that we must zero out any information about the holes that 37028df0bcf0SPaul Dagnelie * this indirect points to prior to before we try to compress it. 
37038df0bcf0SPaul Dagnelie */ 37048df0bcf0SPaul Dagnelie static void 37058df0bcf0SPaul Dagnelie dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 37068df0bcf0SPaul Dagnelie { 37078df0bcf0SPaul Dagnelie dmu_buf_impl_t *db = vdb; 37088df0bcf0SPaul Dagnelie dnode_t *dn; 37098df0bcf0SPaul Dagnelie blkptr_t *bp; 37101a01181fSGeorge Wilson unsigned int epbs, i; 37118df0bcf0SPaul Dagnelie 37128df0bcf0SPaul Dagnelie ASSERT3U(db->db_level, >, 0); 37138df0bcf0SPaul Dagnelie DB_DNODE_ENTER(db); 37148df0bcf0SPaul Dagnelie dn = DB_DNODE(db); 37158df0bcf0SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 37161a01181fSGeorge Wilson ASSERT3U(epbs, <, 31); 37178df0bcf0SPaul Dagnelie 37188df0bcf0SPaul Dagnelie /* Determine if all our children are holes */ 37198df0bcf0SPaul Dagnelie for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) { 37208df0bcf0SPaul Dagnelie if (!BP_IS_HOLE(bp)) 37218df0bcf0SPaul Dagnelie break; 37228df0bcf0SPaul Dagnelie } 37238df0bcf0SPaul Dagnelie 37248df0bcf0SPaul Dagnelie /* 37258df0bcf0SPaul Dagnelie * If all the children are holes, then zero them all out so that 37268df0bcf0SPaul Dagnelie * we may get compressed away. 37278df0bcf0SPaul Dagnelie */ 37288df0bcf0SPaul Dagnelie if (i == 1 << epbs) { 37291a01181fSGeorge Wilson /* 37301a01181fSGeorge Wilson * We only found holes. Grab the rwlock to prevent 37311a01181fSGeorge Wilson * anybody from reading the blocks we're about to 37321a01181fSGeorge Wilson * zero out. 37331a01181fSGeorge Wilson */ 37341a01181fSGeorge Wilson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 37358df0bcf0SPaul Dagnelie bzero(db->db.db_data, db->db.db_size); 37361a01181fSGeorge Wilson rw_exit(&dn->dn_struct_rwlock); 37378df0bcf0SPaul Dagnelie } 37388df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 37398df0bcf0SPaul Dagnelie } 37408df0bcf0SPaul Dagnelie 374169962b56SMatthew Ahrens /* 374269962b56SMatthew Ahrens * The SPA will call this callback several times for each zio - once 374369962b56SMatthew Ahrens * for every physical child i/o (zio->io_phys_children times). This 374469962b56SMatthew Ahrens * allows the DMU to monitor the progress of each logical i/o. For example, 374569962b56SMatthew Ahrens * there may be 2 copies of an indirect block, or many fragments of a RAID-Z 374669962b56SMatthew Ahrens * block. There may be a long delay before all copies/fragments are completed, 374769962b56SMatthew Ahrens * so this callback allows us to retire dirty space gradually, as the physical 374869962b56SMatthew Ahrens * i/os complete. 374969962b56SMatthew Ahrens */ 375069962b56SMatthew Ahrens /* ARGSUSED */ 375169962b56SMatthew Ahrens static void 375269962b56SMatthew Ahrens dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg) 375369962b56SMatthew Ahrens { 375469962b56SMatthew Ahrens dmu_buf_impl_t *db = arg; 375569962b56SMatthew Ahrens objset_t *os = db->db_objset; 375669962b56SMatthew Ahrens dsl_pool_t *dp = dmu_objset_pool(os); 375769962b56SMatthew Ahrens dbuf_dirty_record_t *dr; 375869962b56SMatthew Ahrens int delta = 0; 375969962b56SMatthew Ahrens 376069962b56SMatthew Ahrens dr = db->db_data_pending; 376169962b56SMatthew Ahrens ASSERT3U(dr->dr_txg, ==, zio->io_txg); 376269962b56SMatthew Ahrens 376369962b56SMatthew Ahrens /* 376469962b56SMatthew Ahrens * The callback will be called io_phys_children times. Retire one 376569962b56SMatthew Ahrens * portion of our dirty space each time we are called. 
Any rounding 376669962b56SMatthew Ahrens * error will be cleaned up by dsl_pool_sync()'s call to 376769962b56SMatthew Ahrens * dsl_pool_undirty_space(). 376869962b56SMatthew Ahrens */ 376969962b56SMatthew Ahrens delta = dr->dr_accounted / zio->io_phys_children; 377069962b56SMatthew Ahrens dsl_pool_undirty_space(dp, delta, zio->io_txg); 377169962b56SMatthew Ahrens } 377269962b56SMatthew Ahrens 3773c717a561Smaybee /* ARGSUSED */ 3774c717a561Smaybee static void 3775c717a561Smaybee dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb) 3776c717a561Smaybee { 3777c717a561Smaybee dmu_buf_impl_t *db = vdb; 3778b24ab676SJeff Bonwick blkptr_t *bp_orig = &zio->io_bp_orig; 377943466aaeSMax Grossman blkptr_t *bp = db->db_blkptr; 378043466aaeSMax Grossman objset_t *os = db->db_objset; 378143466aaeSMax Grossman dmu_tx_t *tx = os->os_synctx; 3782c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 3783c717a561Smaybee 3784fb09f5aaSMadhav Suresh ASSERT0(zio->io_error); 3785b24ab676SJeff Bonwick ASSERT(db->db_blkptr == bp); 3786b24ab676SJeff Bonwick 378780901aeaSGeorge Wilson /* 378880901aeaSGeorge Wilson * For nopwrites and rewrites we ensure that the bp matches our 378980901aeaSGeorge Wilson * original and bypass all the accounting. 379080901aeaSGeorge Wilson */ 379180901aeaSGeorge Wilson if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) { 3792b24ab676SJeff Bonwick ASSERT(BP_EQUAL(bp, bp_orig)); 3793b24ab676SJeff Bonwick } else { 379443466aaeSMax Grossman dsl_dataset_t *ds = os->os_dsl_dataset; 3795b24ab676SJeff Bonwick (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); 3796b24ab676SJeff Bonwick dsl_dataset_block_born(ds, bp, tx); 3797b24ab676SJeff Bonwick } 3798c717a561Smaybee 3799c717a561Smaybee mutex_enter(&db->db_mtx); 3800c717a561Smaybee 3801b24ab676SJeff Bonwick DBUF_VERIFY(db); 3802b24ab676SJeff Bonwick 3803c717a561Smaybee drp = &db->db_last_dirty; 380417f17c2dSbonwick while ((dr = *drp) != db->db_data_pending) 380517f17c2dSbonwick drp = &dr->dr_next; 380617f17c2dSbonwick ASSERT(!list_link_active(&dr->dr_dirty_node)); 3807b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 380817f17c2dSbonwick ASSERT(dr->dr_next == NULL); 380917f17c2dSbonwick *drp = dr->dr_next; 3810c717a561Smaybee 38110a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 38120a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 3813744947dcSTom Erickson dnode_t *dn; 3814744947dcSTom Erickson 3815744947dcSTom Erickson DB_DNODE_ENTER(db); 3816744947dcSTom Erickson dn = DB_DNODE(db); 38170a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 38180a586ceaSMark Shellenbaum ASSERT(!(BP_IS_HOLE(db->db_blkptr)) && 381954811da5SToomas Soome db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys)); 3820744947dcSTom Erickson DB_DNODE_EXIT(db); 38210a586ceaSMark Shellenbaum } 38220a586ceaSMark Shellenbaum #endif 38230a586ceaSMark Shellenbaum 3824c717a561Smaybee if (db->db_level == 0) { 38250a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 3826c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 382782c9918fSTim Haley if (db->db_state != DB_NOFILL) { 3828c717a561Smaybee if (dr->dt.dl.dr_data != db->db_buf) 3829dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 383082c9918fSTim Haley } 3831c717a561Smaybee } else { 3832744947dcSTom Erickson dnode_t *dn; 3833744947dcSTom Erickson 3834744947dcSTom Erickson DB_DNODE_ENTER(db); 3835744947dcSTom Erickson dn = DB_DNODE(db); 3836c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3837c717a561Smaybee 
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift); 3838c717a561Smaybee if (!BP_IS_HOLE(db->db_blkptr)) { 3839c717a561Smaybee int epbs = 3840c717a561Smaybee dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 384143466aaeSMax Grossman ASSERT3U(db->db_blkid, <=, 384243466aaeSMax Grossman dn->dn_phys->dn_maxblkid >> (db->db_level * epbs)); 3843c717a561Smaybee ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 3844c717a561Smaybee db->db.db_size); 3845c717a561Smaybee } 3846744947dcSTom Erickson DB_DNODE_EXIT(db); 3847c25056deSgw25295 mutex_destroy(&dr->dt.di.dr_mtx); 3848c25056deSgw25295 list_destroy(&dr->dt.di.dr_children); 3849c717a561Smaybee } 3850c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3851c717a561Smaybee 3852c717a561Smaybee cv_broadcast(&db->db_changed); 3853c717a561Smaybee ASSERT(db->db_dirtycnt > 0); 3854c717a561Smaybee db->db_dirtycnt -= 1; 3855c717a561Smaybee db->db_data_pending = NULL; 3856c2919acbSMatthew Ahrens dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE); 3857b24ab676SJeff Bonwick } 3858b24ab676SJeff Bonwick 3859b24ab676SJeff Bonwick static void 3860b24ab676SJeff Bonwick dbuf_write_nofill_ready(zio_t *zio) 3861b24ab676SJeff Bonwick { 3862b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, zio->io_private); 3863b24ab676SJeff Bonwick } 3864b24ab676SJeff Bonwick 3865b24ab676SJeff Bonwick static void 3866b24ab676SJeff Bonwick dbuf_write_nofill_done(zio_t *zio) 3867b24ab676SJeff Bonwick { 3868b24ab676SJeff Bonwick dbuf_write_done(zio, NULL, zio->io_private); 3869b24ab676SJeff Bonwick } 3870b24ab676SJeff Bonwick 3871b24ab676SJeff Bonwick static void 3872b24ab676SJeff Bonwick dbuf_write_override_ready(zio_t *zio) 3873b24ab676SJeff Bonwick { 3874b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3875b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3876b24ab676SJeff Bonwick 3877b24ab676SJeff Bonwick dbuf_write_ready(zio, NULL, db); 3878b24ab676SJeff Bonwick } 3879b24ab676SJeff Bonwick 3880b24ab676SJeff Bonwick static void 3881b24ab676SJeff Bonwick dbuf_write_override_done(zio_t *zio) 3882b24ab676SJeff Bonwick { 3883b24ab676SJeff Bonwick dbuf_dirty_record_t *dr = zio->io_private; 3884b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 3885b24ab676SJeff Bonwick blkptr_t *obp = &dr->dt.dl.dr_overridden_by; 3886b24ab676SJeff Bonwick 3887b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 3888b24ab676SJeff Bonwick if (!BP_EQUAL(zio->io_bp, obp)) { 3889b24ab676SJeff Bonwick if (!BP_IS_HOLE(obp)) 3890b24ab676SJeff Bonwick dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp); 3891b24ab676SJeff Bonwick arc_release(dr->dt.dl.dr_data, db); 3892b24ab676SJeff Bonwick } 3893c717a561Smaybee mutex_exit(&db->db_mtx); 38944ee0199eSRobert Mustacchi dbuf_write_done(zio, NULL, db); 3895770499e1SDan Kimmel 3896770499e1SDan Kimmel if (zio->io_abd != NULL) 3897770499e1SDan Kimmel abd_put(zio->io_abd); 3898b24ab676SJeff Bonwick } 3899c717a561Smaybee 39005cabbc6bSPrashanth Sreenivasa typedef struct dbuf_remap_impl_callback_arg { 39015cabbc6bSPrashanth Sreenivasa objset_t *drica_os; 39025cabbc6bSPrashanth Sreenivasa uint64_t drica_blk_birth; 39035cabbc6bSPrashanth Sreenivasa dmu_tx_t *drica_tx; 39045cabbc6bSPrashanth Sreenivasa } dbuf_remap_impl_callback_arg_t; 39055cabbc6bSPrashanth Sreenivasa 39065cabbc6bSPrashanth Sreenivasa static void 39075cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size, 39085cabbc6bSPrashanth Sreenivasa void *arg) 39095cabbc6bSPrashanth Sreenivasa { 39105cabbc6bSPrashanth Sreenivasa 
dbuf_remap_impl_callback_arg_t *drica = arg; 39115cabbc6bSPrashanth Sreenivasa objset_t *os = drica->drica_os; 39125cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(os); 39135cabbc6bSPrashanth Sreenivasa dmu_tx_t *tx = drica->drica_tx; 39145cabbc6bSPrashanth Sreenivasa 39155cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 39165cabbc6bSPrashanth Sreenivasa 39175cabbc6bSPrashanth Sreenivasa if (os == spa_meta_objset(spa)) { 39185cabbc6bSPrashanth Sreenivasa spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx); 39195cabbc6bSPrashanth Sreenivasa } else { 39205cabbc6bSPrashanth Sreenivasa dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset, 39215cabbc6bSPrashanth Sreenivasa size, drica->drica_blk_birth, tx); 39225cabbc6bSPrashanth Sreenivasa } 39235cabbc6bSPrashanth Sreenivasa } 39245cabbc6bSPrashanth Sreenivasa 39255cabbc6bSPrashanth Sreenivasa static void 39265cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx) 39275cabbc6bSPrashanth Sreenivasa { 39285cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = *bp; 39295cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 39305cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback_arg_t drica; 39315cabbc6bSPrashanth Sreenivasa 39325cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 39335cabbc6bSPrashanth Sreenivasa 39345cabbc6bSPrashanth Sreenivasa drica.drica_os = dn->dn_objset; 39355cabbc6bSPrashanth Sreenivasa drica.drica_blk_birth = bp->blk_birth; 39365cabbc6bSPrashanth Sreenivasa drica.drica_tx = tx; 39375cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback, 39385cabbc6bSPrashanth Sreenivasa &drica)) { 39395cabbc6bSPrashanth Sreenivasa /* 39405cabbc6bSPrashanth Sreenivasa * The struct_rwlock prevents dbuf_read_impl() from 39415cabbc6bSPrashanth Sreenivasa * dereferencing the BP while we are changing it. To 39425cabbc6bSPrashanth Sreenivasa * avoid lock contention, only grab it when we are actually 39435cabbc6bSPrashanth Sreenivasa * changing the BP. 39445cabbc6bSPrashanth Sreenivasa */ 39455cabbc6bSPrashanth Sreenivasa rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 39465cabbc6bSPrashanth Sreenivasa *bp = bp_copy; 39475cabbc6bSPrashanth Sreenivasa rw_exit(&dn->dn_struct_rwlock); 39485cabbc6bSPrashanth Sreenivasa } 39495cabbc6bSPrashanth Sreenivasa } 39505cabbc6bSPrashanth Sreenivasa 39515cabbc6bSPrashanth Sreenivasa /* 39525cabbc6bSPrashanth Sreenivasa * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting 39535cabbc6bSPrashanth Sreenivasa * to remap a copy of every bp in the dbuf. 
39545cabbc6bSPrashanth Sreenivasa */ 39555cabbc6bSPrashanth Sreenivasa boolean_t 39565cabbc6bSPrashanth Sreenivasa dbuf_can_remap(const dmu_buf_impl_t *db) 39575cabbc6bSPrashanth Sreenivasa { 39585cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 39595cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 39605cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 39615cabbc6bSPrashanth Sreenivasa 39625cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_level, >, 0); 39635cabbc6bSPrashanth Sreenivasa ASSERT3S(db->db_state, ==, DB_CACHED); 39645cabbc6bSPrashanth Sreenivasa 39655cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 39665cabbc6bSPrashanth Sreenivasa 39675cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 39685cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 39695cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = bp[i]; 39705cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 39715cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 39725cabbc6bSPrashanth Sreenivasa break; 39735cabbc6bSPrashanth Sreenivasa } 39745cabbc6bSPrashanth Sreenivasa } 39755cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 39765cabbc6bSPrashanth Sreenivasa 39775cabbc6bSPrashanth Sreenivasa return (ret); 39785cabbc6bSPrashanth Sreenivasa } 39795cabbc6bSPrashanth Sreenivasa 39805cabbc6bSPrashanth Sreenivasa boolean_t 39815cabbc6bSPrashanth Sreenivasa dnode_needs_remap(const dnode_t *dn) 39825cabbc6bSPrashanth Sreenivasa { 39835cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(dn->dn_objset); 39845cabbc6bSPrashanth Sreenivasa boolean_t ret = B_FALSE; 39855cabbc6bSPrashanth Sreenivasa 39865cabbc6bSPrashanth Sreenivasa if (dn->dn_phys->dn_nlevels == 0) { 39875cabbc6bSPrashanth Sreenivasa return (B_FALSE); 39885cabbc6bSPrashanth Sreenivasa } 39895cabbc6bSPrashanth Sreenivasa 39905cabbc6bSPrashanth Sreenivasa ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 39915cabbc6bSPrashanth Sreenivasa 39925cabbc6bSPrashanth Sreenivasa spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 39935cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) { 39945cabbc6bSPrashanth Sreenivasa blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j]; 39955cabbc6bSPrashanth Sreenivasa if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) { 39965cabbc6bSPrashanth Sreenivasa ret = B_TRUE; 39975cabbc6bSPrashanth Sreenivasa break; 39985cabbc6bSPrashanth Sreenivasa } 39995cabbc6bSPrashanth Sreenivasa } 40005cabbc6bSPrashanth Sreenivasa spa_config_exit(spa, SCL_VDEV, FTAG); 40015cabbc6bSPrashanth Sreenivasa 40025cabbc6bSPrashanth Sreenivasa return (ret); 40035cabbc6bSPrashanth Sreenivasa } 40045cabbc6bSPrashanth Sreenivasa 40055cabbc6bSPrashanth Sreenivasa /* 40065cabbc6bSPrashanth Sreenivasa * Remap any existing BP's to concrete vdevs, if possible. 
40075cabbc6bSPrashanth Sreenivasa */ 40085cabbc6bSPrashanth Sreenivasa static void 40095cabbc6bSPrashanth Sreenivasa dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx) 40105cabbc6bSPrashanth Sreenivasa { 40115cabbc6bSPrashanth Sreenivasa spa_t *spa = dmu_objset_spa(db->db_objset); 40125cabbc6bSPrashanth Sreenivasa ASSERT(dsl_pool_sync_context(spa_get_dsl(spa))); 40135cabbc6bSPrashanth Sreenivasa 40145cabbc6bSPrashanth Sreenivasa if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)) 40155cabbc6bSPrashanth Sreenivasa return; 40165cabbc6bSPrashanth Sreenivasa 40175cabbc6bSPrashanth Sreenivasa if (db->db_level > 0) { 40185cabbc6bSPrashanth Sreenivasa blkptr_t *bp = db->db.db_data; 40195cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) { 40205cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &bp[i], tx); 40215cabbc6bSPrashanth Sreenivasa } 40225cabbc6bSPrashanth Sreenivasa } else if (db->db.db_object == DMU_META_DNODE_OBJECT) { 40235cabbc6bSPrashanth Sreenivasa dnode_phys_t *dnp = db->db.db_data; 40245cabbc6bSPrashanth Sreenivasa ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==, 40255cabbc6bSPrashanth Sreenivasa DMU_OT_DNODE); 40265cabbc6bSPrashanth Sreenivasa for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) { 40275cabbc6bSPrashanth Sreenivasa for (int j = 0; j < dnp[i].dn_nblkptr; j++) { 40285cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx); 40295cabbc6bSPrashanth Sreenivasa } 40305cabbc6bSPrashanth Sreenivasa } 40315cabbc6bSPrashanth Sreenivasa } 40325cabbc6bSPrashanth Sreenivasa } 40335cabbc6bSPrashanth Sreenivasa 40345cabbc6bSPrashanth Sreenivasa 40353e30c24aSWill Andrews /* Issue I/O to commit a dirty buffer to disk. */ 4036b24ab676SJeff Bonwick static void 4037b24ab676SJeff Bonwick dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) 4038b24ab676SJeff Bonwick { 4039b24ab676SJeff Bonwick dmu_buf_impl_t *db = dr->dr_dbuf; 4040744947dcSTom Erickson dnode_t *dn; 4041744947dcSTom Erickson objset_t *os; 4042b24ab676SJeff Bonwick dmu_buf_impl_t *parent = db->db_parent; 4043b24ab676SJeff Bonwick uint64_t txg = tx->tx_txg; 40447802d7bfSMatthew Ahrens zbookmark_phys_t zb; 4045b24ab676SJeff Bonwick zio_prop_t zp; 4046b24ab676SJeff Bonwick zio_t *zio; 40470a586ceaSMark Shellenbaum int wp_flag = 0; 4048b24ab676SJeff Bonwick 404911ceac77SAlex Reece ASSERT(dmu_tx_is_syncing(tx)); 405011ceac77SAlex Reece 4051744947dcSTom Erickson DB_DNODE_ENTER(db); 4052744947dcSTom Erickson dn = DB_DNODE(db); 4053744947dcSTom Erickson os = dn->dn_objset; 4054744947dcSTom Erickson 4055b24ab676SJeff Bonwick if (db->db_state != DB_NOFILL) { 4056b24ab676SJeff Bonwick if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) { 4057b24ab676SJeff Bonwick /* 4058b24ab676SJeff Bonwick * Private object buffers are released here rather 4059b24ab676SJeff Bonwick * than in dbuf_dirty() since they are only modified 4060b24ab676SJeff Bonwick * in the syncing context and we don't want the 4061b24ab676SJeff Bonwick * overhead of making multiple copies of the data. 
4062b24ab676SJeff Bonwick */ 4063b24ab676SJeff Bonwick if (BP_IS_HOLE(db->db_blkptr)) { 4064b24ab676SJeff Bonwick arc_buf_thaw(data); 4065b24ab676SJeff Bonwick } else { 40663f9d6ad7SLin Ling dbuf_release_bp(db); 4067b24ab676SJeff Bonwick } 40685cabbc6bSPrashanth Sreenivasa dbuf_remap(dn, db, tx); 4069b24ab676SJeff Bonwick } 4070b24ab676SJeff Bonwick } 4071b24ab676SJeff Bonwick 4072b24ab676SJeff Bonwick if (parent != dn->dn_dbuf) { 40733e30c24aSWill Andrews /* Our parent is an indirect block. */ 40743e30c24aSWill Andrews /* We have a dirty parent that has been scheduled for write. */ 4075b24ab676SJeff Bonwick ASSERT(parent && parent->db_data_pending); 40763e30c24aSWill Andrews /* Our parent's buffer is one level closer to the dnode. */ 4077b24ab676SJeff Bonwick ASSERT(db->db_level == parent->db_level-1); 40783e30c24aSWill Andrews /* 40793e30c24aSWill Andrews * We're about to modify our parent's db_data by modifying 40803e30c24aSWill Andrews * our block pointer, so the parent must be released. 40813e30c24aSWill Andrews */ 4082b24ab676SJeff Bonwick ASSERT(arc_released(parent->db_buf)); 4083b24ab676SJeff Bonwick zio = parent->db_data_pending->dr_zio; 4084b24ab676SJeff Bonwick } else { 40853e30c24aSWill Andrews /* Our parent is the dnode itself. */ 40860a586ceaSMark Shellenbaum ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && 40870a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) || 40880a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); 40890a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 4090b24ab676SJeff Bonwick ASSERT3P(db->db_blkptr, ==, 4091b24ab676SJeff Bonwick &dn->dn_phys->dn_blkptr[db->db_blkid]); 4092b24ab676SJeff Bonwick zio = dn->dn_zio; 4093b24ab676SJeff Bonwick } 4094b24ab676SJeff Bonwick 4095b24ab676SJeff Bonwick ASSERT(db->db_level == 0 || data == db->db_buf); 4096b24ab676SJeff Bonwick ASSERT3U(db->db_blkptr->blk_birth, <=, txg); 4097b24ab676SJeff Bonwick ASSERT(zio); 4098b24ab676SJeff Bonwick 4099b24ab676SJeff Bonwick SET_BOOKMARK(&zb, os->os_dsl_dataset ? 4100b24ab676SJeff Bonwick os->os_dsl_dataset->ds_object : DMU_META_OBJSET, 4101b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 4102b24ab676SJeff Bonwick 41030a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 41040a586ceaSMark Shellenbaum wp_flag = WP_SPILL; 41050a586ceaSMark Shellenbaum wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0; 41060a586ceaSMark Shellenbaum 4107adaec86aSMatthew Ahrens dmu_write_policy(os, dn, db->db_level, wp_flag, &zp); 4108eb633035STom Caputi 4109744947dcSTom Erickson DB_DNODE_EXIT(db); 4110b24ab676SJeff Bonwick 411111ceac77SAlex Reece /* 411211ceac77SAlex Reece * We copy the blkptr now (rather than when we instantiate the dirty 411311ceac77SAlex Reece * record), because its value can change between open context and 411411ceac77SAlex Reece * syncing context. We do not need to hold dn_struct_rwlock to read 411511ceac77SAlex Reece * db_blkptr because we are in syncing context. 411611ceac77SAlex Reece */ 411711ceac77SAlex Reece dr->dr_bp_copy = *db->db_blkptr; 411811ceac77SAlex Reece 41195d7b4d43SMatthew Ahrens if (db->db_level == 0 && 41205d7b4d43SMatthew Ahrens dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 41215d7b4d43SMatthew Ahrens /* 41225d7b4d43SMatthew Ahrens * The BP for this block has been provided by open context 41235d7b4d43SMatthew Ahrens * (by dmu_sync() or dmu_buf_write_embedded()). 41245d7b4d43SMatthew Ahrens */ 4125770499e1SDan Kimmel abd_t *contents = (data != NULL) ? 
4126770499e1SDan Kimmel abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL; 41275d7b4d43SMatthew Ahrens 41285602294fSDan Kimmel dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy, 41295602294fSDan Kimmel contents, db->db.db_size, db->db.db_size, &zp, 41308df0bcf0SPaul Dagnelie dbuf_write_override_ready, NULL, NULL, 41318df0bcf0SPaul Dagnelie dbuf_write_override_done, 413269962b56SMatthew Ahrens dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 4133b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 4134b24ab676SJeff Bonwick dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 4135b24ab676SJeff Bonwick zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by, 413680901aeaSGeorge Wilson dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite); 4137b24ab676SJeff Bonwick mutex_exit(&db->db_mtx); 4138b24ab676SJeff Bonwick } else if (db->db_state == DB_NOFILL) { 4139810e43b2SBill Pijewski ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF || 4140810e43b2SBill Pijewski zp.zp_checksum == ZIO_CHECKSUM_NOPARITY); 4141b24ab676SJeff Bonwick dr->dr_zio = zio_write(zio, os->os_spa, txg, 41425602294fSDan Kimmel &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp, 41438df0bcf0SPaul Dagnelie dbuf_write_nofill_ready, NULL, NULL, 41448df0bcf0SPaul Dagnelie dbuf_write_nofill_done, db, 4145b24ab676SJeff Bonwick ZIO_PRIORITY_ASYNC_WRITE, 4146b24ab676SJeff Bonwick ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); 4147b24ab676SJeff Bonwick } else { 4148b24ab676SJeff Bonwick ASSERT(arc_released(data)); 41498df0bcf0SPaul Dagnelie 41508df0bcf0SPaul Dagnelie /* 41518df0bcf0SPaul Dagnelie * For indirect blocks, we want to setup the children 41528df0bcf0SPaul Dagnelie * ready callback so that we can properly handle an indirect 41538df0bcf0SPaul Dagnelie * block that only contains holes. 41548df0bcf0SPaul Dagnelie */ 4155a3874b8bSToomas Soome arc_write_done_func_t *children_ready_cb = NULL; 41568df0bcf0SPaul Dagnelie if (db->db_level != 0) 41578df0bcf0SPaul Dagnelie children_ready_cb = dbuf_write_children_ready; 41588df0bcf0SPaul Dagnelie 4159b24ab676SJeff Bonwick dr->dr_zio = arc_write(zio, os->os_spa, txg, 416011ceac77SAlex Reece &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db), 4161dcbf3bd6SGeorge Wilson &zp, dbuf_write_ready, children_ready_cb, 416269962b56SMatthew Ahrens dbuf_write_physdone, dbuf_write_done, db, 416369962b56SMatthew Ahrens ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); 4164b24ab676SJeff Bonwick } 4165fa9e4066Sahrens } 4166
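/*
 * The dbuf_write_children_ready() comment above describes collapsing an
 * all-hole indirect block: when every child block pointer is a hole, the
 * buffer is zeroed so that the compression stage can turn the indirect
 * itself into a hole.  The user-space sketch below only illustrates that
 * check-then-zero idea; blkptr_sim_t, bp_is_hole() and zero_if_all_holes()
 * are simplified stand-ins invented for this illustration, not ZFS
 * interfaces, and the real function additionally holds dn_struct_rwlock
 * while zeroing, as shown above.
 */
#include <stdint.h>
#include <string.h>

typedef struct blkptr_sim {
	uint64_t bp_birth;		/* 0 means the pointer is a hole */
} blkptr_sim_t;

static int
bp_is_hole(const blkptr_sim_t *bp)
{
	return (bp->bp_birth == 0);
}

/*
 * Zero the buffer only if every child pointer is a hole; returns nonzero
 * when the buffer was zeroed (and can therefore compress away to a hole).
 */
static int
zero_if_all_holes(blkptr_sim_t *bps, unsigned int nbps)
{
	unsigned int i;

	for (i = 0; i < nbps; i++) {
		if (!bp_is_hole(&bps[i]))
			return (0);
	}
	memset(bps, 0, nbps * sizeof (blkptr_sim_t));
	return (1);
}

int
main(void)
{
	blkptr_sim_t bps[4] = { { 0 } };	/* four holes */

	return (zero_if_all_holes(bps, 4) ? 0 : 1);
}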
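/*
 * dbuf_write_physdone() above retires dr_accounted / io_phys_children
 * bytes of dirty space each time one physical child i/o completes, and its
 * comment notes that any rounding error is cleaned up later by
 * dsl_pool_sync()'s call to dsl_pool_undirty_space().  The stand-alone
 * model below only demonstrates that arithmetic; retire_share() and the
 * sample numbers are invented for illustration.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
retire_share(uint64_t accounted, int phys_children)
{
	/* Equal share of the accounted dirty bytes per completion. */
	return (accounted / phys_children);
}

int
main(void)
{
	uint64_t accounted = 131075;	/* dirty bytes for this record */
	int phys_children = 2;		/* e.g. two mirrored copies */
	uint64_t undirtied = 0;

	for (int i = 0; i < phys_children; i++)
		undirtied += retire_share(accounted, phys_children);

	/* The remainder is left for the end-of-sync cleanup. */
	assert(accounted - undirtied < (uint64_t)phys_children);
	return (0);
}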