/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <sys/cityhash.h>
#include <sys/spa_impl.h>

uint_t zfs_dbuf_evict_key;

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

#ifndef __lint
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);
#endif /* ! __lint */
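
/*
 * Illustrative sketch only: a hypothetical consumer of the dbuf user
 * interface would embed a dmu_buf_user_t in its own structure and
 * register it roughly like this (my_evict_sync, my_evict_async, and the
 * ms_* fields are made-up names, not part of this file):
 *
 *	dmu_buf_init_user(&ms->ms_dbu, my_evict_sync, my_evict_async,
 *	    &ms->ms_db);
 *	VERIFY3P(dmu_buf_set_user(db, &ms->ms_dbu), ==, NULL);
 *
 * The sync callback runs in the evicting thread's context; the async
 * callback is dispatched to dbu_evict_taskq (see dbuf_evict_user()).
 */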

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 * that represent the metadata that describes filesystems/snapshots/
 * bookmarks/properties/etc. We only evict from this cache when we export a
 * pool, to short-circuit as much I/O as possible for all administrative
 * commands that need the metadata. There is no eviction policy for this
 * cache, because we try to only include types in it which would occupy a
 * very small amount of space per object but create a large impact on the
 * performance of these commands. Instead, after it reaches a maximum size
 * (which should only happen on very small memory systems with a very large
 * number of filesystem objects), we stop taking new dbufs into the
 * metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The "dbuf cache" maintains a list of dbufs that
 * are not currently held but have been recently released. These dbufs
 * are not eligible for arc eviction until they are aged out of the cache.
 * Dbufs that are aged out of the cache will be immediately destroyed and
 * become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t *cache;
	refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
uint64_t dbuf_cache_max_bytes = 0;
uint64_t dbuf_metadata_cache_max_bytes = 0;
/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;
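
/*
 * For example (straightforward arithmetic, assuming the default shifts
 * above): with a 4 GiB ARC, the defaults work out to a 128 MiB dbuf
 * cache (4 GiB >> 5) and a 64 MiB metadata cache (4 GiB >> 6).  The
 * shifts are applied in dbuf_init() below.
 */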

/*
 * For diagnostic purposes, this is incremented whenever we can't add
 * something to the metadata cache because it's full, and instead put
 * the data in the regular dbuf cache.
 */
uint64_t dbuf_metadata_cache_overflow;

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                        evicting    eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
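
/*
 * Worked example (assuming the default percentages below): if
 * dbuf_cache_max_bytes is 100 MiB, the low water mark is 90 MiB and the
 * high water mark is 110 MiB.  The eviction thread evicts whenever the
 * cache exceeds 90 MiB; callers signal it once the cache exceeds
 * 100 MiB, and above 110 MiB they also evict one dbuf themselves on
 * each insertion (see dbuf_evict_notify()).
 */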

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = dbuf_hash(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_inc_64(&dbuf_hash_count);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_hash_count);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_max_bytes) {
			dbuf_metadata_cache_overflow++;
			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
			    dmu_buf_impl_t *, db);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist.  This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly.  Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

static inline boolean_t
dbuf_cache_above_hiwater(void)
{
	uint64_t dbuf_cache_hiwater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	uint64_t dbuf_cache_lowater_bytes =
	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	/*
	 * Set the thread's tsd to indicate that it's processing evictions.
	 * Once a thread stops evicting from the dbuf cache it will
	 * reset its tsd to NULL.
	 */
	ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
	(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) refcount_remove_many(&dbuf_caches[DB_DBUF_CACHE].size,
		    db->db.db_size, db);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
	} else {
		multilist_sublist_unlock(mls);
	}
	(void) tsd_set(zfs_dbuf_evict_key, NULL);
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache.  Once the cache has reached its maximum size, dbufs are removed
 * and destroyed.  The eviction thread will continue running until the size
 * of the dbuf cache is at or below the maximum size.  Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache.  We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(void)
{

	/*
	 * We use thread specific data to track when a thread has
	 * started processing evictions. This allows us to avoid deeply
	 * nested stacks that would have a call flow similar to this:
	 *
	 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
	 *	^						|
	 *	|						|
	 *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
	 *
	 * The dbuf_eviction_thread will always have its tsd set until
	 * that thread exits. All other threads will only set their tsd
	 * if they are participating in the eviction process. This only
	 * happens if the eviction thread is unable to process evictions
	 * fast enough. To keep the dbuf cache size in check, other threads
	 * can evict from the dbuf cache directly. Those threads will set
	 * their tsd values so that we ensure that they only evict one dbuf
	 * from the dbuf cache.
	 */
	if (tsd_get(zfs_dbuf_evict_key) != NULL)
		return;

	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_max_bytes) {
		if (dbuf_cache_above_hiwater())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
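	/*
	 * For example (illustrative arithmetic): with 16 GiB of physical
	 * memory, the loop below grows hsize to 2^22 buckets
	 * (2^22 * 4K == 16 GiB), so the bucket array itself consumes
	 * 2^22 * 8 bytes == 32 MiB.
	 */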
	while (hsize * 4096 < physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set up the parameters for the dbuf caches.  We set the sizes of
	 * the dbuf cache and the metadata cache to 1/32nd and 1/64th
	 * (default) of the size of the ARC, respectively.  If the values are
	 * set in /etc/system and they're not greater than the size of the
	 * ARC, then we honor that value.
	 */
	if (dbuf_cache_max_bytes == 0 ||
	    dbuf_cache_max_bytes >= arc_max_bytes()) {
		dbuf_cache_max_bytes = arc_max_bytes() >> dbuf_cache_shift;
	}
	if (dbuf_metadata_cache_max_bytes == 0 ||
	    dbuf_metadata_cache_max_bytes >= arc_max_bytes()) {
		dbuf_metadata_cache_max_bytes =
		    arc_max_bytes() >> dbuf_metadata_cache_shift;
	}

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, minclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		dbuf_caches[dcs].cache =
		    multilist_create(sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		refcount_create(&dbuf_caches[dcs].size);
	}

	tsd_create(&zfs_dbuf_evict_key, NULL);
	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);
	tsd_destroy(&zfs_dbuf_evict_key);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(dbuf_caches[dcs].cache);
	}
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL)
		db->db_state = DB_UNCACHED;
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(dnode_t *dn, int64_t level, uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */
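		/*
		 * For example (illustrative numbers): with 128K data blocks
		 * (datablkshift == 17) and 128K indirect blocks
		 * (indblkshift == 17), each indirect block holds
		 * 2^(17 - SPA_BLKPTRSHIFT) == 2^10 block pointers, so the
		 * level-1 blkid for an offset is offset >> (17 + 10).
		 */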
*/ 1028088f3894Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 1029ea8dc4b6Seschrock ASSERT(MUTEX_HELD(&db->db_mtx)); 1030ea8dc4b6Seschrock ASSERT(db->db_state == DB_UNCACHED); 1031ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1032fa9e4066Sahrens 10330a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 1034cf04dda1SMark Maybee int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen); 10351934e92fSmaybee 10361934e92fSmaybee ASSERT3U(bonuslen, <=, db->db.db_size); 1037ea8dc4b6Seschrock db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN); 10385a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 10391934e92fSmaybee if (bonuslen < DN_MAX_BONUSLEN) 1040ea8dc4b6Seschrock bzero(db->db.db_data, DN_MAX_BONUSLEN); 1041cf04dda1SMark Maybee if (bonuslen) 1042cf04dda1SMark Maybee bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen); 1043744947dcSTom Erickson DB_DNODE_EXIT(db); 1044fa9e4066Sahrens db->db_state = DB_CACHED; 1045fa9e4066Sahrens mutex_exit(&db->db_mtx); 1046fa9e4066Sahrens return; 1047fa9e4066Sahrens } 1048fa9e4066Sahrens 10491c8564a7SMark Maybee /* 10501c8564a7SMark Maybee * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync() 10511c8564a7SMark Maybee * processes the delete record and clears the bp while we are waiting 10521c8564a7SMark Maybee * for the dn_mtx (resulting in a "no" from block_freed). 10531c8564a7SMark Maybee */ 1054088f3894Sahrens if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) || 10551c8564a7SMark Maybee (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || 10561c8564a7SMark Maybee BP_IS_HOLE(db->db_blkptr)))) { 1057ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1058ad23a2dbSjohansen 10595602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type, 10605602294fSDan Kimmel db->db.db_size)); 1061fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 10628df0bcf0SPaul Dagnelie 10638df0bcf0SPaul Dagnelie if (db->db_blkptr != NULL && db->db_level > 0 && 10648df0bcf0SPaul Dagnelie BP_IS_HOLE(db->db_blkptr) && 10658df0bcf0SPaul Dagnelie db->db_blkptr->blk_birth != 0) { 10668df0bcf0SPaul Dagnelie blkptr_t *bps = db->db.db_data; 10678df0bcf0SPaul Dagnelie for (int i = 0; i < ((1 << 10688df0bcf0SPaul Dagnelie DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t)); 10698df0bcf0SPaul Dagnelie i++) { 10708df0bcf0SPaul Dagnelie blkptr_t *bp = &bps[i]; 10718df0bcf0SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, 10728df0bcf0SPaul Dagnelie 1 << dn->dn_indblkshift); 10738df0bcf0SPaul Dagnelie BP_SET_LSIZE(bp, 10748df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) == 1 ? 
10758df0bcf0SPaul Dagnelie dn->dn_datablksz : 10768df0bcf0SPaul Dagnelie BP_GET_LSIZE(db->db_blkptr)); 10778df0bcf0SPaul Dagnelie BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr)); 10788df0bcf0SPaul Dagnelie BP_SET_LEVEL(bp, 10798df0bcf0SPaul Dagnelie BP_GET_LEVEL(db->db_blkptr) - 1); 10808df0bcf0SPaul Dagnelie BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0); 10818df0bcf0SPaul Dagnelie } 10828df0bcf0SPaul Dagnelie } 10838df0bcf0SPaul Dagnelie DB_DNODE_EXIT(db); 1084fa9e4066Sahrens db->db_state = DB_CACHED; 1085fa9e4066Sahrens mutex_exit(&db->db_mtx); 1086fa9e4066Sahrens return; 1087fa9e4066Sahrens } 1088fa9e4066Sahrens 1089744947dcSTom Erickson DB_DNODE_EXIT(db); 1090744947dcSTom Erickson 1091fa9e4066Sahrens db->db_state = DB_READ; 1092fa9e4066Sahrens mutex_exit(&db->db_mtx); 1093fa9e4066Sahrens 10943baa08fcSek110237 if (DBUF_IS_L2CACHEABLE(db)) 10957adb730bSGeorge Wilson aflags |= ARC_FLAG_L2CACHE; 10963baa08fcSek110237 1097b24ab676SJeff Bonwick SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ? 1098b24ab676SJeff Bonwick db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET, 1099b24ab676SJeff Bonwick db->db.db_object, db->db_level, db->db_blkid); 1100ea8dc4b6Seschrock 1101ea8dc4b6Seschrock dbuf_add_ref(db, NULL); 1102088f3894Sahrens 110343466aaeSMax Grossman (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr, 1104fa9e4066Sahrens dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, 1105cf6106c8SMatthew Ahrens (flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED, 110613506d1eSmaybee &aflags, &zb); 1107fa9e4066Sahrens } 1108fa9e4066Sahrens 11095602294fSDan Kimmel /* 11105602294fSDan Kimmel * This is our just-in-time copy function. It makes a copy of buffers that 11115602294fSDan Kimmel * have been modified in a previous transaction group before we access them in 11125602294fSDan Kimmel * the current active group. 11135602294fSDan Kimmel * 11145602294fSDan Kimmel * This function is used in three places: when we are dirtying a buffer for the 11155602294fSDan Kimmel * first time in a txg, when we are freeing a range in a dnode that includes 11165602294fSDan Kimmel * this buffer, and when we are accessing a buffer which was received compressed 11175602294fSDan Kimmel * and later referenced in a WRITE_BYREF record. 11185602294fSDan Kimmel * 11195602294fSDan Kimmel * Note that when we are called from dbuf_free_range() we do not put a hold on 11205602294fSDan Kimmel * the buffer, we just traverse the active dbuf list for the dnode. 11215602294fSDan Kimmel */ 11225602294fSDan Kimmel static void 11235602294fSDan Kimmel dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 11245602294fSDan Kimmel { 11255602294fSDan Kimmel dbuf_dirty_record_t *dr = db->db_last_dirty; 11265602294fSDan Kimmel 11275602294fSDan Kimmel ASSERT(MUTEX_HELD(&db->db_mtx)); 11285602294fSDan Kimmel ASSERT(db->db.db_data != NULL); 11295602294fSDan Kimmel ASSERT(db->db_level == 0); 11305602294fSDan Kimmel ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 11315602294fSDan Kimmel 11325602294fSDan Kimmel if (dr == NULL || 11335602294fSDan Kimmel (dr->dt.dl.dr_data != 11345602294fSDan Kimmel ((db->db_blkid == DMU_BONUS_BLKID) ? 
db->db.db_data : db->db_buf))) 11355602294fSDan Kimmel return; 11365602294fSDan Kimmel 11375602294fSDan Kimmel /* 11385602294fSDan Kimmel * If the last dirty record for this dbuf has not yet synced 11395602294fSDan Kimmel * and it's referencing the dbuf data, either: 11405602294fSDan Kimmel * reset the reference to point to a new copy, 11415602294fSDan Kimmel * or (if there are no active holders) 11425602294fSDan Kimmel * just null out the current db_data pointer. 11435602294fSDan Kimmel */ 11445602294fSDan Kimmel ASSERT(dr->dr_txg >= txg - 2); 11455602294fSDan Kimmel if (db->db_blkid == DMU_BONUS_BLKID) { 11465602294fSDan Kimmel /* Note that the data bufs here are zio_bufs */ 11475602294fSDan Kimmel dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN); 11485602294fSDan Kimmel arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 11495602294fSDan Kimmel bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN); 11505602294fSDan Kimmel } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 11515602294fSDan Kimmel int size = arc_buf_size(db->db_buf); 11525602294fSDan Kimmel arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 11535602294fSDan Kimmel spa_t *spa = db->db_objset->os_spa; 11545602294fSDan Kimmel enum zio_compress compress_type = 11555602294fSDan Kimmel arc_get_compression(db->db_buf); 11565602294fSDan Kimmel 11575602294fSDan Kimmel if (compress_type == ZIO_COMPRESS_OFF) { 11585602294fSDan Kimmel dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size); 11595602294fSDan Kimmel } else { 11605602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 11615602294fSDan Kimmel dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db, 11625602294fSDan Kimmel size, arc_buf_lsize(db->db_buf), compress_type); 11635602294fSDan Kimmel } 11645602294fSDan Kimmel bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 11655602294fSDan Kimmel } else { 11665602294fSDan Kimmel db->db_buf = NULL; 11675602294fSDan Kimmel dbuf_clear_data(db); 11685602294fSDan Kimmel } 11695602294fSDan Kimmel } 11705602294fSDan Kimmel 1171ea8dc4b6Seschrock int 1172ea8dc4b6Seschrock dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags) 1173fa9e4066Sahrens { 1174ea8dc4b6Seschrock int err = 0; 117543466aaeSMax Grossman boolean_t prefetch; 1176744947dcSTom Erickson dnode_t *dn; 1177fa9e4066Sahrens 1178fa9e4066Sahrens /* 1179fa9e4066Sahrens * We don't have to hold the mutex to check db_state because it 1180fa9e4066Sahrens * can't be freed while we have a hold on the buffer. 1181fa9e4066Sahrens */ 1182fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1183fa9e4066Sahrens 118482c9918fSTim Haley if (db->db_state == DB_NOFILL) 1185be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 118682c9918fSTim Haley 1187744947dcSTom Erickson DB_DNODE_ENTER(db); 1188744947dcSTom Erickson dn = DB_DNODE(db); 1189fa9e4066Sahrens if ((flags & DB_RF_HAVESTRUCT) == 0) 1190744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_READER); 1191fa9e4066Sahrens 11920a586ceaSMark Shellenbaum prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 1193744947dcSTom Erickson (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL && 11943baa08fcSek110237 DBUF_IS_CACHEABLE(db); 119513506d1eSmaybee 1196fa9e4066Sahrens mutex_enter(&db->db_mtx); 1197ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 11985602294fSDan Kimmel /* 11995602294fSDan Kimmel * If the arc buf is compressed, we need to decompress it to 12005602294fSDan Kimmel * read the data.
This could happen during the "zfs receive" of 12015602294fSDan Kimmel * a stream which is compressed and deduplicated. 12025602294fSDan Kimmel */ 12035602294fSDan Kimmel if (db->db_buf != NULL && 12045602294fSDan Kimmel arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF) { 12055602294fSDan Kimmel dbuf_fix_old_data(db, 12065602294fSDan Kimmel spa_syncing_txg(dmu_objset_spa(db->db_objset))); 12075602294fSDan Kimmel err = arc_decompress(db->db_buf); 12085602294fSDan Kimmel dbuf_set_data(db, db->db_buf); 12095602294fSDan Kimmel } 1210ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 121113506d1eSmaybee if (prefetch) 1212cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1213ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1214744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1215744947dcSTom Erickson DB_DNODE_EXIT(db); 1216ea8dc4b6Seschrock } else if (db->db_state == DB_UNCACHED) { 1217744947dcSTom Erickson spa_t *spa = dn->dn_objset->os_spa; 1218def4fac5SMatthew Ahrens boolean_t need_wait = B_FALSE; 1219744947dcSTom Erickson 1220def4fac5SMatthew Ahrens if (zio == NULL && 1221def4fac5SMatthew Ahrens db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) { 1222744947dcSTom Erickson zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1223def4fac5SMatthew Ahrens need_wait = B_TRUE; 1224def4fac5SMatthew Ahrens } 1225cf6106c8SMatthew Ahrens dbuf_read_impl(db, zio, flags); 122613506d1eSmaybee 1227ea8dc4b6Seschrock /* dbuf_read_impl has dropped db_mtx for us */ 1228ea8dc4b6Seschrock 122913506d1eSmaybee if (prefetch) 1230cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1231ea8dc4b6Seschrock 1232ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1233744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1234744947dcSTom Erickson DB_DNODE_EXIT(db); 1235ea8dc4b6Seschrock 1236def4fac5SMatthew Ahrens if (need_wait) 1237ea8dc4b6Seschrock err = zio_wait(zio); 1238ea8dc4b6Seschrock } else { 12393e30c24aSWill Andrews /* 12403e30c24aSWill Andrews * Another reader came in while the dbuf was in flight 12413e30c24aSWill Andrews * between UNCACHED and CACHED. Either a writer will finish 12423e30c24aSWill Andrews * writing the buffer (sending the dbuf to CACHED) or the 12433e30c24aSWill Andrews * first reader's request will reach the read_done callback 12443e30c24aSWill Andrews * and send the dbuf to CACHED. Otherwise, a failure 12453e30c24aSWill Andrews * occurred and the dbuf went to UNCACHED. 12463e30c24aSWill Andrews */ 124713506d1eSmaybee mutex_exit(&db->db_mtx); 124813506d1eSmaybee if (prefetch) 1249cb92f413SAlexander Motin dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE); 1250ea8dc4b6Seschrock if ((flags & DB_RF_HAVESTRUCT) == 0) 1251744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 1252744947dcSTom Erickson DB_DNODE_EXIT(db); 125313506d1eSmaybee 12543e30c24aSWill Andrews /* Skip the wait per the caller's request. */ 125513506d1eSmaybee mutex_enter(&db->db_mtx); 1256ea8dc4b6Seschrock if ((flags & DB_RF_NEVERWAIT) == 0) { 1257ea8dc4b6Seschrock while (db->db_state == DB_READ || 1258ea8dc4b6Seschrock db->db_state == DB_FILL) { 1259fa9e4066Sahrens ASSERT(db->db_state == DB_READ || 1260fa9e4066Sahrens (flags & DB_RF_HAVESTRUCT) == 0); 1261f6164ad6SAdam H. Leventhal DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, 1262f6164ad6SAdam H. 
Leventhal db, zio_t *, zio); 1263fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1264fa9e4066Sahrens } 1265ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED) 1266be6fd75aSMatthew Ahrens err = SET_ERROR(EIO); 1267ea8dc4b6Seschrock } 1268fa9e4066Sahrens mutex_exit(&db->db_mtx); 1269fa9e4066Sahrens } 1270fa9e4066Sahrens 1271ea8dc4b6Seschrock return (err); 1272fa9e4066Sahrens } 1273fa9e4066Sahrens 1274fa9e4066Sahrens static void 1275fa9e4066Sahrens dbuf_noread(dmu_buf_impl_t *db) 1276fa9e4066Sahrens { 1277fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 12780a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1279fa9e4066Sahrens mutex_enter(&db->db_mtx); 1280fa9e4066Sahrens while (db->db_state == DB_READ || db->db_state == DB_FILL) 1281fa9e4066Sahrens cv_wait(&db->db_changed, &db->db_mtx); 1282fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 1283ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 128443466aaeSMax Grossman spa_t *spa = db->db_objset->os_spa; 1285ad23a2dbSjohansen 1286ea8dc4b6Seschrock ASSERT(db->db_buf == NULL); 1287fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 12885602294fSDan Kimmel dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size)); 1289fa9e4066Sahrens db->db_state = DB_FILL; 129082c9918fSTim Haley } else if (db->db_state == DB_NOFILL) { 1291bc9014e6SJustin Gibbs dbuf_clear_data(db); 1292fa9e4066Sahrens } else { 1293fa9e4066Sahrens ASSERT3U(db->db_state, ==, DB_CACHED); 1294fa9e4066Sahrens } 1295fa9e4066Sahrens mutex_exit(&db->db_mtx); 1296fa9e4066Sahrens } 1297fa9e4066Sahrens 1298fa9e4066Sahrens void 1299c717a561Smaybee dbuf_unoverride(dbuf_dirty_record_t *dr) 1300fa9e4066Sahrens { 1301c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 1302b24ab676SJeff Bonwick blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 1303c717a561Smaybee uint64_t txg = dr->dr_txg; 1304c5c6ffa0Smaybee 1305c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 130640713f2bSAlan Somers /* 130740713f2bSAlan Somers * This assert is valid because dmu_sync() expects to be called by 130840713f2bSAlan Somers * a zilog's get_data while holding a range lock. This call only 130940713f2bSAlan Somers * comes from dbuf_dirty() callers who must also hold a range lock. 131040713f2bSAlan Somers */ 1311c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 1312c717a561Smaybee ASSERT(db->db_level == 0); 1313c717a561Smaybee 13140a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 1315c717a561Smaybee dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 1316c717a561Smaybee return; 1317c717a561Smaybee 1318b24ab676SJeff Bonwick ASSERT(db->db_data_pending != dr); 1319b24ab676SJeff Bonwick 1320fa9e4066Sahrens /* free this block */ 132143466aaeSMax Grossman if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) 132243466aaeSMax Grossman zio_free(db->db_objset->os_spa, txg, bp); 1323b24ab676SJeff Bonwick 1324c717a561Smaybee dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 132580901aeaSGeorge Wilson dr->dt.dl.dr_nopwrite = B_FALSE; 132680901aeaSGeorge Wilson 13276b4acc8bSahrens /* 13286b4acc8bSahrens * Release the already-written buffer, so we leave it in 13296b4acc8bSahrens * a consistent dirty state. Note that all callers are 13306b4acc8bSahrens * modifying the buffer, so they will immediately do 13316b4acc8bSahrens * another (redundant) arc_release(). Therefore, leave 13326b4acc8bSahrens * the buf thawed to save the effort of freezing & 13336b4acc8bSahrens * immediately re-thawing it. 
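 *
 * (Assumed background on the freeze/thaw protocol referenced here:
 * arc_buf_freeze() checksums a quiescent buf so debug builds can
 * catch stray late writes, and arc_release()/arc_buf_thaw() make it
 * mutable again; since every caller immediately re-dirties this buf,
 * a freeze here would be undone right away.)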
13346b4acc8bSahrens */ 1335c717a561Smaybee arc_release(dr->dt.dl.dr_data, db); 1336fa9e4066Sahrens } 1337fa9e4066Sahrens 1338cdb0ab79Smaybee /* 1339cdb0ab79Smaybee * Evict (if it's unreferenced) or clear (if it's referenced) any level-0 1340cdb0ab79Smaybee * data blocks in the free range, so that any future readers will find 134143466aaeSMax Grossman * empty blocks. 1342cdb0ab79Smaybee */ 1343fa9e4066Sahrens void 13440f6d88adSAlex Reece dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid, 13450f6d88adSAlex Reece dmu_tx_t *tx) 1346fa9e4066Sahrens { 1347bc9014e6SJustin Gibbs dmu_buf_impl_t db_search; 1348bc9014e6SJustin Gibbs dmu_buf_impl_t *db, *db_next; 1349fa9e4066Sahrens uint64_t txg = tx->tx_txg; 13500f6d88adSAlex Reece avl_index_t where; 1351fa9e4066Sahrens 1352653af1b8SStephen Blinick if (end_blkid > dn->dn_maxblkid && 1353653af1b8SStephen Blinick !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID)) 13540f6d88adSAlex Reece end_blkid = dn->dn_maxblkid; 13550f6d88adSAlex Reece dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid); 13560f6d88adSAlex Reece 13570f6d88adSAlex Reece db_search.db_level = 0; 13580f6d88adSAlex Reece db_search.db_blkid = start_blkid; 135986bb58aeSAlex Reece db_search.db_state = DB_SEARCH; 13602f3d8780SMatthew Ahrens 1361713d6c20SMatthew Ahrens mutex_enter(&dn->dn_dbufs_mtx); 13620f6d88adSAlex Reece db = avl_find(&dn->dn_dbufs, &db_search, &where); 13630f6d88adSAlex Reece ASSERT3P(db, ==, NULL); 13642f3d8780SMatthew Ahrens 13650f6d88adSAlex Reece db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER); 13660f6d88adSAlex Reece 13670f6d88adSAlex Reece for (; db != NULL; db = db_next) { 13680f6d88adSAlex Reece db_next = AVL_NEXT(&dn->dn_dbufs, db); 13690a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1370cdb0ab79Smaybee 13710f6d88adSAlex Reece if (db->db_level != 0 || db->db_blkid > end_blkid) { 13720f6d88adSAlex Reece break; 13730f6d88adSAlex Reece } 13740f6d88adSAlex Reece ASSERT3U(db->db_blkid, >=, start_blkid); 1375fa9e4066Sahrens 1376fa9e4066Sahrens /* found a level 0 buffer in the range */ 1377fa9e4066Sahrens mutex_enter(&db->db_mtx); 13783b2aab18SMatthew Ahrens if (dbuf_undirty(db, tx)) { 13793b2aab18SMatthew Ahrens /* mutex has been dropped and dbuf destroyed */ 13803b2aab18SMatthew Ahrens continue; 13813b2aab18SMatthew Ahrens } 13823b2aab18SMatthew Ahrens 1383ea8dc4b6Seschrock if (db->db_state == DB_UNCACHED || 138482c9918fSTim Haley db->db_state == DB_NOFILL || 1385ea8dc4b6Seschrock db->db_state == DB_EVICTING) { 1386fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 1387fa9e4066Sahrens mutex_exit(&db->db_mtx); 1388fa9e4066Sahrens continue; 1389fa9e4066Sahrens } 1390c543ec06Sahrens if (db->db_state == DB_READ || db->db_state == DB_FILL) { 1391c543ec06Sahrens /* will be handled in dbuf_read_done or dbuf_rele */ 1392c717a561Smaybee db->db_freed_in_flight = TRUE; 1393fa9e4066Sahrens mutex_exit(&db->db_mtx); 1394fa9e4066Sahrens continue; 1395fa9e4066Sahrens } 1396ea8dc4b6Seschrock if (refcount_count(&db->db_holds) == 0) { 1397ea8dc4b6Seschrock ASSERT(db->db_buf); 1398dcbf3bd6SGeorge Wilson dbuf_destroy(db); 1399ea8dc4b6Seschrock continue; 1400ea8dc4b6Seschrock } 1401c717a561Smaybee /* The dbuf is referenced */ 1402fa9e4066Sahrens 1403c717a561Smaybee if (db->db_last_dirty != NULL) { 1404c717a561Smaybee dbuf_dirty_record_t *dr = db->db_last_dirty; 1405c717a561Smaybee 1406c717a561Smaybee if (dr->dr_txg == txg) { 1407ea8dc4b6Seschrock /* 1408c717a561Smaybee * This buffer is "in-use", re-adjust the file
1409c717a561Smaybee * size to reflect that this buffer may 1410c717a561Smaybee * contain new data when we sync. 1411ea8dc4b6Seschrock */ 141206e0070dSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID && 141306e0070dSMark Shellenbaum db->db_blkid > dn->dn_maxblkid) 141444eda4d7Smaybee dn->dn_maxblkid = db->db_blkid; 1415c717a561Smaybee dbuf_unoverride(dr); 1416c717a561Smaybee } else { 1417c717a561Smaybee /* 1418c717a561Smaybee * This dbuf is not dirty in the open context. 1419c717a561Smaybee * Either uncache it (if it's not referenced in 1420c717a561Smaybee * the open context) or reset its contents to 1421c717a561Smaybee * empty. 1422c717a561Smaybee */ 1423c717a561Smaybee dbuf_fix_old_data(db, txg); 142444eda4d7Smaybee } 1425c717a561Smaybee } 1426c717a561Smaybee /* clear the contents if it's cached */ 1427ea8dc4b6Seschrock if (db->db_state == DB_CACHED) { 1428ea8dc4b6Seschrock ASSERT(db->db.db_data != NULL); 1429fa9e4066Sahrens arc_release(db->db_buf, db); 1430fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 14316b4acc8bSahrens arc_buf_freeze(db->db_buf); 1432fa9e4066Sahrens } 1433ea8dc4b6Seschrock 1434fa9e4066Sahrens mutex_exit(&db->db_mtx); 1435fa9e4066Sahrens } 1436fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 1437fa9e4066Sahrens } 1438fa9e4066Sahrens 1439fa9e4066Sahrens void 1440fa9e4066Sahrens dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx) 1441fa9e4066Sahrens { 1442fa9e4066Sahrens arc_buf_t *buf, *obuf; 1443fa9e4066Sahrens int osize = db->db.db_size; 1444ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 1445744947dcSTom Erickson dnode_t *dn; 1446fa9e4066Sahrens 14470a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1448ea8dc4b6Seschrock 1449744947dcSTom Erickson DB_DNODE_ENTER(db); 1450744947dcSTom Erickson dn = DB_DNODE(db); 1451744947dcSTom Erickson 1452fa9e4066Sahrens /* XXX does *this* func really need the lock? */ 1453744947dcSTom Erickson ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 1454fa9e4066Sahrens 1455fa9e4066Sahrens /* 145643466aaeSMax Grossman * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held 1457fa9e4066Sahrens * is OK, because there can be no other references to the db 1458fa9e4066Sahrens * when we are changing its size, so no concurrent DB_FILL can 1459fa9e4066Sahrens * be happening.
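 *
 * (A hedged usage sketch: the assumed caller is dnode_set_blksz(),
 * which holds dn_struct_rwlock as writer and resizes the lone
 * level-0 dbuf with a call like dbuf_new_size(db, new_size, tx).)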
1460fa9e4066Sahrens */ 1461ea8dc4b6Seschrock /* 1462ea8dc4b6Seschrock * XXX we should be doing a dbuf_read, checking the return 1463ea8dc4b6Seschrock * value and returning that up to our callers 1464ea8dc4b6Seschrock */ 146543466aaeSMax Grossman dmu_buf_will_dirty(&db->db, tx); 1466fa9e4066Sahrens 1467fa9e4066Sahrens /* create the data buffer for the new block */ 14685602294fSDan Kimmel buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size); 1469fa9e4066Sahrens 1470fa9e4066Sahrens /* copy old block data to the new block */ 1471fa9e4066Sahrens obuf = db->db_buf; 1472f65e61c0Sahrens bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 1473fa9e4066Sahrens /* zero the remainder */ 1474f65e61c0Sahrens if (size > osize) 1475fa9e4066Sahrens bzero((uint8_t *)buf->b_data + osize, size - osize); 1476fa9e4066Sahrens 1477fa9e4066Sahrens mutex_enter(&db->db_mtx); 1478fa9e4066Sahrens dbuf_set_data(db, buf); 1479dcbf3bd6SGeorge Wilson arc_buf_destroy(obuf, db); 1480fa9e4066Sahrens db->db.db_size = size; 1481fa9e4066Sahrens 1482c717a561Smaybee if (db->db_level == 0) { 1483c717a561Smaybee ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1484c717a561Smaybee db->db_last_dirty->dt.dl.dr_data = buf; 1485c717a561Smaybee } 1486fa9e4066Sahrens mutex_exit(&db->db_mtx); 1487fa9e4066Sahrens 148861e255ceSMatthew Ahrens dmu_objset_willuse_space(dn->dn_objset, size - osize, tx); 1489744947dcSTom Erickson DB_DNODE_EXIT(db); 1490fa9e4066Sahrens } 1491fa9e4066Sahrens 14923f9d6ad7SLin Ling void 14933f9d6ad7SLin Ling dbuf_release_bp(dmu_buf_impl_t *db) 14943f9d6ad7SLin Ling { 149543466aaeSMax Grossman objset_t *os = db->db_objset; 14963f9d6ad7SLin Ling 14973f9d6ad7SLin Ling ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 14983f9d6ad7SLin Ling ASSERT(arc_released(os->os_phys_buf) || 14993f9d6ad7SLin Ling list_link_active(&os->os_dsl_dataset->ds_synced_link)); 15003f9d6ad7SLin Ling ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 15013f9d6ad7SLin Ling 15021b912ec7SGeorge Wilson (void) arc_release(db->db_buf, db); 15033f9d6ad7SLin Ling } 15043f9d6ad7SLin Ling 15050f2e7d03SMatthew Ahrens /* 15060f2e7d03SMatthew Ahrens * We already have a dirty record for this TXG, and we are being 15070f2e7d03SMatthew Ahrens * dirtied again. 15080f2e7d03SMatthew Ahrens */ 15090f2e7d03SMatthew Ahrens static void 15100f2e7d03SMatthew Ahrens dbuf_redirty(dbuf_dirty_record_t *dr) 15110f2e7d03SMatthew Ahrens { 15120f2e7d03SMatthew Ahrens dmu_buf_impl_t *db = dr->dr_dbuf; 15130f2e7d03SMatthew Ahrens 15140f2e7d03SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 15150f2e7d03SMatthew Ahrens 15160f2e7d03SMatthew Ahrens if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) { 15170f2e7d03SMatthew Ahrens /* 15180f2e7d03SMatthew Ahrens * If this buffer has already been written out, 15190f2e7d03SMatthew Ahrens * we now need to reset its state. 15200f2e7d03SMatthew Ahrens */ 15210f2e7d03SMatthew Ahrens dbuf_unoverride(dr); 15220f2e7d03SMatthew Ahrens if (db->db.db_object != DMU_META_DNODE_OBJECT && 15230f2e7d03SMatthew Ahrens db->db_state != DB_NOFILL) { 15240f2e7d03SMatthew Ahrens /* Already released on initial dirty, so just thaw. 
*/ 15250f2e7d03SMatthew Ahrens ASSERT(arc_released(db->db_buf)); 15260f2e7d03SMatthew Ahrens arc_buf_thaw(db->db_buf); 15270f2e7d03SMatthew Ahrens } 15280f2e7d03SMatthew Ahrens } 15290f2e7d03SMatthew Ahrens } 15300f2e7d03SMatthew Ahrens 1531c717a561Smaybee dbuf_dirty_record_t * 1532fa9e4066Sahrens dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1533fa9e4066Sahrens { 1534744947dcSTom Erickson dnode_t *dn; 1535744947dcSTom Erickson objset_t *os; 1536c717a561Smaybee dbuf_dirty_record_t **drp, *dr; 1537fa9e4066Sahrens int drop_struct_lock = FALSE; 1538fa9e4066Sahrens int txgoff = tx->tx_txg & TXG_MASK; 1539fa9e4066Sahrens 1540fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1541fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 15429c9dc39aSek110237 DMU_TX_DIRTY_BUF(tx, db); 1543fa9e4066Sahrens 1544744947dcSTom Erickson DB_DNODE_ENTER(db); 1545744947dcSTom Erickson dn = DB_DNODE(db); 1546fa9e4066Sahrens /* 1547fa9e4066Sahrens * Shouldn't dirty a regular buffer in syncing context. Private 1548fa9e4066Sahrens * objects may be dirtied in syncing context, but only if they 1549fa9e4066Sahrens * were already pre-dirtied in open context. 1550fa9e4066Sahrens */ 1551c166b69dSPaul Dagnelie #ifdef DEBUG 1552c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1553c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1554c166b69dSPaul Dagnelie RW_READER, FTAG); 1555c166b69dSPaul Dagnelie } 1556c717a561Smaybee ASSERT(!dmu_tx_is_syncing(tx) || 1557c717a561Smaybee BP_IS_HOLE(dn->dn_objset->os_rootbp) || 155814843421SMatthew Ahrens DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 155914843421SMatthew Ahrens dn->dn_objset->os_dsl_dataset == NULL); 1560c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1561c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG); 1562c166b69dSPaul Dagnelie #endif 1563fa9e4066Sahrens /* 1564fa9e4066Sahrens * We make this assert for private objects as well, but after we 1565fa9e4066Sahrens * check if we're already dirty. They are allowed to re-dirty 1566fa9e4066Sahrens * in syncing context. 1567fa9e4066Sahrens */ 1568ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1569c717a561Smaybee dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1570fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1571fa9e4066Sahrens 1572fa9e4066Sahrens mutex_enter(&db->db_mtx); 1573fa9e4066Sahrens /* 1574c717a561Smaybee * XXX make this true for indirects too? The problem is that 1575c717a561Smaybee * transactions created with dmu_tx_create_assigned() from 1576c717a561Smaybee * syncing context don't bother holding ahead. 1577fa9e4066Sahrens */ 1578c717a561Smaybee ASSERT(db->db_level != 0 || 157982c9918fSTim Haley db->db_state == DB_CACHED || db->db_state == DB_FILL || 158082c9918fSTim Haley db->db_state == DB_NOFILL); 1581fa9e4066Sahrens 1582fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1583fa9e4066Sahrens /* 1584fa9e4066Sahrens * Don't set dirtyctx to SYNC if we're just modifying this as we 1585fa9e4066Sahrens * initialize the objset. 1586fa9e4066Sahrens */ 1587c166b69dSPaul Dagnelie if (dn->dn_dirtyctx == DN_UNDIRTIED) { 1588c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1589c166b69dSPaul Dagnelie rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1590c166b69dSPaul Dagnelie RW_READER, FTAG); 1591c166b69dSPaul Dagnelie } 1592c166b69dSPaul Dagnelie if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1593c166b69dSPaul Dagnelie dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ? 
1594c166b69dSPaul Dagnelie DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1595fa9e4066Sahrens ASSERT(dn->dn_dirtyctx_firstset == NULL); 1596fa9e4066Sahrens dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1597fa9e4066Sahrens } 1598c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) { 1599c166b69dSPaul Dagnelie rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, 1600c166b69dSPaul Dagnelie FTAG); 1601c166b69dSPaul Dagnelie } 1602c166b69dSPaul Dagnelie } 1603fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1604fa9e4066Sahrens 16050a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) 16060a586ceaSMark Shellenbaum dn->dn_have_spill = B_TRUE; 16070a586ceaSMark Shellenbaum 1608fa9e4066Sahrens /* 1609fa9e4066Sahrens * If this buffer is already dirty, we're done. 1610fa9e4066Sahrens */ 1611c717a561Smaybee drp = &db->db_last_dirty; 1612c717a561Smaybee ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg || 1613c717a561Smaybee db->db.db_object == DMU_META_DNODE_OBJECT); 16147e2186e3Sbonwick while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg) 16157e2186e3Sbonwick drp = &dr->dr_next; 16167e2186e3Sbonwick if (dr && dr->dr_txg == tx->tx_txg) { 1617744947dcSTom Erickson DB_DNODE_EXIT(db); 1618744947dcSTom Erickson 16190f2e7d03SMatthew Ahrens dbuf_redirty(dr); 1620fa9e4066Sahrens mutex_exit(&db->db_mtx); 16217e2186e3Sbonwick return (dr); 1622fa9e4066Sahrens } 1623fa9e4066Sahrens 1624fa9e4066Sahrens /* 1625fa9e4066Sahrens * Only valid if not already dirty. 1626fa9e4066Sahrens */ 162714843421SMatthew Ahrens ASSERT(dn->dn_object == 0 || 162814843421SMatthew Ahrens dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1629fa9e4066Sahrens (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1630fa9e4066Sahrens 1631fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, db->db_level); 1632fa9e4066Sahrens 1633fa9e4066Sahrens /* 1634fa9e4066Sahrens * We should only be dirtying in syncing context if it's the 163514843421SMatthew Ahrens * mos or we're initializing the os or it's a special object. 163614843421SMatthew Ahrens * However, we are allowed to dirty in syncing context provided 163714843421SMatthew Ahrens * we already dirtied it in open context. Hence we must make 163814843421SMatthew Ahrens * this assertion only if we're not already dirty. 
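 *
 * (Assumed examples of the allowed cases: MOS objects are dirtied by
 * sync tasks running in syncing context, and special objects such as
 * DMU_USERUSED_OBJECT/DMU_GROUPUSED_OBJECT are updated while syncing;
 * an ordinary dbuf reaching here in syncing context must already
 * carry an open-context dirty record.)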
1639fa9e4066Sahrens */ 1640744947dcSTom Erickson os = dn->dn_objset; 16413991b535SGeorge Wilson VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa)); 1642c166b69dSPaul Dagnelie #ifdef DEBUG 1643c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1644c166b69dSPaul Dagnelie rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG); 164514843421SMatthew Ahrens ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 164614843421SMatthew Ahrens os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp)); 1647c166b69dSPaul Dagnelie if (dn->dn_objset->os_dsl_dataset != NULL) 1648c166b69dSPaul Dagnelie rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); 1649c166b69dSPaul Dagnelie #endif 1650fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1651fa9e4066Sahrens 1652fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1653fa9e4066Sahrens 16540a586ceaSMark Shellenbaum if (db->db_blkid != DMU_BONUS_BLKID) { 165561e255ceSMatthew Ahrens dmu_objset_willuse_space(os, db->db.db_size, tx); 16561934e92fSmaybee } 16571934e92fSmaybee 1658ea8dc4b6Seschrock /* 1659ea8dc4b6Seschrock * If this buffer is dirty in an old transaction group we need 1660ea8dc4b6Seschrock * to make a copy of it so that the changes we make in this 1661ea8dc4b6Seschrock * transaction group won't leak out when we sync the older txg. 1662ea8dc4b6Seschrock */ 1663c717a561Smaybee dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP); 1664c717a561Smaybee if (db->db_level == 0) { 1665c717a561Smaybee void *data_old = db->db_buf; 1666c717a561Smaybee 166782c9918fSTim Haley if (db->db_state != DB_NOFILL) { 16680a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 1669c717a561Smaybee dbuf_fix_old_data(db, tx->tx_txg); 1670c717a561Smaybee data_old = db->db.db_data; 1671c717a561Smaybee } else if (db->db.db_object != DMU_META_DNODE_OBJECT) { 1672fa9e4066Sahrens /* 167382c9918fSTim Haley * Release the data buffer from the cache so 167482c9918fSTim Haley * that we can modify it without impacting 167582c9918fSTim Haley * possible other users of this cached data 167682c9918fSTim Haley * block. Note that indirect blocks and 167782c9918fSTim Haley * private objects are not released until the 167882c9918fSTim Haley * syncing state (since they are only modified 167982c9918fSTim Haley * then). 1680fa9e4066Sahrens */ 1681fa9e4066Sahrens arc_release(db->db_buf, db); 1682fa9e4066Sahrens dbuf_fix_old_data(db, tx->tx_txg); 1683c717a561Smaybee data_old = db->db_buf; 1684fa9e4066Sahrens } 1685c717a561Smaybee ASSERT(data_old != NULL); 168682c9918fSTim Haley } 1687c717a561Smaybee dr->dt.dl.dr_data = data_old; 1688c717a561Smaybee } else { 1689c717a561Smaybee mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL); 1690c717a561Smaybee list_create(&dr->dt.di.dr_children, 1691c717a561Smaybee sizeof (dbuf_dirty_record_t), 1692c717a561Smaybee offsetof(dbuf_dirty_record_t, dr_dirty_node)); 1693fa9e4066Sahrens } 169469962b56SMatthew Ahrens if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL) 169569962b56SMatthew Ahrens dr->dr_accounted = db->db.db_size; 1696c717a561Smaybee dr->dr_dbuf = db; 1697c717a561Smaybee dr->dr_txg = tx->tx_txg; 1698c717a561Smaybee dr->dr_next = *drp; 1699c717a561Smaybee *drp = dr; 1700fa9e4066Sahrens 1701fa9e4066Sahrens /* 1702fa9e4066Sahrens * We could have been freed_in_flight between the dbuf_noread 1703fa9e4066Sahrens * and dbuf_dirty. We win, as though the dbuf_noread() had 1704fa9e4066Sahrens * happened after the free. 
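 *
 * (One assumed interleaving: thread A calls dbuf_noread() and begins
 * filling; thread B's dbuf_free_range() sees DB_FILL, marks the dbuf
 * freed-in-flight, and records the blkid in dn_free_ranges; clearing
 * that range-tree entry below makes the result look as if the fill
 * happened after the free, so the new contents win.)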
1705fa9e4066Sahrens */ 17060a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 17070a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) { 1708c717a561Smaybee mutex_enter(&dn->dn_mtx); 1709bf16b11eSMatthew Ahrens if (dn->dn_free_ranges[txgoff] != NULL) { 1710bf16b11eSMatthew Ahrens range_tree_clear(dn->dn_free_ranges[txgoff], 1711bf16b11eSMatthew Ahrens db->db_blkid, 1); 1712bf16b11eSMatthew Ahrens } 1713fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1714c717a561Smaybee db->db_freed_in_flight = FALSE; 1715c717a561Smaybee } 1716fa9e4066Sahrens 1717fa9e4066Sahrens /* 1718fa9e4066Sahrens * This buffer is now part of this txg 1719fa9e4066Sahrens */ 1720fa9e4066Sahrens dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg); 1721fa9e4066Sahrens db->db_dirtycnt += 1; 1722fa9e4066Sahrens ASSERT3U(db->db_dirtycnt, <=, 3); 1723fa9e4066Sahrens 1724fa9e4066Sahrens mutex_exit(&db->db_mtx); 1725fa9e4066Sahrens 17260a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID || 17270a586ceaSMark Shellenbaum db->db_blkid == DMU_SPILL_BLKID) { 1728c717a561Smaybee mutex_enter(&dn->dn_mtx); 1729c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1730c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1731c717a561Smaybee mutex_exit(&dn->dn_mtx); 1732fa9e4066Sahrens dnode_setdirty(dn, tx); 1733744947dcSTom Erickson DB_DNODE_EXIT(db); 1734c717a561Smaybee return (dr); 173592654925SMatthew Ahrens } 173692654925SMatthew Ahrens 173792654925SMatthew Ahrens /* 173892654925SMatthew Ahrens * The dn_struct_rwlock prevents db_blkptr from changing 173992654925SMatthew Ahrens * due to a write from syncing context completing 174092654925SMatthew Ahrens * while we are running, so we want to acquire it before 174192654925SMatthew Ahrens * looking at db_blkptr. 174292654925SMatthew Ahrens */ 174392654925SMatthew Ahrens if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) { 174492654925SMatthew Ahrens rw_enter(&dn->dn_struct_rwlock, RW_READER); 174592654925SMatthew Ahrens drop_struct_lock = TRUE; 174692654925SMatthew Ahrens } 174792654925SMatthew Ahrens 1748d3469faaSMark Maybee /* 1749dcb6872cSMatthew Ahrens * We need to hold the dn_struct_rwlock to make this assertion, 1750dcb6872cSMatthew Ahrens * because it protects dn_phys / dn_next_nlevels from changing. 1751dcb6872cSMatthew Ahrens */ 1752dcb6872cSMatthew Ahrens ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) || 1753dcb6872cSMatthew Ahrens dn->dn_phys->dn_nlevels > db->db_level || 1754dcb6872cSMatthew Ahrens dn->dn_next_nlevels[txgoff] > db->db_level || 1755dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level || 1756dcb6872cSMatthew Ahrens dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level); 1757dcb6872cSMatthew Ahrens 1758dcb6872cSMatthew Ahrens /* 175961e255ceSMatthew Ahrens * If we are overwriting a dedup BP, then unless it is snapshotted, 176061e255ceSMatthew Ahrens * when we get to syncing context we will need to decrement its 176161e255ceSMatthew Ahrens * refcount in the DDT. Prefetch the relevant DDT block so that 176261e255ceSMatthew Ahrens * syncing context won't have to wait for the i/o. 
1763d3469faaSMark Maybee */ 176461e255ceSMatthew Ahrens ddt_prefetch(os->os_spa, db->db_blkptr); 1765fa9e4066Sahrens 17668346f03fSJonathan W Adams if (db->db_level == 0) { 17678346f03fSJonathan W Adams dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock); 17688346f03fSJonathan W Adams ASSERT(dn->dn_maxblkid >= db->db_blkid); 17698346f03fSJonathan W Adams } 17708346f03fSJonathan W Adams 177144eda4d7Smaybee if (db->db_level+1 < dn->dn_nlevels) { 1772c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 1773c717a561Smaybee dbuf_dirty_record_t *di; 1774c717a561Smaybee int parent_held = FALSE; 1775c717a561Smaybee 1776c717a561Smaybee if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) { 1777fa9e4066Sahrens int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 1778c717a561Smaybee 1779fa9e4066Sahrens parent = dbuf_hold_level(dn, db->db_level+1, 1780fa9e4066Sahrens db->db_blkid >> epbs, FTAG); 178101025c89SJohn Harres ASSERT(parent != NULL); 1782c717a561Smaybee parent_held = TRUE; 1783c717a561Smaybee } 1784fa9e4066Sahrens if (drop_struct_lock) 1785fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1786c717a561Smaybee ASSERT3U(db->db_level+1, ==, parent->db_level); 1787c717a561Smaybee di = dbuf_dirty(parent, tx); 1788c717a561Smaybee if (parent_held) 1789ea8dc4b6Seschrock dbuf_rele(parent, FTAG); 1790c717a561Smaybee 1791c717a561Smaybee mutex_enter(&db->db_mtx); 179269962b56SMatthew Ahrens /* 179369962b56SMatthew Ahrens * Since we've dropped the mutex, it's possible that 179469962b56SMatthew Ahrens * dbuf_undirty() might have changed this out from under us. 179569962b56SMatthew Ahrens */ 1796c717a561Smaybee if (db->db_last_dirty == dr || 1797c717a561Smaybee dn->dn_object == DMU_META_DNODE_OBJECT) { 1798c717a561Smaybee mutex_enter(&di->dt.di.dr_mtx); 1799c717a561Smaybee ASSERT3U(di->dr_txg, ==, tx->tx_txg); 1800c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1801c717a561Smaybee list_insert_tail(&di->dt.di.dr_children, dr); 1802c717a561Smaybee mutex_exit(&di->dt.di.dr_mtx); 1803c717a561Smaybee dr->dr_parent = di; 1804c717a561Smaybee } 1805c717a561Smaybee mutex_exit(&db->db_mtx); 1806fa9e4066Sahrens } else { 1807c717a561Smaybee ASSERT(db->db_level+1 == dn->dn_nlevels); 1808c717a561Smaybee ASSERT(db->db_blkid < dn->dn_nblkptr); 1809744947dcSTom Erickson ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf); 1810c717a561Smaybee mutex_enter(&dn->dn_mtx); 1811c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 1812c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txgoff], dr); 1813c717a561Smaybee mutex_exit(&dn->dn_mtx); 1814fa9e4066Sahrens if (drop_struct_lock) 1815fa9e4066Sahrens rw_exit(&dn->dn_struct_rwlock); 1816fa9e4066Sahrens } 1817fa9e4066Sahrens 1818fa9e4066Sahrens dnode_setdirty(dn, tx); 1819744947dcSTom Erickson DB_DNODE_EXIT(db); 1820c717a561Smaybee return (dr); 1821fa9e4066Sahrens } 1822fa9e4066Sahrens 18233b2aab18SMatthew Ahrens /* 18243e30c24aSWill Andrews * Undirty a buffer in the transaction group referenced by the given 18253e30c24aSWill Andrews * transaction. Return whether this evicted the dbuf. 
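 *
 * (Caller contract, matching the asserts below: db_mtx is held on
 * entry; on a B_TRUE return the dbuf has been destroyed and db_mtx
 * dropped, so callers such as dbuf_free_range() must not touch the
 * dbuf again.)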
18263b2aab18SMatthew Ahrens */ 18273b2aab18SMatthew Ahrens static boolean_t 1828fa9e4066Sahrens dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1829fa9e4066Sahrens { 1830744947dcSTom Erickson dnode_t *dn; 1831c717a561Smaybee uint64_t txg = tx->tx_txg; 183217f17c2dSbonwick dbuf_dirty_record_t *dr, **drp; 1833fa9e4066Sahrens 1834c717a561Smaybee ASSERT(txg != 0); 183546e1baa6SMatthew Ahrens 183646e1baa6SMatthew Ahrens /* 183746e1baa6SMatthew Ahrens * Due to our use of dn_nlevels below, this can only be called 183846e1baa6SMatthew Ahrens * in open context, unless we are operating on the MOS. 183946e1baa6SMatthew Ahrens * From syncing context, dn_nlevels may be different from the 184046e1baa6SMatthew Ahrens * dn_nlevels used when dbuf was dirtied. 184146e1baa6SMatthew Ahrens */ 184246e1baa6SMatthew Ahrens ASSERT(db->db_objset == 184346e1baa6SMatthew Ahrens dmu_objset_pool(db->db_objset)->dp_meta_objset || 184446e1baa6SMatthew Ahrens txg != spa_syncing_txg(dmu_objset_spa(db->db_objset))); 18450a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 18463b2aab18SMatthew Ahrens ASSERT0(db->db_level); 18473b2aab18SMatthew Ahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 1848fa9e4066Sahrens 1849fa9e4066Sahrens /* 1850fa9e4066Sahrens * If this buffer is not dirty, we're done. 1851fa9e4066Sahrens */ 185217f17c2dSbonwick for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) 1853c717a561Smaybee if (dr->dr_txg <= txg) 1854c717a561Smaybee break; 18553b2aab18SMatthew Ahrens if (dr == NULL || dr->dr_txg < txg) 18563b2aab18SMatthew Ahrens return (B_FALSE); 1857c717a561Smaybee ASSERT(dr->dr_txg == txg); 1858b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 1859fa9e4066Sahrens 1860744947dcSTom Erickson DB_DNODE_ENTER(db); 1861744947dcSTom Erickson dn = DB_DNODE(db); 1862744947dcSTom Erickson 1863fa9e4066Sahrens dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size); 1864fa9e4066Sahrens 1865fa9e4066Sahrens ASSERT(db->db.db_size != 0); 1866fa9e4066Sahrens 186746e1baa6SMatthew Ahrens dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset), 186846e1baa6SMatthew Ahrens dr->dr_accounted, txg); 1869fa9e4066Sahrens 187017f17c2dSbonwick *drp = dr->dr_next; 1871c717a561Smaybee 18723f2366c2SGordon Ross /* 18733f2366c2SGordon Ross * Note that there are three places in dbuf_dirty() 18743f2366c2SGordon Ross * where this dirty record may be put on a list. 18753f2366c2SGordon Ross * Make sure to do a list_remove corresponding to 18763f2366c2SGordon Ross * every one of those list_insert calls. 
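 *
 * (For reference, the three insertion sites in dbuf_dirty(): bonus
 * and spill dbufs are inserted on dn_dirty_records[txgoff]; a dbuf
 * with a dirtied parent is inserted on the parent's
 * dt.di.dr_children; and a top-level dbuf is inserted on
 * dn_dirty_records[txgoff] as well.)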
18773f2366c2SGordon Ross */ 1878c717a561Smaybee if (dr->dr_parent) { 1879c717a561Smaybee mutex_enter(&dr->dr_parent->dt.di.dr_mtx); 1880c717a561Smaybee list_remove(&dr->dr_parent->dt.di.dr_children, dr); 1881c717a561Smaybee mutex_exit(&dr->dr_parent->dt.di.dr_mtx); 18823f2366c2SGordon Ross } else if (db->db_blkid == DMU_SPILL_BLKID || 18833f2366c2SGordon Ross db->db_level + 1 == dn->dn_nlevels) { 1884cdb0ab79Smaybee ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf); 1885fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 1886c717a561Smaybee list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr); 1887fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 1888c717a561Smaybee } 1889744947dcSTom Erickson DB_DNODE_EXIT(db); 1890c717a561Smaybee 189182c9918fSTim Haley if (db->db_state != DB_NOFILL) { 1892c717a561Smaybee dbuf_unoverride(dr); 1893c717a561Smaybee 1894c717a561Smaybee ASSERT(db->db_buf != NULL); 1895c717a561Smaybee ASSERT(dr->dt.dl.dr_data != NULL); 1896c717a561Smaybee if (dr->dt.dl.dr_data != db->db_buf) 1897dcbf3bd6SGeorge Wilson arc_buf_destroy(dr->dt.dl.dr_data, db); 1898c717a561Smaybee } 1899d2b3cbbdSJorgen Lundman 1900c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 1901fa9e4066Sahrens 1902fa9e4066Sahrens ASSERT(db->db_dirtycnt > 0); 1903fa9e4066Sahrens db->db_dirtycnt -= 1; 1904fa9e4066Sahrens 1905c717a561Smaybee if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) { 1906dcbf3bd6SGeorge Wilson ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf)); 1907dcbf3bd6SGeorge Wilson dbuf_destroy(db); 19083b2aab18SMatthew Ahrens return (B_TRUE); 1909fa9e4066Sahrens } 1910fa9e4066Sahrens 19113b2aab18SMatthew Ahrens return (B_FALSE); 1912fa9e4066Sahrens } 1913fa9e4066Sahrens 1914fa9e4066Sahrens void 191543466aaeSMax Grossman dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx) 1916fa9e4066Sahrens { 191743466aaeSMax Grossman dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 19181ab7f2deSmaybee int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH; 1919fa9e4066Sahrens 1920fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1921fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1922fa9e4066Sahrens 19230f2e7d03SMatthew Ahrens /* 19240f2e7d03SMatthew Ahrens * Quick check for dirtiness. For already dirty blocks, this 19250f2e7d03SMatthew Ahrens * reduces runtime of this function by >90%, and overall performance 19260f2e7d03SMatthew Ahrens * by 50% for some workloads (e.g. file deletion with indirect blocks 19270f2e7d03SMatthew Ahrens * cached). 19280f2e7d03SMatthew Ahrens */ 19290f2e7d03SMatthew Ahrens mutex_enter(&db->db_mtx); 19300f2e7d03SMatthew Ahrens dbuf_dirty_record_t *dr; 19310f2e7d03SMatthew Ahrens for (dr = db->db_last_dirty; 19320f2e7d03SMatthew Ahrens dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) { 19330f2e7d03SMatthew Ahrens /* 19340f2e7d03SMatthew Ahrens * It's possible that it is already dirty but not cached, 19350f2e7d03SMatthew Ahrens * because there are some calls to dbuf_dirty() that don't 19360f2e7d03SMatthew Ahrens * go through dmu_buf_will_dirty(). 19370f2e7d03SMatthew Ahrens */ 19380f2e7d03SMatthew Ahrens if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) { 19390f2e7d03SMatthew Ahrens /* This dbuf is already dirty and cached.
*/ 19400f2e7d03SMatthew Ahrens dbuf_redirty(dr); 19410f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 19420f2e7d03SMatthew Ahrens return; 19430f2e7d03SMatthew Ahrens } 19440f2e7d03SMatthew Ahrens } 19450f2e7d03SMatthew Ahrens mutex_exit(&db->db_mtx); 19460f2e7d03SMatthew Ahrens 1947744947dcSTom Erickson DB_DNODE_ENTER(db); 1948744947dcSTom Erickson if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock)) 1949fa9e4066Sahrens rf |= DB_RF_HAVESTRUCT; 1950744947dcSTom Erickson DB_DNODE_EXIT(db); 1951ea8dc4b6Seschrock (void) dbuf_read(db, NULL, rf); 1952c717a561Smaybee (void) dbuf_dirty(db, tx); 1953fa9e4066Sahrens } 1954fa9e4066Sahrens 1955fa9e4066Sahrens void 195682c9918fSTim Haley dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 195782c9918fSTim Haley { 195882c9918fSTim Haley dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 195982c9918fSTim Haley 196082c9918fSTim Haley db->db_state = DB_NOFILL; 196182c9918fSTim Haley 196282c9918fSTim Haley dmu_buf_will_fill(db_fake, tx); 196382c9918fSTim Haley } 196482c9918fSTim Haley 196582c9918fSTim Haley void 1966ea8dc4b6Seschrock dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx) 1967fa9e4066Sahrens { 1968ea8dc4b6Seschrock dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 1969ea8dc4b6Seschrock 19700a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1971fa9e4066Sahrens ASSERT(tx->tx_txg != 0); 1972fa9e4066Sahrens ASSERT(db->db_level == 0); 1973fa9e4066Sahrens ASSERT(!refcount_is_zero(&db->db_holds)); 1974fa9e4066Sahrens 1975ea8dc4b6Seschrock ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT || 1976fa9e4066Sahrens dmu_tx_private_ok(tx)); 1977fa9e4066Sahrens 1978fa9e4066Sahrens dbuf_noread(db); 1979c717a561Smaybee (void) dbuf_dirty(db, tx); 1980fa9e4066Sahrens } 1981fa9e4066Sahrens 1982fa9e4066Sahrens #pragma weak dmu_buf_fill_done = dbuf_fill_done 1983fa9e4066Sahrens /* ARGSUSED */ 1984fa9e4066Sahrens void 1985fa9e4066Sahrens dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx) 1986fa9e4066Sahrens { 1987fa9e4066Sahrens mutex_enter(&db->db_mtx); 19889c9dc39aSek110237 DBUF_VERIFY(db); 1989fa9e4066Sahrens 1990fa9e4066Sahrens if (db->db_state == DB_FILL) { 1991c717a561Smaybee if (db->db_level == 0 && db->db_freed_in_flight) { 19920a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 1993fa9e4066Sahrens /* we were freed while filling */ 1994fa9e4066Sahrens /* XXX dbuf_undirty? 
*/ 1995fa9e4066Sahrens bzero(db->db.db_data, db->db.db_size); 1996c717a561Smaybee db->db_freed_in_flight = FALSE; 1997fa9e4066Sahrens } 1998fa9e4066Sahrens db->db_state = DB_CACHED; 1999fa9e4066Sahrens cv_broadcast(&db->db_changed); 2000fa9e4066Sahrens } 2001fa9e4066Sahrens mutex_exit(&db->db_mtx); 2002fa9e4066Sahrens } 2003fa9e4066Sahrens 20045d7b4d43SMatthew Ahrens void 20055d7b4d43SMatthew Ahrens dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data, 20065d7b4d43SMatthew Ahrens bp_embedded_type_t etype, enum zio_compress comp, 20075d7b4d43SMatthew Ahrens int uncompressed_size, int compressed_size, int byteorder, 20085d7b4d43SMatthew Ahrens dmu_tx_t *tx) 20095d7b4d43SMatthew Ahrens { 20105d7b4d43SMatthew Ahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf; 20115d7b4d43SMatthew Ahrens struct dirty_leaf *dl; 20125d7b4d43SMatthew Ahrens dmu_object_type_t type; 20135d7b4d43SMatthew Ahrens 2014ca0cc391SMatthew Ahrens if (etype == BP_EMBEDDED_TYPE_DATA) { 2015ca0cc391SMatthew Ahrens ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset), 2016ca0cc391SMatthew Ahrens SPA_FEATURE_EMBEDDED_DATA)); 2017ca0cc391SMatthew Ahrens } 2018ca0cc391SMatthew Ahrens 20195d7b4d43SMatthew Ahrens DB_DNODE_ENTER(db); 20205d7b4d43SMatthew Ahrens type = DB_DNODE(db)->dn_type; 20215d7b4d43SMatthew Ahrens DB_DNODE_EXIT(db); 20225d7b4d43SMatthew Ahrens 20235d7b4d43SMatthew Ahrens ASSERT0(db->db_level); 20245d7b4d43SMatthew Ahrens ASSERT(db->db_blkid != DMU_BONUS_BLKID); 20255d7b4d43SMatthew Ahrens 20265d7b4d43SMatthew Ahrens dmu_buf_will_not_fill(dbuf, tx); 20275d7b4d43SMatthew Ahrens 20285d7b4d43SMatthew Ahrens ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 20295d7b4d43SMatthew Ahrens dl = &db->db_last_dirty->dt.dl; 20305d7b4d43SMatthew Ahrens encode_embedded_bp_compressed(&dl->dr_overridden_by, 20315d7b4d43SMatthew Ahrens data, comp, uncompressed_size, compressed_size); 20325d7b4d43SMatthew Ahrens BPE_SET_ETYPE(&dl->dr_overridden_by, etype); 20335d7b4d43SMatthew Ahrens BP_SET_TYPE(&dl->dr_overridden_by, type); 20345d7b4d43SMatthew Ahrens BP_SET_LEVEL(&dl->dr_overridden_by, 0); 20355d7b4d43SMatthew Ahrens BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder); 20365d7b4d43SMatthew Ahrens 20375d7b4d43SMatthew Ahrens dl->dr_override_state = DR_OVERRIDDEN; 20385d7b4d43SMatthew Ahrens dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg; 20395d7b4d43SMatthew Ahrens } 20405d7b4d43SMatthew Ahrens 2041ea8dc4b6Seschrock /* 20422fdbea25SAleksandr Guzovskiy * Directly assign a provided arc buf to a given dbuf if it's not referenced 20432fdbea25SAleksandr Guzovskiy * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. 
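 *
 * (A hedged usage sketch, assuming the dmu_assign_arcbuf() consumer:
 *
 *	arc_buf_t *abuf = dmu_request_arcbuf(handle, size);
 *	bcopy(src, abuf->b_data, size);
 *	dmu_assign_arcbuf(handle, offset, abuf, tx);
 *
 * hands a loaned arc buf straight to the dbuf for a zero-copy write,
 * falling back to a bcopy only when other holders still reference
 * the cached data, as the code below does.)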
20442fdbea25SAleksandr Guzovskiy */ 20452fdbea25SAleksandr Guzovskiy void 20462fdbea25SAleksandr Guzovskiy dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx) 20472fdbea25SAleksandr Guzovskiy { 20482fdbea25SAleksandr Guzovskiy ASSERT(!refcount_is_zero(&db->db_holds)); 20490a586ceaSMark Shellenbaum ASSERT(db->db_blkid != DMU_BONUS_BLKID); 20502fdbea25SAleksandr Guzovskiy ASSERT(db->db_level == 0); 20515602294fSDan Kimmel ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf)); 20522fdbea25SAleksandr Guzovskiy ASSERT(buf != NULL); 20535602294fSDan Kimmel ASSERT(arc_buf_lsize(buf) == db->db.db_size); 20542fdbea25SAleksandr Guzovskiy ASSERT(tx->tx_txg != 0); 20552fdbea25SAleksandr Guzovskiy 20562fdbea25SAleksandr Guzovskiy arc_return_buf(buf, db); 20572fdbea25SAleksandr Guzovskiy ASSERT(arc_released(buf)); 20582fdbea25SAleksandr Guzovskiy 20592fdbea25SAleksandr Guzovskiy mutex_enter(&db->db_mtx); 20602fdbea25SAleksandr Guzovskiy 20612fdbea25SAleksandr Guzovskiy while (db->db_state == DB_READ || db->db_state == DB_FILL) 20622fdbea25SAleksandr Guzovskiy cv_wait(&db->db_changed, &db->db_mtx); 20632fdbea25SAleksandr Guzovskiy 20642fdbea25SAleksandr Guzovskiy ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); 20652fdbea25SAleksandr Guzovskiy 20662fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED && 20672fdbea25SAleksandr Guzovskiy refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) { 20682fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 20692fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 20702fdbea25SAleksandr Guzovskiy bcopy(buf->b_data, db->db.db_data, db->db.db_size); 2071dcbf3bd6SGeorge Wilson arc_buf_destroy(buf, db); 2072c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_copied(); 20732fdbea25SAleksandr Guzovskiy return; 20742fdbea25SAleksandr Guzovskiy } 20752fdbea25SAleksandr Guzovskiy 2076c242f9a0Schunli zhang - Sun Microsystems - Irvine United States xuio_stat_wbuf_nocopy(); 20772fdbea25SAleksandr Guzovskiy if (db->db_state == DB_CACHED) { 20782fdbea25SAleksandr Guzovskiy dbuf_dirty_record_t *dr = db->db_last_dirty; 20792fdbea25SAleksandr Guzovskiy 20802fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf != NULL); 20812fdbea25SAleksandr Guzovskiy if (dr != NULL && dr->dr_txg == tx->tx_txg) { 20822fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_data == db->db_buf); 20832fdbea25SAleksandr Guzovskiy if (!arc_released(db->db_buf)) { 20842fdbea25SAleksandr Guzovskiy ASSERT(dr->dt.dl.dr_override_state == 20852fdbea25SAleksandr Guzovskiy DR_OVERRIDDEN); 20862fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 20872fdbea25SAleksandr Guzovskiy } 20882fdbea25SAleksandr Guzovskiy dr->dt.dl.dr_data = buf; 2089dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 20902fdbea25SAleksandr Guzovskiy } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) { 20912fdbea25SAleksandr Guzovskiy arc_release(db->db_buf, db); 2092dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 20932fdbea25SAleksandr Guzovskiy } 20942fdbea25SAleksandr Guzovskiy db->db_buf = NULL; 20952fdbea25SAleksandr Guzovskiy } 20962fdbea25SAleksandr Guzovskiy ASSERT(db->db_buf == NULL); 20972fdbea25SAleksandr Guzovskiy dbuf_set_data(db, buf); 20982fdbea25SAleksandr Guzovskiy db->db_state = DB_FILL; 20992fdbea25SAleksandr Guzovskiy mutex_exit(&db->db_mtx); 21002fdbea25SAleksandr Guzovskiy (void) dbuf_dirty(db, tx); 210143466aaeSMax Grossman dmu_buf_fill_done(&db->db, tx); 21022fdbea25SAleksandr Guzovskiy } 21032fdbea25SAleksandr Guzovskiy 
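/*
 * Destroy the dbuf and free its memory. A sketch of the assumed
 * caller contract, matching the asserts below: db_mtx is held, the
 * hold count is zero, and no dirty data is pending. db_mtx is
 * dropped partway through so dn_dbufs_mtx can be taken without
 * inverting lock order; that is safe because db_state is DB_EVICTING
 * by then, which keeps the dbuf from being found via the hash table.
 */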
2104ea8dc4b6Seschrock void 2105dcbf3bd6SGeorge Wilson dbuf_destroy(dmu_buf_impl_t *db) 2106fa9e4066Sahrens { 2107744947dcSTom Erickson dnode_t *dn; 2108ea8dc4b6Seschrock dmu_buf_impl_t *parent = db->db_parent; 2109744947dcSTom Erickson dmu_buf_impl_t *dndb; 2110fa9e4066Sahrens 2111fa9e4066Sahrens ASSERT(MUTEX_HELD(&db->db_mtx)); 2112fa9e4066Sahrens ASSERT(refcount_is_zero(&db->db_holds)); 2113fa9e4066Sahrens 2114dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) { 2115dcbf3bd6SGeorge Wilson arc_buf_destroy(db->db_buf, db); 2116dcbf3bd6SGeorge Wilson db->db_buf = NULL; 2117dcbf3bd6SGeorge Wilson } 2118ea8dc4b6Seschrock 21190a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 2120dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data != NULL); 2121ea8dc4b6Seschrock zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN); 21225a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 2123fa9e4066Sahrens db->db_state = DB_UNCACHED; 2124fa9e4066Sahrens } 2125fa9e4066Sahrens 2126dcbf3bd6SGeorge Wilson dbuf_clear_data(db); 2127dcbf3bd6SGeorge Wilson 2128dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2129adb52d92SMatthew Ahrens ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2130adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2131adb52d92SMatthew Ahrens 2132adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2133adb52d92SMatthew Ahrens (void) refcount_remove_many( 2134adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2135dcbf3bd6SGeorge Wilson db->db.db_size, db); 2136adb52d92SMatthew Ahrens 2137adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2138dcbf3bd6SGeorge Wilson } 2139dcbf3bd6SGeorge Wilson 214082c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL); 2141fa9e4066Sahrens ASSERT(db->db_data_pending == NULL); 2142fa9e4066Sahrens 2143ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2144ea8dc4b6Seschrock db->db_blkptr = NULL; 2145ea8dc4b6Seschrock 2146dcbf3bd6SGeorge Wilson /* 2147dcbf3bd6SGeorge Wilson * Now that db_state is DB_EVICTING, nobody else can find this via 2148dcbf3bd6SGeorge Wilson * the hash table. We can now drop db_mtx, which allows us to 2149dcbf3bd6SGeorge Wilson * acquire the dn_dbufs_mtx. 2150dcbf3bd6SGeorge Wilson */ 2151dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2152dcbf3bd6SGeorge Wilson 2153744947dcSTom Erickson DB_DNODE_ENTER(db); 2154744947dcSTom Erickson dn = DB_DNODE(db); 2155744947dcSTom Erickson dndb = dn->dn_dbuf; 2156dcbf3bd6SGeorge Wilson if (db->db_blkid != DMU_BONUS_BLKID) { 2157dcbf3bd6SGeorge Wilson boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx); 2158dcbf3bd6SGeorge Wilson if (needlock) 2159dcbf3bd6SGeorge Wilson mutex_enter(&dn->dn_dbufs_mtx); 21600f6d88adSAlex Reece avl_remove(&dn->dn_dbufs, db); 2161640c1670SJosef 'Jeff' Sipek atomic_dec_32(&dn->dn_dbufs_count); 2162744947dcSTom Erickson membar_producer(); 2163744947dcSTom Erickson DB_DNODE_EXIT(db); 2164dcbf3bd6SGeorge Wilson if (needlock) 2165dcbf3bd6SGeorge Wilson mutex_exit(&dn->dn_dbufs_mtx); 2166744947dcSTom Erickson /* 2167744947dcSTom Erickson * Decrementing the dbuf count means that the hold corresponding 2168744947dcSTom Erickson * to the removed dbuf is no longer discounted in dnode_move(), 2169744947dcSTom Erickson * so the dnode cannot be moved until after we release the hold. 
2170744947dcSTom Erickson * The membar_producer() ensures visibility of the decremented 2171744947dcSTom Erickson * value in dnode_move(), since DB_DNODE_EXIT doesn't actually 2172744947dcSTom Erickson * release any lock. 2173744947dcSTom Erickson */ 2174ea8dc4b6Seschrock dnode_rele(dn, db); 2175744947dcSTom Erickson db->db_dnode_handle = NULL; 2176dcbf3bd6SGeorge Wilson 2177dcbf3bd6SGeorge Wilson dbuf_hash_remove(db); 2178744947dcSTom Erickson } else { 2179744947dcSTom Erickson DB_DNODE_EXIT(db); 2180ea8dc4b6Seschrock } 2181ea8dc4b6Seschrock 2182dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds)); 2183ea8dc4b6Seschrock 2184dcbf3bd6SGeorge Wilson db->db_parent = NULL; 2185dcbf3bd6SGeorge Wilson 2186dcbf3bd6SGeorge Wilson ASSERT(db->db_buf == NULL); 2187dcbf3bd6SGeorge Wilson ASSERT(db->db.db_data == NULL); 2188dcbf3bd6SGeorge Wilson ASSERT(db->db_hash_next == NULL); 2189dcbf3bd6SGeorge Wilson ASSERT(db->db_blkptr == NULL); 2190dcbf3bd6SGeorge Wilson ASSERT(db->db_data_pending == NULL); 2191adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE); 2192dcbf3bd6SGeorge Wilson ASSERT(!multilist_link_active(&db->db_cache_link)); 2193dcbf3bd6SGeorge Wilson 2194dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2195dcbf3bd6SGeorge Wilson arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2196fa9e4066Sahrens 2197fa9e4066Sahrens /* 2198744947dcSTom Erickson * If this dbuf is referenced from an indirect dbuf, 2199fa9e4066Sahrens * decrement the ref count on the indirect dbuf. 2200fa9e4066Sahrens */ 2201c543ec06Sahrens if (parent && parent != dndb) 2202ea8dc4b6Seschrock dbuf_rele(parent, db); 2203fa9e4066Sahrens } 2204fa9e4066Sahrens 2205a2cdcdd2SPaul Dagnelie /* 2206a2cdcdd2SPaul Dagnelie * Note: While bpp will always be updated if the function returns success, 2207a2cdcdd2SPaul Dagnelie * parentp will not be updated if the dnode does not have dn_dbuf filled in; 2208a2cdcdd2SPaul Dagnelie * this happens when the dnode is the meta-dnode, or a userused or groupused 2209a2cdcdd2SPaul Dagnelie * object. 2210a2cdcdd2SPaul Dagnelie */ 2211fa9e4066Sahrens static int 2212fa9e4066Sahrens dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, 2213fa9e4066Sahrens dmu_buf_impl_t **parentp, blkptr_t **bpp) 2214fa9e4066Sahrens { 22150b69c2f0Sahrens *parentp = NULL; 22160b69c2f0Sahrens *bpp = NULL; 22170b69c2f0Sahrens 22180a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 22190a586ceaSMark Shellenbaum 22200a586ceaSMark Shellenbaum if (blkid == DMU_SPILL_BLKID) { 22210a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 222206e0070dSMark Shellenbaum if (dn->dn_have_spill && 222306e0070dSMark Shellenbaum (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 22240a586ceaSMark Shellenbaum *bpp = &dn->dn_phys->dn_spill; 22250a586ceaSMark Shellenbaum else 22260a586ceaSMark Shellenbaum *bpp = NULL; 22270a586ceaSMark Shellenbaum dbuf_add_ref(dn->dn_dbuf, NULL); 22280a586ceaSMark Shellenbaum *parentp = dn->dn_dbuf; 22290a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 22300a586ceaSMark Shellenbaum return (0); 22310a586ceaSMark Shellenbaum } 2232ea8dc4b6Seschrock 22337de35a3eSPaul Dagnelie int nlevels = 22347de35a3eSPaul Dagnelie (dn->dn_phys->dn_nlevels == 0) ? 
1 : dn->dn_phys->dn_nlevels; 22357de35a3eSPaul Dagnelie int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT; 2236fa9e4066Sahrens 2237fa9e4066Sahrens ASSERT3U(level * epbs, <, 64); 2238fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 22397de35a3eSPaul Dagnelie /* 22407de35a3eSPaul Dagnelie * This assertion shouldn't trip as long as the max indirect block size 22417de35a3eSPaul Dagnelie * is less than 1M. The reason for this is that up to that point, 22427de35a3eSPaul Dagnelie * the number of levels required to address an entire object with blocks 22437de35a3eSPaul Dagnelie * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In 22447de35a3eSPaul Dagnelie * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55 22457de35a3eSPaul Dagnelie * (i.e. we can address the entire object), objects will all use at most 22467de35a3eSPaul Dagnelie * N-1 levels and the assertion won't overflow. However, once epbs is 22477de35a3eSPaul Dagnelie * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be 22487de35a3eSPaul Dagnelie * enough to address an entire object, so objects will have 5 levels, 22497de35a3eSPaul Dagnelie * but then this assertion will overflow. 22507de35a3eSPaul Dagnelie * 22517de35a3eSPaul Dagnelie * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we 22527de35a3eSPaul Dagnelie * need to redo this logic to handle overflows. 22537de35a3eSPaul Dagnelie */ 22547de35a3eSPaul Dagnelie ASSERT(level >= nlevels || 22557de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs) + 22567de35a3eSPaul Dagnelie highbit64(dn->dn_phys->dn_nblkptr) <= 64); 2257ea8dc4b6Seschrock if (level >= nlevels || 22587de35a3eSPaul Dagnelie blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr << 22597de35a3eSPaul Dagnelie ((nlevels - level - 1) * epbs)) || 22607de35a3eSPaul Dagnelie (fail_sparse && 22617de35a3eSPaul Dagnelie blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) { 2262fa9e4066Sahrens /* the buffer has no parent yet */ 2263be6fd75aSMatthew Ahrens return (SET_ERROR(ENOENT)); 2264fa9e4066Sahrens } else if (level < nlevels-1) { 2265fa9e4066Sahrens /* this block is referenced from an indirect block */ 2266fa9e4066Sahrens int err = dbuf_hold_impl(dn, level+1, 2267a2cdcdd2SPaul Dagnelie blkid >> epbs, fail_sparse, FALSE, NULL, parentp); 2268fa9e4066Sahrens if (err) 2269fa9e4066Sahrens return (err); 2270ea8dc4b6Seschrock err = dbuf_read(*parentp, NULL, 2271ea8dc4b6Seschrock (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL)); 2272c543ec06Sahrens if (err) { 2273c543ec06Sahrens dbuf_rele(*parentp, NULL); 2274c543ec06Sahrens *parentp = NULL; 2275c543ec06Sahrens return (err); 2276c543ec06Sahrens } 2277fa9e4066Sahrens *bpp = ((blkptr_t *)(*parentp)->db.db_data) + 2278fa9e4066Sahrens (blkid & ((1ULL << epbs) - 1)); 22797de35a3eSPaul Dagnelie if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs))) 22807de35a3eSPaul Dagnelie ASSERT(BP_IS_HOLE(*bpp)); 2281c543ec06Sahrens return (0); 2282fa9e4066Sahrens } else { 2283fa9e4066Sahrens /* the block is referenced from the dnode */ 2284fa9e4066Sahrens ASSERT3U(level, ==, nlevels-1); 2285fa9e4066Sahrens ASSERT(dn->dn_phys->dn_nblkptr == 0 || 2286fa9e4066Sahrens blkid < dn->dn_phys->dn_nblkptr); 2287c543ec06Sahrens if (dn->dn_dbuf) { 2288c543ec06Sahrens dbuf_add_ref(dn->dn_dbuf, NULL); 2289fa9e4066Sahrens *parentp = dn->dn_dbuf; 2290c543ec06Sahrens } 2291fa9e4066Sahrens *bpp = &dn->dn_phys->dn_blkptr[blkid]; 2292fa9e4066Sahrens return (0); 2293fa9e4066Sahrens } 2294fa9e4066Sahrens } 2295fa9e4066Sahrens 2296fa9e4066Sahrens static 
dmu_buf_impl_t * 2297fa9e4066Sahrens dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid, 2298fa9e4066Sahrens dmu_buf_impl_t *parent, blkptr_t *blkptr) 2299fa9e4066Sahrens { 2300503ad85cSMatthew Ahrens objset_t *os = dn->dn_objset; 2301fa9e4066Sahrens dmu_buf_impl_t *db, *odb; 2302fa9e4066Sahrens 2303fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2304fa9e4066Sahrens ASSERT(dn->dn_type != DMU_OT_NONE); 2305fa9e4066Sahrens 2306dcbf3bd6SGeorge Wilson db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP); 2307fa9e4066Sahrens 2308fa9e4066Sahrens db->db_objset = os; 2309fa9e4066Sahrens db->db.db_object = dn->dn_object; 2310fa9e4066Sahrens db->db_level = level; 2311fa9e4066Sahrens db->db_blkid = blkid; 2312c717a561Smaybee db->db_last_dirty = NULL; 2313ea8dc4b6Seschrock db->db_dirtycnt = 0; 2314744947dcSTom Erickson db->db_dnode_handle = dn->dn_handle; 2315ea8dc4b6Seschrock db->db_parent = parent; 2316ea8dc4b6Seschrock db->db_blkptr = blkptr; 2317fa9e4066Sahrens 2318bc9014e6SJustin Gibbs db->db_user = NULL; 2319d2058105SJustin T. Gibbs db->db_user_immediate_evict = FALSE; 2320d2058105SJustin T. Gibbs db->db_freed_in_flight = FALSE; 2321d2058105SJustin T. Gibbs db->db_pending_evict = FALSE; 2322ea8dc4b6Seschrock 23230a586ceaSMark Shellenbaum if (blkid == DMU_BONUS_BLKID) { 2324ea8dc4b6Seschrock ASSERT3P(parent, ==, dn->dn_dbuf); 23251934e92fSmaybee db->db.db_size = DN_MAX_BONUSLEN - 23261934e92fSmaybee (dn->dn_nblkptr-1) * sizeof (blkptr_t); 23271934e92fSmaybee ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen); 23280a586ceaSMark Shellenbaum db->db.db_offset = DMU_BONUS_BLKID; 2329ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2330adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2331ea8dc4b6Seschrock /* the bonus dbuf is not placed in the hash table */ 23325a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2333ea8dc4b6Seschrock return (db); 23340a586ceaSMark Shellenbaum } else if (blkid == DMU_SPILL_BLKID) { 23350a586ceaSMark Shellenbaum db->db.db_size = (blkptr != NULL) ? 23360a586ceaSMark Shellenbaum BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE; 23370a586ceaSMark Shellenbaum db->db.db_offset = 0; 2338fa9e4066Sahrens } else { 2339fa9e4066Sahrens int blocksize = 2340fa9e4066Sahrens db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz; 2341fa9e4066Sahrens db->db.db_size = blocksize; 2342fa9e4066Sahrens db->db.db_offset = db->db_blkid * blocksize; 2343fa9e4066Sahrens } 2344fa9e4066Sahrens 2345fa9e4066Sahrens /* 2346fa9e4066Sahrens * Hold the dn_dbufs_mtx while we get the new dbuf 2347fa9e4066Sahrens * in the hash table *and* added to the dbufs list. 2348fa9e4066Sahrens * This prevents a possible deadlock with someone 2349fa9e4066Sahrens * trying to look up this dbuf before it's added to the 2350fa9e4066Sahrens * dn_dbufs list.
2351fa9e4066Sahrens */ 2352fa9e4066Sahrens mutex_enter(&dn->dn_dbufs_mtx); 2353ea8dc4b6Seschrock db->db_state = DB_EVICTING; 2354fa9e4066Sahrens if ((odb = dbuf_hash_insert(db)) != NULL) { 2355fa9e4066Sahrens /* someone else inserted it first */ 2356dcbf3bd6SGeorge Wilson kmem_cache_free(dbuf_kmem_cache, db); 2357fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 2358fa9e4066Sahrens return (odb); 2359fa9e4066Sahrens } 23600f6d88adSAlex Reece avl_add(&dn->dn_dbufs, db); 2361653af1b8SStephen Blinick 2362ea8dc4b6Seschrock db->db_state = DB_UNCACHED; 2363adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2364fa9e4066Sahrens mutex_exit(&dn->dn_dbufs_mtx); 23655a98e54bSBrendan Gregg - Sun Microsystems arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER); 2366fa9e4066Sahrens 2367fa9e4066Sahrens if (parent && parent != dn->dn_dbuf) 2368fa9e4066Sahrens dbuf_add_ref(parent, db); 2369fa9e4066Sahrens 2370ea8dc4b6Seschrock ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 2371ea8dc4b6Seschrock refcount_count(&dn->dn_holds) > 0); 2372fa9e4066Sahrens (void) refcount_add(&dn->dn_holds, db); 2373640c1670SJosef 'Jeff' Sipek atomic_inc_32(&dn->dn_dbufs_count); 2374fa9e4066Sahrens 2375fa9e4066Sahrens dprintf_dbuf(db, "db=%p\n", db); 2376fa9e4066Sahrens 2377fa9e4066Sahrens return (db); 2378fa9e4066Sahrens } 2379fa9e4066Sahrens 2380a2cdcdd2SPaul Dagnelie typedef struct dbuf_prefetch_arg { 2381a2cdcdd2SPaul Dagnelie spa_t *dpa_spa; /* The spa to issue the prefetch in. */ 2382a2cdcdd2SPaul Dagnelie zbookmark_phys_t dpa_zb; /* The target block to prefetch. */ 2383a2cdcdd2SPaul Dagnelie int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */ 2384a2cdcdd2SPaul Dagnelie int dpa_curlevel; /* The current level that we're reading */ 2385dcbf3bd6SGeorge Wilson dnode_t *dpa_dnode; /* The dnode associated with the prefetch */ 2386a2cdcdd2SPaul Dagnelie zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */ 2387a2cdcdd2SPaul Dagnelie zio_t *dpa_zio; /* The parent zio_t for all prefetches. */ 2388a2cdcdd2SPaul Dagnelie arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */ 2389a2cdcdd2SPaul Dagnelie } dbuf_prefetch_arg_t; 2390a2cdcdd2SPaul Dagnelie 2391a2cdcdd2SPaul Dagnelie /* 2392a2cdcdd2SPaul Dagnelie * Actually issue the prefetch read for the block given. 2393a2cdcdd2SPaul Dagnelie */ 2394a2cdcdd2SPaul Dagnelie static void 2395a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp) 2396fa9e4066Sahrens { 2397a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) 2398a2cdcdd2SPaul Dagnelie return; 2399a2cdcdd2SPaul Dagnelie 2400a2cdcdd2SPaul Dagnelie arc_flags_t aflags = 2401a2cdcdd2SPaul Dagnelie dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH; 2402a2cdcdd2SPaul Dagnelie 2403a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2404a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level); 2405a2cdcdd2SPaul Dagnelie ASSERT(dpa->dpa_zio != NULL); 2406a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL, 2407a2cdcdd2SPaul Dagnelie dpa->dpa_prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2408a2cdcdd2SPaul Dagnelie &aflags, &dpa->dpa_zb); 2409a2cdcdd2SPaul Dagnelie } 2410a2cdcdd2SPaul Dagnelie 2411a2cdcdd2SPaul Dagnelie /* 2412a2cdcdd2SPaul Dagnelie * Called when an indirect block above our prefetch target is read in. 
This 2413a2cdcdd2SPaul Dagnelie * will either read in the next indirect block down the tree or issue the actual 2414a2cdcdd2SPaul Dagnelie * prefetch if the next block down is our target. 2415a2cdcdd2SPaul Dagnelie */ 2416a2cdcdd2SPaul Dagnelie static void 2417a2cdcdd2SPaul Dagnelie dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private) 2418a2cdcdd2SPaul Dagnelie { 2419a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = private; 2420a2cdcdd2SPaul Dagnelie 2421a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel); 2422a2cdcdd2SPaul Dagnelie ASSERT3S(dpa->dpa_curlevel, >, 0); 2423dcbf3bd6SGeorge Wilson 2424*fa98e487SMatthew Ahrens if (abuf == NULL) { 2425*fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error != 0); 2426*fa98e487SMatthew Ahrens kmem_free(dpa, sizeof (*dpa)); 2427*fa98e487SMatthew Ahrens return; 2428*fa98e487SMatthew Ahrens } 2429*fa98e487SMatthew Ahrens ASSERT(zio == NULL || zio->io_error == 0); 2430*fa98e487SMatthew Ahrens 2431dcbf3bd6SGeorge Wilson /* 2432dcbf3bd6SGeorge Wilson * The dpa_dnode is only valid if we are called with a NULL 2433dcbf3bd6SGeorge Wilson * zio. This indicates that the arc_read() returned without 2434dcbf3bd6SGeorge Wilson * first calling zio_read() to issue a physical read. Once 2435dcbf3bd6SGeorge Wilson * a physical read is made the dpa_dnode must be invalidated 2436dcbf3bd6SGeorge Wilson * as the locks guarding it may have been dropped. If the 2437dcbf3bd6SGeorge Wilson * dpa_dnode is still valid, then we want to add it to the dbuf 2438dcbf3bd6SGeorge Wilson * cache. To do so, we must hold the dbuf associated with the block 2439dcbf3bd6SGeorge Wilson * we just prefetched, read its contents so that we associate it 2440dcbf3bd6SGeorge Wilson * with an arc_buf_t, and then release it. 
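 *
 * Editorial summary of the two cases, added for clarity (not part of
 * the original source):
 *
 *	zio != NULL: a physical read was issued and the locks guarding
 *	    dpa_dnode may have been dropped, so dpa_dnode is cleared.
 *	zio == NULL: the read was satisfied from the ARC, dpa_dnode is
 *	    still valid, so the dbuf is held, read, and released to
 *	    seed the dbuf cache.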
2441dcbf3bd6SGeorge Wilson */ 2442a2cdcdd2SPaul Dagnelie if (zio != NULL) { 2443a2cdcdd2SPaul Dagnelie ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel); 2444dcbf3bd6SGeorge Wilson if (zio->io_flags & ZIO_FLAG_RAW) { 2445dcbf3bd6SGeorge Wilson ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size); 2446dcbf3bd6SGeorge Wilson } else { 2447a2cdcdd2SPaul Dagnelie ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size); 2448dcbf3bd6SGeorge Wilson } 2449a2cdcdd2SPaul Dagnelie ASSERT3P(zio->io_spa, ==, dpa->dpa_spa); 2450dcbf3bd6SGeorge Wilson 2451dcbf3bd6SGeorge Wilson dpa->dpa_dnode = NULL; 2452dcbf3bd6SGeorge Wilson } else if (dpa->dpa_dnode != NULL) { 2453dcbf3bd6SGeorge Wilson uint64_t curblkid = dpa->dpa_zb.zb_blkid >> 2454dcbf3bd6SGeorge Wilson (dpa->dpa_epbs * (dpa->dpa_curlevel - 2455dcbf3bd6SGeorge Wilson dpa->dpa_zb.zb_level)); 2456dcbf3bd6SGeorge Wilson dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, 2457dcbf3bd6SGeorge Wilson dpa->dpa_curlevel, curblkid, FTAG); 2458dcbf3bd6SGeorge Wilson (void) dbuf_read(db, NULL, 2459dcbf3bd6SGeorge Wilson DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); 2460dcbf3bd6SGeorge Wilson dbuf_rele(db, FTAG); 2461a2cdcdd2SPaul Dagnelie } 2462a2cdcdd2SPaul Dagnelie 2463a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel--; 2464a2cdcdd2SPaul Dagnelie 2465a2cdcdd2SPaul Dagnelie uint64_t nextblkid = dpa->dpa_zb.zb_blkid >> 2466a2cdcdd2SPaul Dagnelie (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level)); 2467a2cdcdd2SPaul Dagnelie blkptr_t *bp = ((blkptr_t *)abuf->b_data) + 2468a2cdcdd2SPaul Dagnelie P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs); 2469*fa98e487SMatthew Ahrens if (BP_IS_HOLE(bp)) { 2470a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2471a2cdcdd2SPaul Dagnelie } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) { 2472a2cdcdd2SPaul Dagnelie ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid); 2473a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, bp); 2474a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2475a2cdcdd2SPaul Dagnelie } else { 2476a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 2477a2cdcdd2SPaul Dagnelie zbookmark_phys_t zb; 2478a2cdcdd2SPaul Dagnelie 247927295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 248027295216Sbenrubson if (dpa->dpa_aflags & ARC_FLAG_L2CACHE) 248127295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 248227295216Sbenrubson 2483a2cdcdd2SPaul Dagnelie ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp)); 2484a2cdcdd2SPaul Dagnelie 2485a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset, 2486a2cdcdd2SPaul Dagnelie dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid); 2487a2cdcdd2SPaul Dagnelie 2488a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2489a2cdcdd2SPaul Dagnelie bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio, 2490a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2491a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2492a2cdcdd2SPaul Dagnelie } 2493dcbf3bd6SGeorge Wilson 2494dcbf3bd6SGeorge Wilson arc_buf_destroy(abuf, private); 2495a2cdcdd2SPaul Dagnelie } 2496a2cdcdd2SPaul Dagnelie 2497a2cdcdd2SPaul Dagnelie /* 2498a2cdcdd2SPaul Dagnelie * Issue prefetch reads for the given block on the given level. If the indirect 2499a2cdcdd2SPaul Dagnelie * blocks above that block are not in memory, we will read them in 2500a2cdcdd2SPaul Dagnelie * asynchronously. As a result, this call never blocks waiting for a read to 2501a2cdcdd2SPaul Dagnelie * complete. 
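 *
 * A minimal illustrative caller (editorial sketch, not part of the
 * original source; the dmu_zfetch code is one real caller). It
 * assumes the dnode is held and that dn_struct_rwlock is held, as
 * asserted below:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ,
 *	    ARC_FLAG_PREDICTIVE_PREFETCH);
 *	rw_exit(&dn->dn_struct_rwlock);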
2502a2cdcdd2SPaul Dagnelie */ 2503a2cdcdd2SPaul Dagnelie void 2504a2cdcdd2SPaul Dagnelie dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio, 2505a2cdcdd2SPaul Dagnelie arc_flags_t aflags) 2506a2cdcdd2SPaul Dagnelie { 2507a2cdcdd2SPaul Dagnelie blkptr_t bp; 2508a2cdcdd2SPaul Dagnelie int epbs, nlevels, curlevel; 2509a2cdcdd2SPaul Dagnelie uint64_t curblkid; 2510fa9e4066Sahrens 25110a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2512fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2513fa9e4066Sahrens 2514cf6106c8SMatthew Ahrens if (blkid > dn->dn_maxblkid) 2515cf6106c8SMatthew Ahrens return; 2516cf6106c8SMatthew Ahrens 2517fa9e4066Sahrens if (dnode_block_freed(dn, blkid)) 2518fa9e4066Sahrens return; 2519fa9e4066Sahrens 2520fa9e4066Sahrens /* 2521a2cdcdd2SPaul Dagnelie * This dnode hasn't been written to disk yet, so there's nothing to 2522a2cdcdd2SPaul Dagnelie * prefetch. 2523fa9e4066Sahrens */ 2524a2cdcdd2SPaul Dagnelie nlevels = dn->dn_phys->dn_nlevels; 2525a2cdcdd2SPaul Dagnelie if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0) 2526a2cdcdd2SPaul Dagnelie return; 2527a2cdcdd2SPaul Dagnelie 2528a2cdcdd2SPaul Dagnelie epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 2529a2cdcdd2SPaul Dagnelie if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level)) 2530a2cdcdd2SPaul Dagnelie return; 2531a2cdcdd2SPaul Dagnelie 2532a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object, 2533a2cdcdd2SPaul Dagnelie level, blkid); 2534a2cdcdd2SPaul Dagnelie if (db != NULL) { 2535fa9e4066Sahrens mutex_exit(&db->db_mtx); 2536a2cdcdd2SPaul Dagnelie /* 2537a2cdcdd2SPaul Dagnelie * This dbuf already exists. It is either CACHED, or 2538a2cdcdd2SPaul Dagnelie * (we assume) about to be read or filled. 2539a2cdcdd2SPaul Dagnelie */ 2540fa9e4066Sahrens return; 2541fa9e4066Sahrens } 2542fa9e4066Sahrens 2543a2cdcdd2SPaul Dagnelie /* 2544a2cdcdd2SPaul Dagnelie * Find the closest ancestor (indirect block) of the target block 2545a2cdcdd2SPaul Dagnelie * that is present in the cache. In this indirect block, we will 2546a2cdcdd2SPaul Dagnelie * find the bp that is at curlevel, curblkid. 2547a2cdcdd2SPaul Dagnelie */ 2548a2cdcdd2SPaul Dagnelie curlevel = level; 2549a2cdcdd2SPaul Dagnelie curblkid = blkid; 2550a2cdcdd2SPaul Dagnelie while (curlevel < nlevels - 1) { 2551a2cdcdd2SPaul Dagnelie int parent_level = curlevel + 1; 2552a2cdcdd2SPaul Dagnelie uint64_t parent_blkid = curblkid >> epbs; 2553a2cdcdd2SPaul Dagnelie dmu_buf_impl_t *db; 2554a2cdcdd2SPaul Dagnelie 2555a2cdcdd2SPaul Dagnelie if (dbuf_hold_impl(dn, parent_level, parent_blkid, 2556a2cdcdd2SPaul Dagnelie FALSE, TRUE, FTAG, &db) == 0) { 2557a2cdcdd2SPaul Dagnelie blkptr_t *bpp = db->db_buf->b_data; 2558a2cdcdd2SPaul Dagnelie bp = bpp[P2PHASE(curblkid, 1 << epbs)]; 2559a2cdcdd2SPaul Dagnelie dbuf_rele(db, FTAG); 2560a2cdcdd2SPaul Dagnelie break; 2561a2cdcdd2SPaul Dagnelie } 2562a2cdcdd2SPaul Dagnelie 2563a2cdcdd2SPaul Dagnelie curlevel = parent_level; 2564a2cdcdd2SPaul Dagnelie curblkid = parent_blkid; 2565a2cdcdd2SPaul Dagnelie } 2566a2cdcdd2SPaul Dagnelie 2567a2cdcdd2SPaul Dagnelie if (curlevel == nlevels - 1) { 2568a2cdcdd2SPaul Dagnelie /* No cached indirect blocks found. 
*/ 2569a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr); 2570a2cdcdd2SPaul Dagnelie bp = dn->dn_phys->dn_blkptr[curblkid]; 2571a2cdcdd2SPaul Dagnelie } 2572a2cdcdd2SPaul Dagnelie if (BP_IS_HOLE(&bp)) 2573a2cdcdd2SPaul Dagnelie return; 2574a2cdcdd2SPaul Dagnelie 2575a2cdcdd2SPaul Dagnelie ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp)); 2576a2cdcdd2SPaul Dagnelie 2577a2cdcdd2SPaul Dagnelie zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL, 2578a2cdcdd2SPaul Dagnelie ZIO_FLAG_CANFAIL); 2579a2cdcdd2SPaul Dagnelie 2580a2cdcdd2SPaul Dagnelie dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP); 2581b24ab676SJeff Bonwick dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset; 2582a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2583a2cdcdd2SPaul Dagnelie dn->dn_object, level, blkid); 2584a2cdcdd2SPaul Dagnelie dpa->dpa_curlevel = curlevel; 2585a2cdcdd2SPaul Dagnelie dpa->dpa_prio = prio; 2586a2cdcdd2SPaul Dagnelie dpa->dpa_aflags = aflags; 2587a2cdcdd2SPaul Dagnelie dpa->dpa_spa = dn->dn_objset->os_spa; 2588dcbf3bd6SGeorge Wilson dpa->dpa_dnode = dn; 2589a2cdcdd2SPaul Dagnelie dpa->dpa_epbs = epbs; 2590a2cdcdd2SPaul Dagnelie dpa->dpa_zio = pio; 2591a2cdcdd2SPaul Dagnelie 259227295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 259327295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 259427295216Sbenrubson dpa->dpa_aflags |= ARC_FLAG_L2CACHE; 259527295216Sbenrubson 2596a2cdcdd2SPaul Dagnelie /* 2597a2cdcdd2SPaul Dagnelie * If we have the indirect just above us, no need to do the asynchronous 2598a2cdcdd2SPaul Dagnelie * prefetch chain; we'll just run the last step ourselves. If we're at 2599a2cdcdd2SPaul Dagnelie * a higher level, though, we want to issue the prefetches for all the 2600a2cdcdd2SPaul Dagnelie * indirect blocks asynchronously, so we can go on with whatever we were 2601a2cdcdd2SPaul Dagnelie * doing. 2602a2cdcdd2SPaul Dagnelie */ 2603a2cdcdd2SPaul Dagnelie if (curlevel == level) { 2604a2cdcdd2SPaul Dagnelie ASSERT3U(curblkid, ==, blkid); 2605a2cdcdd2SPaul Dagnelie dbuf_issue_final_prefetch(dpa, &bp); 2606a2cdcdd2SPaul Dagnelie kmem_free(dpa, sizeof (*dpa)); 2607a2cdcdd2SPaul Dagnelie } else { 2608a2cdcdd2SPaul Dagnelie arc_flags_t iter_aflags = ARC_FLAG_NOWAIT; 26097802d7bfSMatthew Ahrens zbookmark_phys_t zb; 2610b24ab676SJeff Bonwick 261127295216Sbenrubson /* flag if L2ARC eligible, l2arc_noprefetch then decides */ 261227295216Sbenrubson if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level)) 261327295216Sbenrubson iter_aflags |= ARC_FLAG_L2CACHE; 261427295216Sbenrubson 2615a2cdcdd2SPaul Dagnelie SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET, 2616a2cdcdd2SPaul Dagnelie dn->dn_object, curlevel, curblkid); 2617a2cdcdd2SPaul Dagnelie (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, 2618a2cdcdd2SPaul Dagnelie &bp, dbuf_prefetch_indirect_done, dpa, prio, 2619fa9e4066Sahrens ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, 2620a2cdcdd2SPaul Dagnelie &iter_aflags, &zb); 2621fa9e4066Sahrens } 2622a2cdcdd2SPaul Dagnelie /* 2623a2cdcdd2SPaul Dagnelie * We use pio here instead of dpa_zio since it's possible that 2624a2cdcdd2SPaul Dagnelie * dpa may have already been freed. 2625a2cdcdd2SPaul Dagnelie */ 2626a2cdcdd2SPaul Dagnelie zio_nowait(pio); 2627fa9e4066Sahrens } 2628fa9e4066Sahrens 2629fa9e4066Sahrens /* 2630fa9e4066Sahrens * Returns with db_holds incremented, and db_mtx not held. 2631fa9e4066Sahrens * Note: dn_struct_rwlock must be held. 
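 *
 * Typical hold/release pattern (editorial sketch, not part of the
 * original source):
 *
 *	dmu_buf_impl_t *db;
 *	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	if (err == 0) {
 *		... read or dirty the buffer ...
 *		dbuf_rele(db, FTAG);
 *	}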
2632fa9e4066Sahrens */ 2633fa9e4066Sahrens int 2634a2cdcdd2SPaul Dagnelie dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, 2635a2cdcdd2SPaul Dagnelie boolean_t fail_sparse, boolean_t fail_uncached, 2636fa9e4066Sahrens void *tag, dmu_buf_impl_t **dbp) 2637fa9e4066Sahrens { 2638fa9e4066Sahrens dmu_buf_impl_t *db, *parent = NULL; 2639fa9e4066Sahrens 26400a586ceaSMark Shellenbaum ASSERT(blkid != DMU_BONUS_BLKID); 2641fa9e4066Sahrens ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock)); 2642fa9e4066Sahrens ASSERT3U(dn->dn_nlevels, >, level); 2643fa9e4066Sahrens 2644fa9e4066Sahrens *dbp = NULL; 2645ea8dc4b6Seschrock top: 2646fa9e4066Sahrens /* dbuf_find() returns with db_mtx held */ 2647e57a022bSJustin T. Gibbs db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid); 2648fa9e4066Sahrens 2649fa9e4066Sahrens if (db == NULL) { 2650fa9e4066Sahrens blkptr_t *bp = NULL; 2651fa9e4066Sahrens int err; 2652fa9e4066Sahrens 2653a2cdcdd2SPaul Dagnelie if (fail_uncached) 2654a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2655a2cdcdd2SPaul Dagnelie 2656c543ec06Sahrens ASSERT3P(parent, ==, NULL); 2657fa9e4066Sahrens err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp); 2658fa9e4066Sahrens if (fail_sparse) { 2659fa9e4066Sahrens if (err == 0 && bp && BP_IS_HOLE(bp)) 2660be6fd75aSMatthew Ahrens err = SET_ERROR(ENOENT); 2661fa9e4066Sahrens if (err) { 2662c543ec06Sahrens if (parent) 2663ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2664fa9e4066Sahrens return (err); 2665fa9e4066Sahrens } 2666fa9e4066Sahrens } 2667ea8dc4b6Seschrock if (err && err != ENOENT) 2668ea8dc4b6Seschrock return (err); 2669fa9e4066Sahrens db = dbuf_create(dn, level, blkid, parent, bp); 2670fa9e4066Sahrens } 2671fa9e4066Sahrens 2672a2cdcdd2SPaul Dagnelie if (fail_uncached && db->db_state != DB_CACHED) { 2673a2cdcdd2SPaul Dagnelie mutex_exit(&db->db_mtx); 2674a2cdcdd2SPaul Dagnelie return (SET_ERROR(ENOENT)); 2675a2cdcdd2SPaul Dagnelie } 2676a2cdcdd2SPaul Dagnelie 2677dcbf3bd6SGeorge Wilson if (db->db_buf != NULL) 2678ea8dc4b6Seschrock ASSERT3P(db->db.db_data, ==, db->db_buf->b_data); 2679ea8dc4b6Seschrock 2680ea8dc4b6Seschrock ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf)); 2681ea8dc4b6Seschrock 2682fa9e4066Sahrens /* 2683c717a561Smaybee * If this buffer is currently syncing out, and we are 2684c717a561Smaybee * still referencing it from db_data, we need to make a copy 2685c717a561Smaybee * of it in case we decide we want to dirty it again in this txg.
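 *
 * Editorial illustration (not part of the original source): if txg N
 * is syncing this buffer while it is still referenced from db_data,
 * and the caller then dirties it in open txg N+1, writing into the
 * shared buffer could corrupt the data that txg N's in-flight I/O is
 * writing out; the copy made below keeps the two txgs on separate
 * buffers.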
2686fa9e4066Sahrens */ 26870a586ceaSMark Shellenbaum if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID && 2688ea8dc4b6Seschrock dn->dn_object != DMU_META_DNODE_OBJECT && 2689c717a561Smaybee db->db_state == DB_CACHED && db->db_data_pending) { 2690c717a561Smaybee dbuf_dirty_record_t *dr = db->db_data_pending; 2691c717a561Smaybee 2692c717a561Smaybee if (dr->dt.dl.dr_data == db->db_buf) { 2693ad23a2dbSjohansen arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 2694fa9e4066Sahrens 2695c717a561Smaybee dbuf_set_data(db, 26965602294fSDan Kimmel arc_alloc_buf(dn->dn_objset->os_spa, db, type, 26975602294fSDan Kimmel db->db.db_size)); 2698c717a561Smaybee bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data, 2699fa9e4066Sahrens db->db.db_size); 2700fa9e4066Sahrens } 2701c717a561Smaybee } 2702fa9e4066Sahrens 2703dcbf3bd6SGeorge Wilson if (multilist_link_active(&db->db_cache_link)) { 2704dcbf3bd6SGeorge Wilson ASSERT(refcount_is_zero(&db->db_holds)); 2705adb52d92SMatthew Ahrens ASSERT(db->db_caching_status == DB_DBUF_CACHE || 2706adb52d92SMatthew Ahrens db->db_caching_status == DB_DBUF_METADATA_CACHE); 2707adb52d92SMatthew Ahrens 2708adb52d92SMatthew Ahrens multilist_remove(dbuf_caches[db->db_caching_status].cache, db); 2709adb52d92SMatthew Ahrens (void) refcount_remove_many( 2710adb52d92SMatthew Ahrens &dbuf_caches[db->db_caching_status].size, 2711dcbf3bd6SGeorge Wilson db->db.db_size, db); 2712adb52d92SMatthew Ahrens 2713adb52d92SMatthew Ahrens db->db_caching_status = DB_NO_CACHE; 2714dcbf3bd6SGeorge Wilson } 2715ea8dc4b6Seschrock (void) refcount_add(&db->db_holds, tag); 27169c9dc39aSek110237 DBUF_VERIFY(db); 2717fa9e4066Sahrens mutex_exit(&db->db_mtx); 2718fa9e4066Sahrens 2719fa9e4066Sahrens /* NOTE: we can't rele the parent until after we drop the db_mtx */ 2720c543ec06Sahrens if (parent) 2721ea8dc4b6Seschrock dbuf_rele(parent, NULL); 2722fa9e4066Sahrens 2723744947dcSTom Erickson ASSERT3P(DB_DNODE(db), ==, dn); 2724fa9e4066Sahrens ASSERT3U(db->db_blkid, ==, blkid); 2725fa9e4066Sahrens ASSERT3U(db->db_level, ==, level); 2726fa9e4066Sahrens *dbp = db; 2727fa9e4066Sahrens 2728fa9e4066Sahrens return (0); 2729fa9e4066Sahrens } 2730fa9e4066Sahrens 2731fa9e4066Sahrens dmu_buf_impl_t * 2732ea8dc4b6Seschrock dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag) 2733fa9e4066Sahrens { 2734a2cdcdd2SPaul Dagnelie return (dbuf_hold_level(dn, 0, blkid, tag)); 2735fa9e4066Sahrens } 2736fa9e4066Sahrens 2737fa9e4066Sahrens dmu_buf_impl_t * 2738fa9e4066Sahrens dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag) 2739fa9e4066Sahrens { 2740fa9e4066Sahrens dmu_buf_impl_t *db; 2741a2cdcdd2SPaul Dagnelie int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db); 2742ea8dc4b6Seschrock return (err ? 
NULL : db); 2743fa9e4066Sahrens } 2744fa9e4066Sahrens 27451934e92fSmaybee void 2746ea8dc4b6Seschrock dbuf_create_bonus(dnode_t *dn) 2747fa9e4066Sahrens { 2748ea8dc4b6Seschrock ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock)); 2749ea8dc4b6Seschrock 2750ea8dc4b6Seschrock ASSERT(dn->dn_bonus == NULL); 27510a586ceaSMark Shellenbaum dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL); 27520a586ceaSMark Shellenbaum } 27530a586ceaSMark Shellenbaum 27540a586ceaSMark Shellenbaum int 27550a586ceaSMark Shellenbaum dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx) 27560a586ceaSMark Shellenbaum { 27570a586ceaSMark Shellenbaum dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2758744947dcSTom Erickson dnode_t *dn; 2759744947dcSTom Erickson 27600a586ceaSMark Shellenbaum if (db->db_blkid != DMU_SPILL_BLKID) 2761be6fd75aSMatthew Ahrens return (SET_ERROR(ENOTSUP)); 27620a586ceaSMark Shellenbaum if (blksz == 0) 27630a586ceaSMark Shellenbaum blksz = SPA_MINBLOCKSIZE; 2764b5152584SMatthew Ahrens ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset))); 27650a586ceaSMark Shellenbaum blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE); 27660a586ceaSMark Shellenbaum 2767744947dcSTom Erickson DB_DNODE_ENTER(db); 2768744947dcSTom Erickson dn = DB_DNODE(db); 2769744947dcSTom Erickson rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 27700a586ceaSMark Shellenbaum dbuf_new_size(db, blksz, tx); 2771744947dcSTom Erickson rw_exit(&dn->dn_struct_rwlock); 2772744947dcSTom Erickson DB_DNODE_EXIT(db); 27730a586ceaSMark Shellenbaum 27740a586ceaSMark Shellenbaum return (0); 27750a586ceaSMark Shellenbaum } 27760a586ceaSMark Shellenbaum 27770a586ceaSMark Shellenbaum void 27780a586ceaSMark Shellenbaum dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx) 27790a586ceaSMark Shellenbaum { 27800a586ceaSMark Shellenbaum dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx); 2781fa9e4066Sahrens } 2782fa9e4066Sahrens 2783ea8dc4b6Seschrock #pragma weak dmu_buf_add_ref = dbuf_add_ref 2784fa9e4066Sahrens void 2785fa9e4066Sahrens dbuf_add_ref(dmu_buf_impl_t *db, void *tag) 2786fa9e4066Sahrens { 2787ea8dc4b6Seschrock int64_t holds = refcount_add(&db->db_holds, tag); 2788dcbf3bd6SGeorge Wilson ASSERT3S(holds, >, 1); 2789fa9e4066Sahrens } 2790fa9e4066Sahrens 2791e57a022bSJustin T. Gibbs #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref 2792e57a022bSJustin T. Gibbs boolean_t 2793e57a022bSJustin T. Gibbs dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid, 2794e57a022bSJustin T. Gibbs void *tag) 2795e57a022bSJustin T. Gibbs { 2796e57a022bSJustin T. Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2797e57a022bSJustin T. Gibbs dmu_buf_impl_t *found_db; 2798e57a022bSJustin T. Gibbs boolean_t result = B_FALSE; 2799e57a022bSJustin T. Gibbs 2800e57a022bSJustin T. Gibbs if (db->db_blkid == DMU_BONUS_BLKID) 2801e57a022bSJustin T. Gibbs found_db = dbuf_find_bonus(os, obj); 2802e57a022bSJustin T. Gibbs else 2803e57a022bSJustin T. Gibbs found_db = dbuf_find(os, obj, 0, blkid); 2804e57a022bSJustin T. Gibbs 2805e57a022bSJustin T. Gibbs if (found_db != NULL) { 2806e57a022bSJustin T. Gibbs if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) { 2807e57a022bSJustin T. Gibbs (void) refcount_add(&db->db_holds, tag); 2808e57a022bSJustin T. Gibbs result = B_TRUE; 2809e57a022bSJustin T. Gibbs } 2810e57a022bSJustin T. Gibbs mutex_exit(&db->db_mtx); 2811e57a022bSJustin T. Gibbs } 2812e57a022bSJustin T. Gibbs return (result); 2813e57a022bSJustin T. Gibbs } 2814e57a022bSJustin T. 
Gibbs 2815744947dcSTom Erickson /* 2816744947dcSTom Erickson * If you call dbuf_rele() you had better not be referencing the dnode handle 2817744947dcSTom Erickson * unless you have some other direct or indirect hold on the dnode. (An indirect 2818744947dcSTom Erickson * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) 2819744947dcSTom Erickson * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the 2820744947dcSTom Erickson * dnode's parent dbuf evicting its dnode handles. 2821744947dcSTom Erickson */ 2822fa9e4066Sahrens void 2823ea8dc4b6Seschrock dbuf_rele(dmu_buf_impl_t *db, void *tag) 2824fa9e4066Sahrens { 2825b24ab676SJeff Bonwick mutex_enter(&db->db_mtx); 2826b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, tag); 2827b24ab676SJeff Bonwick } 2828b24ab676SJeff Bonwick 282943466aaeSMax Grossman void 283043466aaeSMax Grossman dmu_buf_rele(dmu_buf_t *db, void *tag) 283143466aaeSMax Grossman { 283243466aaeSMax Grossman dbuf_rele((dmu_buf_impl_t *)db, tag); 283343466aaeSMax Grossman } 283443466aaeSMax Grossman 2835b24ab676SJeff Bonwick /* 2836b24ab676SJeff Bonwick * dbuf_rele() for an already-locked dbuf. This is necessary to allow 2837b24ab676SJeff Bonwick * db_dirtycnt and db_holds to be updated atomically. 2838b24ab676SJeff Bonwick */ 2839b24ab676SJeff Bonwick void 2840b24ab676SJeff Bonwick dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag) 2841b24ab676SJeff Bonwick { 2842fa9e4066Sahrens int64_t holds; 2843fa9e4066Sahrens 2844b24ab676SJeff Bonwick ASSERT(MUTEX_HELD(&db->db_mtx)); 28459c9dc39aSek110237 DBUF_VERIFY(db); 2846fa9e4066Sahrens 2847744947dcSTom Erickson /* 2848744947dcSTom Erickson * Remove the reference to the dbuf before removing its hold on the 2849744947dcSTom Erickson * dnode so we can guarantee in dnode_move() that a referenced bonus 2850744947dcSTom Erickson * buffer has a corresponding dnode hold. 2851744947dcSTom Erickson */ 2852fa9e4066Sahrens holds = refcount_remove(&db->db_holds, tag); 2853ea8dc4b6Seschrock ASSERT(holds >= 0); 2854fa9e4066Sahrens 2855c717a561Smaybee /* 2856c717a561Smaybee * We can't freeze indirects if there is a possibility that they 2857c717a561Smaybee * may be modified in the current syncing context. 2858c717a561Smaybee */ 2859dcbf3bd6SGeorge Wilson if (db->db_buf != NULL && 2860dcbf3bd6SGeorge Wilson holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) { 28616b4acc8bSahrens arc_buf_freeze(db->db_buf); 2862dcbf3bd6SGeorge Wilson } 28636b4acc8bSahrens 2864fa9e4066Sahrens if (holds == db->db_dirtycnt && 2865d2058105SJustin T. Gibbs db->db_level == 0 && db->db_user_immediate_evict) 2866fa9e4066Sahrens dbuf_evict_user(db); 2867ea8dc4b6Seschrock 2868ea8dc4b6Seschrock if (holds == 0) { 28690a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 2870cd485b49SJustin T. Gibbs dnode_t *dn; 2871d2058105SJustin T. Gibbs boolean_t evict_dbuf = db->db_pending_evict; 2872cd485b49SJustin T. Gibbs 2873cd485b49SJustin T. Gibbs /* 2874cd485b49SJustin T. Gibbs * If the dnode moves here, we cannot cross this 2875cd485b49SJustin T. Gibbs * barrier until the move completes. 2876cd485b49SJustin T. Gibbs */ 2877cd485b49SJustin T. Gibbs DB_DNODE_ENTER(db); 2878cd485b49SJustin T. Gibbs 2879cd485b49SJustin T. Gibbs dn = DB_DNODE(db); 2880cd485b49SJustin T. Gibbs atomic_dec_32(&dn->dn_dbufs_count); 2881cd485b49SJustin T. Gibbs 2882cd485b49SJustin T. Gibbs /* 2883cd485b49SJustin T. Gibbs * Decrementing the dbuf count means that the bonus 2884cd485b49SJustin T. 
Gibbs * buffer's dnode hold is no longer discounted in 2885cd485b49SJustin T. Gibbs * dnode_move(). The dnode cannot move until after 2886d2058105SJustin T. Gibbs * the dnode_rele() below. 2887cd485b49SJustin T. Gibbs */ 2888cd485b49SJustin T. Gibbs DB_DNODE_EXIT(db); 2889cd485b49SJustin T. Gibbs 2890cd485b49SJustin T. Gibbs /* 2891cd485b49SJustin T. Gibbs * Do not reference db after its lock is dropped. 2892cd485b49SJustin T. Gibbs * Another thread may evict it. 2893cd485b49SJustin T. Gibbs */ 2894ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 2895744947dcSTom Erickson 2896d2058105SJustin T. Gibbs if (evict_dbuf) 2897cd485b49SJustin T. Gibbs dnode_evict_bonus(dn); 2898d2058105SJustin T. Gibbs 2899d2058105SJustin T. Gibbs dnode_rele(dn, db); 2900ea8dc4b6Seschrock } else if (db->db_buf == NULL) { 2901ea8dc4b6Seschrock /* 2902ea8dc4b6Seschrock * This is a special case: we never associated this 2903ea8dc4b6Seschrock * dbuf with any data allocated from the ARC. 2904ea8dc4b6Seschrock */ 290582c9918fSTim Haley ASSERT(db->db_state == DB_UNCACHED || 290682c9918fSTim Haley db->db_state == DB_NOFILL); 2907dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2908ea8dc4b6Seschrock } else if (arc_released(db->db_buf)) { 2909ea8dc4b6Seschrock /* 2910ea8dc4b6Seschrock * This dbuf has anonymous data associated with it. 2911ea8dc4b6Seschrock */ 2912dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2913ea8dc4b6Seschrock } else { 2914dcbf3bd6SGeorge Wilson boolean_t do_arc_evict = B_FALSE; 2915dcbf3bd6SGeorge Wilson blkptr_t bp; 2916dcbf3bd6SGeorge Wilson spa_t *spa = dmu_objset_spa(db->db_objset); 29179253d63dSGeorge Wilson 2918dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) && 2919dcbf3bd6SGeorge Wilson db->db_blkptr != NULL && 2920bbfa8ea8SMatthew Ahrens !BP_IS_HOLE(db->db_blkptr) && 2921bbfa8ea8SMatthew Ahrens !BP_IS_EMBEDDED(db->db_blkptr)) { 2922dcbf3bd6SGeorge Wilson do_arc_evict = B_TRUE; 2923dcbf3bd6SGeorge Wilson bp = *db->db_blkptr; 2924dcbf3bd6SGeorge Wilson } 2925dcbf3bd6SGeorge Wilson 2926dcbf3bd6SGeorge Wilson if (!DBUF_IS_CACHEABLE(db) || 2927dcbf3bd6SGeorge Wilson db->db_pending_evict) { 2928dcbf3bd6SGeorge Wilson dbuf_destroy(db); 2929dcbf3bd6SGeorge Wilson } else if (!multilist_link_active(&db->db_cache_link)) { 2930adb52d92SMatthew Ahrens ASSERT3U(db->db_caching_status, ==, 2931adb52d92SMatthew Ahrens DB_NO_CACHE); 2932adb52d92SMatthew Ahrens 2933adb52d92SMatthew Ahrens dbuf_cached_state_t dcs = 2934adb52d92SMatthew Ahrens dbuf_include_in_metadata_cache(db) ? 
2935adb52d92SMatthew Ahrens DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE; 2936adb52d92SMatthew Ahrens db->db_caching_status = dcs; 2937adb52d92SMatthew Ahrens 2938adb52d92SMatthew Ahrens multilist_insert(dbuf_caches[dcs].cache, db); 2939adb52d92SMatthew Ahrens (void) refcount_add_many(&dbuf_caches[dcs].size, 2940dcbf3bd6SGeorge Wilson db->db.db_size, db); 2941dcbf3bd6SGeorge Wilson mutex_exit(&db->db_mtx); 2942dcbf3bd6SGeorge Wilson 2943adb52d92SMatthew Ahrens if (db->db_caching_status == DB_DBUF_CACHE) { 2944dcbf3bd6SGeorge Wilson dbuf_evict_notify(); 2945dcbf3bd6SGeorge Wilson } 2946adb52d92SMatthew Ahrens } 2947dcbf3bd6SGeorge Wilson 2948dcbf3bd6SGeorge Wilson if (do_arc_evict) 2949bbfa8ea8SMatthew Ahrens arc_freed(spa, &bp); 2950bbfa8ea8SMatthew Ahrens } 2951ea8dc4b6Seschrock } else { 2952ea8dc4b6Seschrock mutex_exit(&db->db_mtx); 2953fa9e4066Sahrens } 2954dcbf3bd6SGeorge Wilson 2955fa9e4066Sahrens } 2956fa9e4066Sahrens 2957fa9e4066Sahrens #pragma weak dmu_buf_refcount = dbuf_refcount 2958fa9e4066Sahrens uint64_t 2959fa9e4066Sahrens dbuf_refcount(dmu_buf_impl_t *db) 2960fa9e4066Sahrens { 2961fa9e4066Sahrens return (refcount_count(&db->db_holds)); 2962fa9e4066Sahrens } 2963fa9e4066Sahrens 2964fa9e4066Sahrens void * 2965bc9014e6SJustin Gibbs dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user, 2966bc9014e6SJustin Gibbs dmu_buf_user_t *new_user) 2967fa9e4066Sahrens { 2968bc9014e6SJustin Gibbs dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2969bc9014e6SJustin Gibbs 2970bc9014e6SJustin Gibbs mutex_enter(&db->db_mtx); 2971bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2972bc9014e6SJustin Gibbs if (db->db_user == old_user) 2973bc9014e6SJustin Gibbs db->db_user = new_user; 2974bc9014e6SJustin Gibbs else 2975bc9014e6SJustin Gibbs old_user = db->db_user; 2976bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 2977bc9014e6SJustin Gibbs mutex_exit(&db->db_mtx); 2978bc9014e6SJustin Gibbs 2979bc9014e6SJustin Gibbs return (old_user); 2980fa9e4066Sahrens } 2981fa9e4066Sahrens 2982fa9e4066Sahrens void * 2983bc9014e6SJustin Gibbs dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2984bc9014e6SJustin Gibbs { 2985bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, NULL, user)); 2986bc9014e6SJustin Gibbs } 2987bc9014e6SJustin Gibbs 2988bc9014e6SJustin Gibbs void * 2989bc9014e6SJustin Gibbs dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2990fa9e4066Sahrens { 2991fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 2992fa9e4066Sahrens 2993d2058105SJustin T. 
Gibbs db->db_user_immediate_evict = TRUE; 2994bc9014e6SJustin Gibbs return (dmu_buf_set_user(db_fake, user)); 2995fa9e4066Sahrens } 2996fa9e4066Sahrens 2997fa9e4066Sahrens void * 2998bc9014e6SJustin Gibbs dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user) 2999fa9e4066Sahrens { 3000bc9014e6SJustin Gibbs return (dmu_buf_replace_user(db_fake, user, NULL)); 3001fa9e4066Sahrens } 3002fa9e4066Sahrens 3003fa9e4066Sahrens void * 3004fa9e4066Sahrens dmu_buf_get_user(dmu_buf_t *db_fake) 3005fa9e4066Sahrens { 3006fa9e4066Sahrens dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake; 3007fa9e4066Sahrens 3008bc9014e6SJustin Gibbs dbuf_verify_user(db, DBVU_NOT_EVICTING); 3009bc9014e6SJustin Gibbs return (db->db_user); 3010bc9014e6SJustin Gibbs } 3011bc9014e6SJustin Gibbs 3012bc9014e6SJustin Gibbs void 3013bc9014e6SJustin Gibbs dmu_buf_user_evict_wait() 3014bc9014e6SJustin Gibbs { 3015bc9014e6SJustin Gibbs taskq_wait(dbu_evict_taskq); 3016fa9e4066Sahrens } 3017fa9e4066Sahrens 301880901aeaSGeorge Wilson blkptr_t * 301980901aeaSGeorge Wilson dmu_buf_get_blkptr(dmu_buf_t *db) 302080901aeaSGeorge Wilson { 302180901aeaSGeorge Wilson dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 302280901aeaSGeorge Wilson return (dbi->db_blkptr); 302380901aeaSGeorge Wilson } 302480901aeaSGeorge Wilson 3025ae972795SMatthew Ahrens objset_t * 3026ae972795SMatthew Ahrens dmu_buf_get_objset(dmu_buf_t *db) 3027ae972795SMatthew Ahrens { 3028ae972795SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 3029ae972795SMatthew Ahrens return (dbi->db_objset); 3030ae972795SMatthew Ahrens } 3031ae972795SMatthew Ahrens 303279d72832SMatthew Ahrens dnode_t * 303379d72832SMatthew Ahrens dmu_buf_dnode_enter(dmu_buf_t *db) 303479d72832SMatthew Ahrens { 303579d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 303679d72832SMatthew Ahrens DB_DNODE_ENTER(dbi); 303779d72832SMatthew Ahrens return (DB_DNODE(dbi)); 303879d72832SMatthew Ahrens } 303979d72832SMatthew Ahrens 304079d72832SMatthew Ahrens void 304179d72832SMatthew Ahrens dmu_buf_dnode_exit(dmu_buf_t *db) 304279d72832SMatthew Ahrens { 304379d72832SMatthew Ahrens dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db; 304479d72832SMatthew Ahrens DB_DNODE_EXIT(dbi); 304579d72832SMatthew Ahrens } 304679d72832SMatthew Ahrens 3047c717a561Smaybee static void 3048c717a561Smaybee dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) 3049fa9e4066Sahrens { 3050c717a561Smaybee /* ASSERT(dmu_tx_is_syncing(tx) */ 3051c717a561Smaybee ASSERT(MUTEX_HELD(&db->db_mtx)); 3052c717a561Smaybee 3053c717a561Smaybee if (db->db_blkptr != NULL) 3054c717a561Smaybee return; 3055c717a561Smaybee 30560a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 30570a586ceaSMark Shellenbaum db->db_blkptr = &dn->dn_phys->dn_spill; 30580a586ceaSMark Shellenbaum BP_ZERO(db->db_blkptr); 30590a586ceaSMark Shellenbaum return; 30600a586ceaSMark Shellenbaum } 3061c717a561Smaybee if (db->db_level == dn->dn_phys->dn_nlevels-1) { 3062c717a561Smaybee /* 3063c717a561Smaybee * This buffer was allocated at a time when there was 3064c717a561Smaybee * no available blkptrs from the dnode, or it was 3065c717a561Smaybee * inappropriate to hook it in (i.e., nlevels mis-match). 
3066c717a561Smaybee */ 3067c717a561Smaybee ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr); 3068c717a561Smaybee ASSERT(db->db_parent == NULL); 3069c717a561Smaybee db->db_parent = dn->dn_dbuf; 3070c717a561Smaybee db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid]; 3071c717a561Smaybee DBUF_VERIFY(db); 3072c717a561Smaybee } else { 3073c717a561Smaybee dmu_buf_impl_t *parent = db->db_parent; 3074c717a561Smaybee int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; 3075c717a561Smaybee 3076c717a561Smaybee ASSERT(dn->dn_phys->dn_nlevels > 1); 3077c717a561Smaybee if (parent == NULL) { 3078c717a561Smaybee mutex_exit(&db->db_mtx); 3079c717a561Smaybee rw_enter(&dn->dn_struct_rwlock, RW_READER); 3080a2cdcdd2SPaul Dagnelie parent = dbuf_hold_level(dn, db->db_level + 1, 3081a2cdcdd2SPaul Dagnelie db->db_blkid >> epbs, db); 3082c717a561Smaybee rw_exit(&dn->dn_struct_rwlock); 3083c717a561Smaybee mutex_enter(&db->db_mtx); 3084c717a561Smaybee db->db_parent = parent; 3085c717a561Smaybee } 3086c717a561Smaybee db->db_blkptr = (blkptr_t *)parent->db.db_data + 3087c717a561Smaybee (db->db_blkid & ((1ULL << epbs) - 1)); 3088c717a561Smaybee DBUF_VERIFY(db); 3089c717a561Smaybee } 3090c717a561Smaybee } 3091c717a561Smaybee 3092c717a561Smaybee static void 3093c717a561Smaybee dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3094c717a561Smaybee { 3095c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3096744947dcSTom Erickson dnode_t *dn; 3097c717a561Smaybee zio_t *zio; 3098c717a561Smaybee 3099c717a561Smaybee ASSERT(dmu_tx_is_syncing(tx)); 3100c717a561Smaybee 3101c717a561Smaybee dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3102c717a561Smaybee 3103c717a561Smaybee mutex_enter(&db->db_mtx); 3104c717a561Smaybee 3105c717a561Smaybee ASSERT(db->db_level > 0); 3106c717a561Smaybee DBUF_VERIFY(db); 3107c717a561Smaybee 31083e30c24aSWill Andrews /* Read the block if it hasn't been read yet. */ 3109c717a561Smaybee if (db->db_buf == NULL) { 3110c717a561Smaybee mutex_exit(&db->db_mtx); 3111c717a561Smaybee (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); 3112c717a561Smaybee mutex_enter(&db->db_mtx); 3113c717a561Smaybee } 3114c717a561Smaybee ASSERT3U(db->db_state, ==, DB_CACHED); 3115c717a561Smaybee ASSERT(db->db_buf != NULL); 3116c717a561Smaybee 3117744947dcSTom Erickson DB_DNODE_ENTER(db); 3118744947dcSTom Erickson dn = DB_DNODE(db); 31193e30c24aSWill Andrews /* Indirect block size must match what the dnode thinks it is. 
*/ 3120744947dcSTom Erickson ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift); 3121c717a561Smaybee dbuf_check_blkptr(dn, db); 3122744947dcSTom Erickson DB_DNODE_EXIT(db); 3123c717a561Smaybee 31243e30c24aSWill Andrews /* Provide the pending dirty record to child dbufs */ 3125c717a561Smaybee db->db_data_pending = dr; 3126c717a561Smaybee 3127af2c4821Smaybee mutex_exit(&db->db_mtx); 31285cabbc6bSPrashanth Sreenivasa 3129088f3894Sahrens dbuf_write(dr, db->db_buf, tx); 3130c717a561Smaybee 3131c717a561Smaybee zio = dr->dr_zio; 3132c717a561Smaybee mutex_enter(&dr->dt.di.dr_mtx); 313346e1baa6SMatthew Ahrens dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx); 3134c717a561Smaybee ASSERT(list_head(&dr->dt.di.dr_children) == NULL); 3135c717a561Smaybee mutex_exit(&dr->dt.di.dr_mtx); 3136c717a561Smaybee zio_nowait(zio); 3137c717a561Smaybee } 3138c717a561Smaybee 3139c717a561Smaybee static void 3140c717a561Smaybee dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx) 3141c717a561Smaybee { 3142c717a561Smaybee arc_buf_t **datap = &dr->dt.dl.dr_data; 3143c717a561Smaybee dmu_buf_impl_t *db = dr->dr_dbuf; 3144744947dcSTom Erickson dnode_t *dn; 3145744947dcSTom Erickson objset_t *os; 3146c717a561Smaybee uint64_t txg = tx->tx_txg; 3147fa9e4066Sahrens 3148fa9e4066Sahrens ASSERT(dmu_tx_is_syncing(tx)); 3149fa9e4066Sahrens 3150fa9e4066Sahrens dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr); 3151fa9e4066Sahrens 3152fa9e4066Sahrens mutex_enter(&db->db_mtx); 3153fa9e4066Sahrens /* 3154fa9e4066Sahrens * To be synced, we must be dirtied. But we 3155fa9e4066Sahrens * might have been freed after the dirty. 3156fa9e4066Sahrens */ 3157fa9e4066Sahrens if (db->db_state == DB_UNCACHED) { 3158fa9e4066Sahrens /* This buffer has been freed since it was dirtied */ 3159fa9e4066Sahrens ASSERT(db->db.db_data == NULL); 3160fa9e4066Sahrens } else if (db->db_state == DB_FILL) { 3161fa9e4066Sahrens /* This buffer was freed and is now being re-filled */ 3162c717a561Smaybee ASSERT(db->db.db_data != dr->dt.dl.dr_data); 3163fa9e4066Sahrens } else { 316482c9918fSTim Haley ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL); 3165fa9e4066Sahrens } 31669c9dc39aSek110237 DBUF_VERIFY(db); 3167fa9e4066Sahrens 3168744947dcSTom Erickson DB_DNODE_ENTER(db); 3169744947dcSTom Erickson dn = DB_DNODE(db); 3170744947dcSTom Erickson 31710a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 31720a586ceaSMark Shellenbaum mutex_enter(&dn->dn_mtx); 31730a586ceaSMark Shellenbaum dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR; 31740a586ceaSMark Shellenbaum mutex_exit(&dn->dn_mtx); 31750a586ceaSMark Shellenbaum } 31760a586ceaSMark Shellenbaum 3177fa9e4066Sahrens /* 3178c717a561Smaybee * If this is a bonus buffer, simply copy the bonus data into the 3179c717a561Smaybee * dnode. It will be written out when the dnode is synced (and it 3180c717a561Smaybee * will be synced, since it must have been dirty for dbuf_sync to 3181c717a561Smaybee * be called). 
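 *
 * Editorial note (not part of the original source): no write zio is
 * issued for the bonus buffer because it has no block pointer of its
 * own; the data travels inside the dnode_phys_t and reaches disk when
 * the dnode's own block is written.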
3182fa9e4066Sahrens */ 31830a586ceaSMark Shellenbaum if (db->db_blkid == DMU_BONUS_BLKID) { 3184c717a561Smaybee dbuf_dirty_record_t **drp; 31851934e92fSmaybee 3186ea8dc4b6Seschrock ASSERT(*datap != NULL); 3187fb09f5aaSMadhav Suresh ASSERT0(db->db_level); 3188ea8dc4b6Seschrock ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN); 3189ea8dc4b6Seschrock bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen); 3190744947dcSTom Erickson DB_DNODE_EXIT(db); 3191744947dcSTom Erickson 31920e8c6158Smaybee if (*datap != db->db.db_data) { 3193ea8dc4b6Seschrock zio_buf_free(*datap, DN_MAX_BONUSLEN); 31945a98e54bSBrendan Gregg - Sun Microsystems arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 31950e8c6158Smaybee } 3196ea8dc4b6Seschrock db->db_data_pending = NULL; 3197c717a561Smaybee drp = &db->db_last_dirty; 3198c717a561Smaybee while (*drp != dr) 3199c717a561Smaybee drp = &(*drp)->dr_next; 320017f17c2dSbonwick ASSERT(dr->dr_next == NULL); 3201b24ab676SJeff Bonwick ASSERT(dr->dr_dbuf == db); 320217f17c2dSbonwick *drp = dr->dr_next; 3203c717a561Smaybee kmem_free(dr, sizeof (dbuf_dirty_record_t)); 3204ea8dc4b6Seschrock ASSERT(db->db_dirtycnt > 0); 3205ea8dc4b6Seschrock db->db_dirtycnt -= 1; 3206b24ab676SJeff Bonwick dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg); 3207ea8dc4b6Seschrock return; 3208ea8dc4b6Seschrock } 3209ea8dc4b6Seschrock 3210744947dcSTom Erickson os = dn->dn_objset; 3211744947dcSTom Erickson 3212c5c6ffa0Smaybee /* 3213f82bfe17Sgw25295 * This function may have dropped the db_mtx lock allowing a dmu_sync 3214f82bfe17Sgw25295 * operation to sneak in. As a result, we need to ensure that we 3215f82bfe17Sgw25295 * don't check the dr_override_state until we have returned from 3216f82bfe17Sgw25295 * dbuf_check_blkptr. 3217f82bfe17Sgw25295 */ 3218f82bfe17Sgw25295 dbuf_check_blkptr(dn, db); 3219f82bfe17Sgw25295 3220f82bfe17Sgw25295 /* 3221744947dcSTom Erickson * If this buffer is in the middle of an immediate write, 3222c717a561Smaybee * wait for the synchronous IO to complete. 3223c5c6ffa0Smaybee */ 3224c717a561Smaybee while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) { 3225c5c6ffa0Smaybee ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); 3226c5c6ffa0Smaybee cv_wait(&db->db_changed, &db->db_mtx); 3227c717a561Smaybee ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN); 3228c5c6ffa0Smaybee } 3229c717a561Smaybee 3230ab69d62fSMatthew Ahrens if (db->db_state != DB_NOFILL && 3231ab69d62fSMatthew Ahrens dn->dn_object != DMU_META_DNODE_OBJECT && 3232ab69d62fSMatthew Ahrens refcount_count(&db->db_holds) > 1 && 3233b24ab676SJeff Bonwick dr->dt.dl.dr_override_state != DR_OVERRIDDEN && 3234ab69d62fSMatthew Ahrens *datap == db->db_buf) { 3235fa9e4066Sahrens /* 323682c9918fSTim Haley * If this buffer is currently "in use" (i.e., there 323782c9918fSTim Haley * are active holds and db_data still references it), 323882c9918fSTim Haley * then make a copy before we start the write so that 323982c9918fSTim Haley * any modifications from the open txg will not leak 324082c9918fSTim Haley * into this write. 3241fa9e4066Sahrens * 324282c9918fSTim Haley * NOTE: this copy does not need to be made for 324382c9918fSTim Haley * objects only modified in the syncing context (e.g. 324482c9918fSTim Haley * DMU_OT_DNODE blocks).
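 *
 * Editorial note (not part of the original source): this is the
 * sync-side counterpart of the copy made in dbuf_hold_impl(), which
 * copies on re-hold while an older version of the buffer is still
 * syncing out; together they keep the open and syncing txgs from
 * sharing a writable buffer.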
3245fa9e4066Sahrens */ 32465602294fSDan Kimmel int psize = arc_buf_size(*datap); 3247ab69d62fSMatthew Ahrens arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 32485602294fSDan Kimmel enum zio_compress compress_type = arc_get_compression(*datap); 32495602294fSDan Kimmel 32505602294fSDan Kimmel if (compress_type == ZIO_COMPRESS_OFF) { 32515602294fSDan Kimmel *datap = arc_alloc_buf(os->os_spa, db, type, psize); 32525602294fSDan Kimmel } else { 32535602294fSDan Kimmel ASSERT3U(type, ==, ARC_BUFC_DATA); 32545602294fSDan Kimmel int lsize = arc_buf_lsize(*datap); 32555602294fSDan Kimmel *datap = arc_alloc_compressed_buf(os->os_spa, db, 32565602294fSDan Kimmel psize, lsize, compress_type); 32575602294fSDan Kimmel } 32585602294fSDan Kimmel bcopy(db->db.db_data, (*datap)->b_data, psize); 3259fa9e4066Sahrens } 3260c717a561Smaybee db->db_data_pending = dr; 3261fa9e4066Sahrens 3262fa9e4066Sahrens mutex_exit(&db->db_mtx); 3263fa9e4066Sahrens 3264088f3894Sahrens dbuf_write(dr, *datap, tx); 3265c717a561Smaybee 3266c717a561Smaybee ASSERT(!list_link_active(&dr->dr_dirty_node)); 3267744947dcSTom Erickson if (dn->dn_object == DMU_META_DNODE_OBJECT) { 3268c717a561Smaybee list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr); 3269744947dcSTom Erickson DB_DNODE_EXIT(db); 3270744947dcSTom Erickson } else { 3271744947dcSTom Erickson /* 3272744947dcSTom Erickson * Although zio_nowait() does not "wait for an IO", it does 3273744947dcSTom Erickson * initiate the IO. If this is an empty write it seems plausible 3274744947dcSTom Erickson * that the IO could actually be completed before the nowait 3275744947dcSTom Erickson * returns. We need to DB_DNODE_EXIT() first in case 3276744947dcSTom Erickson * zio_nowait() invalidates the dbuf. 3277744947dcSTom Erickson */ 3278744947dcSTom Erickson DB_DNODE_EXIT(db); 3279c717a561Smaybee zio_nowait(dr->dr_zio); 3280fa9e4066Sahrens } 3281744947dcSTom Erickson } 3282c717a561Smaybee 3283c717a561Smaybee void 328446e1baa6SMatthew Ahrens dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx) 3285c717a561Smaybee { 3286c717a561Smaybee dbuf_dirty_record_t *dr; 3287c717a561Smaybee 3288c717a561Smaybee while (dr = list_head(list)) { 3289c717a561Smaybee if (dr->dr_zio != NULL) { 3290c717a561Smaybee /* 3291c717a561Smaybee * If we find an already initialized zio then we 3292c717a561Smaybee * are processing the meta-dnode, and we have finished. 3293c717a561Smaybee * The dbufs for all dnodes are put back on the list 3294c717a561Smaybee * during processing, so that we can zio_wait() 3295c717a561Smaybee * these IOs after initiating all child IOs. 
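 *
 * Editorial sketch of the caller side (not part of the original
 * source; dmu_objset_sync() has the real code). Once all child i/os
 * have been initiated, the meta-dnode records left on this list are
 * picked up again and their i/os kicked off, roughly:
 *
 *	while ((dr = list_head(list)) != NULL) {
 *		list_remove(list, dr);
 *		zio_nowait(dr->dr_zio);
 *	}
 *
 * after which a zio_wait() on their common parent zio waits for all
 * of them at once.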
3296c717a561Smaybee */ 3297c717a561Smaybee ASSERT3U(dr->dr_dbuf->db.db_object, ==, 3298c717a561Smaybee DMU_META_DNODE_OBJECT); 3299c717a561Smaybee break; 3300fa9e4066Sahrens } 330146e1baa6SMatthew Ahrens if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID && 330246e1baa6SMatthew Ahrens dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) { 330346e1baa6SMatthew Ahrens VERIFY3U(dr->dr_dbuf->db_level, ==, level); 330446e1baa6SMatthew Ahrens } 3305c717a561Smaybee list_remove(list, dr); 3306c717a561Smaybee if (dr->dr_dbuf->db_level > 0) 3307c717a561Smaybee dbuf_sync_indirect(dr, tx); 3308c717a561Smaybee else 3309c717a561Smaybee dbuf_sync_leaf(dr, tx); 3310c717a561Smaybee } 3311c717a561Smaybee } 3312c717a561Smaybee 3313fa9e4066Sahrens /* ARGSUSED */ 3314fa9e4066Sahrens static void 3315c717a561Smaybee dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb) 3316fa9e4066Sahrens { 3317fa9e4066Sahrens dmu_buf_impl_t *db = vdb; 3318744947dcSTom Erickson dnode_t *dn; 3319e14bb325SJeff Bonwick blkptr_t *bp = zio->io_bp; 3320c717a561Smaybee blkptr_t *bp_orig = &zio->io_bp_orig; 3321b24ab676SJeff Bonwick spa_t *spa = zio->io_spa; 3322b24ab676SJeff Bonwick int64_t delta; 3323fa9e4066Sahrens uint64_t fill = 0; 3324b24ab676SJeff Bonwick int i; 3325fa9e4066Sahrens 332611ceac77SAlex Reece ASSERT3P(db->db_blkptr, !=, NULL); 332711ceac77SAlex Reece ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp); 3328e14bb325SJeff Bonwick 3329744947dcSTom Erickson DB_DNODE_ENTER(db); 3330744947dcSTom Erickson dn = DB_DNODE(db); 3331b24ab676SJeff Bonwick delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig); 3332b24ab676SJeff Bonwick dnode_diduse_space(dn, delta - zio->io_prev_space_delta); 3333b24ab676SJeff Bonwick zio->io_prev_space_delta = delta; 3334fa9e4066Sahrens 333543466aaeSMax Grossman if (bp->blk_birth != 0) { 33360a586ceaSMark Shellenbaum ASSERT((db->db_blkid != DMU_SPILL_BLKID && 33370a586ceaSMark Shellenbaum BP_GET_TYPE(bp) == dn->dn_type) || 33380a586ceaSMark Shellenbaum (db->db_blkid == DMU_SPILL_BLKID && 33395d7b4d43SMatthew Ahrens BP_GET_TYPE(bp) == dn->dn_bonustype) || 33405d7b4d43SMatthew Ahrens BP_IS_EMBEDDED(bp)); 3341e14bb325SJeff Bonwick ASSERT(BP_GET_LEVEL(bp) == db->db_level); 334243466aaeSMax Grossman } 3343e14bb325SJeff Bonwick 3344fa9e4066Sahrens mutex_enter(&db->db_mtx); 3345fa9e4066Sahrens 33460a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG 33470a586ceaSMark Shellenbaum if (db->db_blkid == DMU_SPILL_BLKID) { 33480a586ceaSMark Shellenbaum ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR); 334911ceac77SAlex Reece ASSERT(!(BP_IS_HOLE(bp)) && 33500a586ceaSMark Shellenbaum db->db_blkptr == &dn->dn_phys->dn_spill); 33510a586ceaSMark Shellenbaum } 33520a586ceaSMark Shellenbaum #endif 33530a586ceaSMark Shellenbaum 3354fa9e4066Sahrens if (db->db_level == 0) { 3355fa9e4066Sahrens mutex_enter(&dn->dn_mtx); 33560a586ceaSMark Shellenbaum if (db->db_blkid > dn->dn_phys->dn_maxblkid && 33570a586ceaSMark Shellenbaum db->db_blkid != DMU_SPILL_BLKID) 3358fa9e4066Sahrens dn->dn_phys->dn_maxblkid = db->db_blkid; 3359fa9e4066Sahrens mutex_exit(&dn->dn_mtx); 3360fa9e4066Sahrens 3361fa9e4066Sahrens if (dn->dn_type == DMU_OT_DNODE) { 3362fa9e4066Sahrens dnode_phys_t *dnp = db->db.db_data; 3363fa9e4066Sahrens for (i = db->db.db_size >> DNODE_SHIFT; i > 0; 3364fa9e4066Sahrens i--, dnp++) { 3365fa9e4066Sahrens if (dnp->dn_type != DMU_OT_NONE) 3366fa9e4066Sahrens fill++; 3367fa9e4066Sahrens } 3368fa9e4066Sahrens } else { 336943466aaeSMax Grossman if (BP_IS_HOLE(bp)) { 337043466aaeSMax Grossman fill = 0; 337143466aaeSMax 
3354fa9e4066Sahrens 	if (db->db_level == 0) {
3355fa9e4066Sahrens 		mutex_enter(&dn->dn_mtx);
33560a586ceaSMark Shellenbaum 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
33570a586ceaSMark Shellenbaum 		    db->db_blkid != DMU_SPILL_BLKID)
3358fa9e4066Sahrens 			dn->dn_phys->dn_maxblkid = db->db_blkid;
3359fa9e4066Sahrens 		mutex_exit(&dn->dn_mtx);
3360fa9e4066Sahrens 
3361fa9e4066Sahrens 		if (dn->dn_type == DMU_OT_DNODE) {
3362fa9e4066Sahrens 			dnode_phys_t *dnp = db->db.db_data;
3363fa9e4066Sahrens 			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
3364fa9e4066Sahrens 			    i--, dnp++) {
3365fa9e4066Sahrens 				if (dnp->dn_type != DMU_OT_NONE)
3366fa9e4066Sahrens 					fill++;
3367fa9e4066Sahrens 			}
3368fa9e4066Sahrens 		} else {
336943466aaeSMax Grossman 			if (BP_IS_HOLE(bp)) {
337043466aaeSMax Grossman 				fill = 0;
337143466aaeSMax Grossman 			} else {
3372fa9e4066Sahrens 				fill = 1;
3373fa9e4066Sahrens 			}
337443466aaeSMax Grossman 		}
3375fa9e4066Sahrens 	} else {
3376e14bb325SJeff Bonwick 		blkptr_t *ibp = db->db.db_data;
3377fa9e4066Sahrens 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3378e14bb325SJeff Bonwick 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
3379e14bb325SJeff Bonwick 			if (BP_IS_HOLE(ibp))
3380fa9e4066Sahrens 				continue;
33815d7b4d43SMatthew Ahrens 			fill += BP_GET_FILL(ibp);
3382fa9e4066Sahrens 		}
3383fa9e4066Sahrens 	}
3384744947dcSTom Erickson 	DB_DNODE_EXIT(db);
3385fa9e4066Sahrens 
33865d7b4d43SMatthew Ahrens 	if (!BP_IS_EMBEDDED(bp))
3387e14bb325SJeff Bonwick 		bp->blk_fill = fill;
3388fa9e4066Sahrens 
3389fa9e4066Sahrens 	mutex_exit(&db->db_mtx);
339011ceac77SAlex Reece 
339111ceac77SAlex Reece 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
339211ceac77SAlex Reece 	*db->db_blkptr = *bp;
339311ceac77SAlex Reece 	rw_exit(&dn->dn_struct_rwlock);
3394fa9e4066Sahrens }
3395fa9e4066Sahrens 
33968df0bcf0SPaul Dagnelie /* ARGSUSED */
33978df0bcf0SPaul Dagnelie /*
33988df0bcf0SPaul Dagnelie  * This function gets called just prior to running through the compression
33998df0bcf0SPaul Dagnelie  * stage of the zio pipeline. If we're an indirect block comprised of only
34008df0bcf0SPaul Dagnelie  * holes, then we want this indirect to be compressed away to a hole. In
34018df0bcf0SPaul Dagnelie  * order to do that we must zero out any information about the holes that
34028df0bcf0SPaul Dagnelie  * this indirect points to before we try to compress it.
34038df0bcf0SPaul Dagnelie  */
34048df0bcf0SPaul Dagnelie static void
34058df0bcf0SPaul Dagnelie dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
34068df0bcf0SPaul Dagnelie {
34078df0bcf0SPaul Dagnelie 	dmu_buf_impl_t *db = vdb;
34088df0bcf0SPaul Dagnelie 	dnode_t *dn;
34098df0bcf0SPaul Dagnelie 	blkptr_t *bp;
34101a01181fSGeorge Wilson 	unsigned int epbs, i;
34118df0bcf0SPaul Dagnelie 
34128df0bcf0SPaul Dagnelie 	ASSERT3U(db->db_level, >, 0);
34138df0bcf0SPaul Dagnelie 	DB_DNODE_ENTER(db);
34148df0bcf0SPaul Dagnelie 	dn = DB_DNODE(db);
34158df0bcf0SPaul Dagnelie 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
34161a01181fSGeorge Wilson 	ASSERT3U(epbs, <, 31);
34178df0bcf0SPaul Dagnelie 
34188df0bcf0SPaul Dagnelie 	/* Determine if all our children are holes */
34198df0bcf0SPaul Dagnelie 	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
34208df0bcf0SPaul Dagnelie 		if (!BP_IS_HOLE(bp))
34218df0bcf0SPaul Dagnelie 			break;
34228df0bcf0SPaul Dagnelie 	}
34238df0bcf0SPaul Dagnelie 
34248df0bcf0SPaul Dagnelie 	/*
34258df0bcf0SPaul Dagnelie 	 * If all the children are holes, then zero them all out so that
34268df0bcf0SPaul Dagnelie 	 * the block may be compressed away.
34278df0bcf0SPaul Dagnelie 	 */
34288df0bcf0SPaul Dagnelie 	if (i == 1 << epbs) {
34291a01181fSGeorge Wilson 		/*
34301a01181fSGeorge Wilson 		 * We only found holes. Grab the rwlock to prevent
34311a01181fSGeorge Wilson 		 * anybody from reading the blocks we're about to
34321a01181fSGeorge Wilson 		 * zero out.
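/*
 * A worked example for dbuf_write_children_ready() above, using typical
 * sizes: with dn_indblkshift == 17 (128K indirect blocks) and
 * SPA_BLKPTRSHIFT == 7 (128-byte block pointers), epbs == 10, so the scan
 * covers up to 1 << 10 == 1024 child block pointers; if every one of them
 * is a hole, the whole 128K buffer is zeroed and the compression stage can
 * then collapse this indirect block into a hole as well.
 */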
34331a01181fSGeorge Wilson 		 */
34341a01181fSGeorge Wilson 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
34358df0bcf0SPaul Dagnelie 		bzero(db->db.db_data, db->db.db_size);
34361a01181fSGeorge Wilson 		rw_exit(&dn->dn_struct_rwlock);
34378df0bcf0SPaul Dagnelie 	}
34388df0bcf0SPaul Dagnelie 	DB_DNODE_EXIT(db);
34398df0bcf0SPaul Dagnelie }
34408df0bcf0SPaul Dagnelie 
344169962b56SMatthew Ahrens /*
344269962b56SMatthew Ahrens  * The SPA will call this callback several times for each zio - once
344369962b56SMatthew Ahrens  * for every physical child i/o (zio->io_phys_children times). This
344469962b56SMatthew Ahrens  * allows the DMU to monitor the progress of each logical i/o. For example,
344569962b56SMatthew Ahrens  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
344669962b56SMatthew Ahrens  * block. There may be a long delay before all copies/fragments are completed,
344769962b56SMatthew Ahrens  * so this callback allows us to retire dirty space gradually, as the physical
344869962b56SMatthew Ahrens  * i/os complete.
344969962b56SMatthew Ahrens  */
345069962b56SMatthew Ahrens /* ARGSUSED */
345169962b56SMatthew Ahrens static void
345269962b56SMatthew Ahrens dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
345369962b56SMatthew Ahrens {
345469962b56SMatthew Ahrens 	dmu_buf_impl_t *db = arg;
345569962b56SMatthew Ahrens 	objset_t *os = db->db_objset;
345669962b56SMatthew Ahrens 	dsl_pool_t *dp = dmu_objset_pool(os);
345769962b56SMatthew Ahrens 	dbuf_dirty_record_t *dr;
345869962b56SMatthew Ahrens 	int delta = 0;
345969962b56SMatthew Ahrens 
346069962b56SMatthew Ahrens 	dr = db->db_data_pending;
346169962b56SMatthew Ahrens 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
346269962b56SMatthew Ahrens 
346369962b56SMatthew Ahrens 	/*
346469962b56SMatthew Ahrens 	 * The callback will be called io_phys_children times. Retire one
346569962b56SMatthew Ahrens 	 * portion of our dirty space each time we are called. Any rounding
346669962b56SMatthew Ahrens 	 * error will be cleaned up by dsl_pool_sync()'s call to
346769962b56SMatthew Ahrens 	 * dsl_pool_undirty_space().
346869962b56SMatthew Ahrens 	 */
346969962b56SMatthew Ahrens 	delta = dr->dr_accounted / zio->io_phys_children;
347069962b56SMatthew Ahrens 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
347169962b56SMatthew Ahrens }
347269962b56SMatthew Ahrens 
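/*
 * To illustrate the arithmetic in dbuf_write_physdone() above: if
 * dr_accounted is 1048576 bytes (1M) and the zio has three physical
 * children (say, three copies of the block), each callback retires
 * 1048576 / 3 == 349525 bytes, and the 1-byte remainder left after all
 * three calls is cleaned up by dsl_pool_sync()'s dsl_pool_undirty_space().
 */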
3473c717a561Smaybee /* ARGSUSED */
3474c717a561Smaybee static void
3475c717a561Smaybee dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
3476c717a561Smaybee {
3477c717a561Smaybee 	dmu_buf_impl_t *db = vdb;
3478b24ab676SJeff Bonwick 	blkptr_t *bp_orig = &zio->io_bp_orig;
347943466aaeSMax Grossman 	blkptr_t *bp = db->db_blkptr;
348043466aaeSMax Grossman 	objset_t *os = db->db_objset;
348143466aaeSMax Grossman 	dmu_tx_t *tx = os->os_synctx;
3482c717a561Smaybee 	dbuf_dirty_record_t **drp, *dr;
3483c717a561Smaybee 
3484fb09f5aaSMadhav Suresh 	ASSERT0(zio->io_error);
3485b24ab676SJeff Bonwick 	ASSERT(db->db_blkptr == bp);
3486b24ab676SJeff Bonwick 
348780901aeaSGeorge Wilson 	/*
348880901aeaSGeorge Wilson 	 * For nopwrites and rewrites we ensure that the bp matches our
348980901aeaSGeorge Wilson 	 * original and bypass all the accounting.
349080901aeaSGeorge Wilson 	 */
349180901aeaSGeorge Wilson 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
3492b24ab676SJeff Bonwick 		ASSERT(BP_EQUAL(bp, bp_orig));
3493b24ab676SJeff Bonwick 	} else {
349443466aaeSMax Grossman 		dsl_dataset_t *ds = os->os_dsl_dataset;
3495b24ab676SJeff Bonwick 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
3496b24ab676SJeff Bonwick 		dsl_dataset_block_born(ds, bp, tx);
3497b24ab676SJeff Bonwick 	}
3498c717a561Smaybee 
3499c717a561Smaybee 	mutex_enter(&db->db_mtx);
3500c717a561Smaybee 
3501b24ab676SJeff Bonwick 	DBUF_VERIFY(db);
3502b24ab676SJeff Bonwick 
3503c717a561Smaybee 	drp = &db->db_last_dirty;
350417f17c2dSbonwick 	while ((dr = *drp) != db->db_data_pending)
350517f17c2dSbonwick 		drp = &dr->dr_next;
350617f17c2dSbonwick 	ASSERT(!list_link_active(&dr->dr_dirty_node));
3507b24ab676SJeff Bonwick 	ASSERT(dr->dr_dbuf == db);
350817f17c2dSbonwick 	ASSERT(dr->dr_next == NULL);
350917f17c2dSbonwick 	*drp = dr->dr_next;
3510c717a561Smaybee 
35110a586ceaSMark Shellenbaum #ifdef ZFS_DEBUG
35120a586ceaSMark Shellenbaum 	if (db->db_blkid == DMU_SPILL_BLKID) {
3513744947dcSTom Erickson 		dnode_t *dn;
3514744947dcSTom Erickson 
3515744947dcSTom Erickson 		DB_DNODE_ENTER(db);
3516744947dcSTom Erickson 		dn = DB_DNODE(db);
35170a586ceaSMark Shellenbaum 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
35180a586ceaSMark Shellenbaum 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
35190a586ceaSMark Shellenbaum 		    db->db_blkptr == &dn->dn_phys->dn_spill);
3520744947dcSTom Erickson 		DB_DNODE_EXIT(db);
35210a586ceaSMark Shellenbaum 	}
35220a586ceaSMark Shellenbaum #endif
35230a586ceaSMark Shellenbaum 
3524c717a561Smaybee 	if (db->db_level == 0) {
35250a586ceaSMark Shellenbaum 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3526c717a561Smaybee 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
352782c9918fSTim Haley 		if (db->db_state != DB_NOFILL) {
3528c717a561Smaybee 			if (dr->dt.dl.dr_data != db->db_buf)
3529dcbf3bd6SGeorge Wilson 				arc_buf_destroy(dr->dt.dl.dr_data, db);
353082c9918fSTim Haley 		}
3531c717a561Smaybee 	} else {
3532744947dcSTom Erickson 		dnode_t *dn;
3533744947dcSTom Erickson 
3534744947dcSTom Erickson 		DB_DNODE_ENTER(db);
3535744947dcSTom Erickson 		dn = DB_DNODE(db);
3536c717a561Smaybee 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
3537c717a561Smaybee 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
3538c717a561Smaybee 		if (!BP_IS_HOLE(db->db_blkptr)) {
3539c717a561Smaybee 			int epbs =
3540c717a561Smaybee 			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
354143466aaeSMax Grossman 			ASSERT3U(db->db_blkid, <=,
354243466aaeSMax Grossman 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
3543c717a561Smaybee 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
3544c717a561Smaybee 			    db->db.db_size);
3545c717a561Smaybee 		}
3546744947dcSTom Erickson 		DB_DNODE_EXIT(db);
3547c25056deSgw25295 		mutex_destroy(&dr->dt.di.dr_mtx);
3548c25056deSgw25295 		list_destroy(&dr->dt.di.dr_children);
3549c717a561Smaybee 	}
3550c717a561Smaybee 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
3551c717a561Smaybee 
3552c717a561Smaybee 	cv_broadcast(&db->db_changed);
3553c717a561Smaybee 	ASSERT(db->db_dirtycnt > 0);
3554c717a561Smaybee 	db->db_dirtycnt -= 1;
3555c717a561Smaybee 	db->db_data_pending = NULL;
355643466aaeSMax Grossman 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
3557b24ab676SJeff Bonwick }
3558b24ab676SJeff Bonwick 
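/*
 * The wrappers below adapt dbuf_write_ready() and dbuf_write_done() to
 * the single-argument callback signature that zio_write() expects for
 * nofill and override writes; these writes carry no arc buffer, so NULL
 * is passed for the buf argument.
 */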
3559b24ab676SJeff Bonwick static void
3560b24ab676SJeff Bonwick dbuf_write_nofill_ready(zio_t *zio)
3561b24ab676SJeff Bonwick {
3562b24ab676SJeff Bonwick 	dbuf_write_ready(zio, NULL, zio->io_private);
3563b24ab676SJeff Bonwick }
3564b24ab676SJeff Bonwick 
3565b24ab676SJeff Bonwick static void
3566b24ab676SJeff Bonwick dbuf_write_nofill_done(zio_t *zio)
3567b24ab676SJeff Bonwick {
3568b24ab676SJeff Bonwick 	dbuf_write_done(zio, NULL, zio->io_private);
3569b24ab676SJeff Bonwick }
3570b24ab676SJeff Bonwick 
3571b24ab676SJeff Bonwick static void
3572b24ab676SJeff Bonwick dbuf_write_override_ready(zio_t *zio)
3573b24ab676SJeff Bonwick {
3574b24ab676SJeff Bonwick 	dbuf_dirty_record_t *dr = zio->io_private;
3575b24ab676SJeff Bonwick 	dmu_buf_impl_t *db = dr->dr_dbuf;
3576b24ab676SJeff Bonwick 
3577b24ab676SJeff Bonwick 	dbuf_write_ready(zio, NULL, db);
3578b24ab676SJeff Bonwick }
3579b24ab676SJeff Bonwick 
3580b24ab676SJeff Bonwick static void
3581b24ab676SJeff Bonwick dbuf_write_override_done(zio_t *zio)
3582b24ab676SJeff Bonwick {
3583b24ab676SJeff Bonwick 	dbuf_dirty_record_t *dr = zio->io_private;
3584b24ab676SJeff Bonwick 	dmu_buf_impl_t *db = dr->dr_dbuf;
3585b24ab676SJeff Bonwick 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
3586b24ab676SJeff Bonwick 
3587b24ab676SJeff Bonwick 	mutex_enter(&db->db_mtx);
3588b24ab676SJeff Bonwick 	if (!BP_EQUAL(zio->io_bp, obp)) {
3589b24ab676SJeff Bonwick 		if (!BP_IS_HOLE(obp))
3590b24ab676SJeff Bonwick 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
3591b24ab676SJeff Bonwick 		arc_release(dr->dt.dl.dr_data, db);
3592b24ab676SJeff Bonwick 	}
3593c717a561Smaybee 	mutex_exit(&db->db_mtx);
35944ee0199eSRobert Mustacchi 	dbuf_write_done(zio, NULL, db);
3595770499e1SDan Kimmel 
3596770499e1SDan Kimmel 	if (zio->io_abd != NULL)
3597770499e1SDan Kimmel 		abd_put(zio->io_abd);
3598b24ab676SJeff Bonwick }
3599c717a561Smaybee 
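/*
 * Everything from here through dbuf_remap() supports device removal:
 * spa_remap_blkptr() rewrites a block pointer whose DVAs refer to a
 * removed (indirect) vdev so that it points at the corresponding
 * concrete location instead, which lets future reads bypass the
 * indirect mapping layer.
 */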
36005cabbc6bSPrashanth Sreenivasa typedef struct dbuf_remap_impl_callback_arg {
36015cabbc6bSPrashanth Sreenivasa 	objset_t	*drica_os;
36025cabbc6bSPrashanth Sreenivasa 	uint64_t	drica_blk_birth;
36035cabbc6bSPrashanth Sreenivasa 	dmu_tx_t	*drica_tx;
36045cabbc6bSPrashanth Sreenivasa } dbuf_remap_impl_callback_arg_t;
36055cabbc6bSPrashanth Sreenivasa 
36065cabbc6bSPrashanth Sreenivasa static void
36075cabbc6bSPrashanth Sreenivasa dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
36085cabbc6bSPrashanth Sreenivasa     void *arg)
36095cabbc6bSPrashanth Sreenivasa {
36105cabbc6bSPrashanth Sreenivasa 	dbuf_remap_impl_callback_arg_t *drica = arg;
36115cabbc6bSPrashanth Sreenivasa 	objset_t *os = drica->drica_os;
36125cabbc6bSPrashanth Sreenivasa 	spa_t *spa = dmu_objset_spa(os);
36135cabbc6bSPrashanth Sreenivasa 	dmu_tx_t *tx = drica->drica_tx;
36145cabbc6bSPrashanth Sreenivasa 
36155cabbc6bSPrashanth Sreenivasa 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
36165cabbc6bSPrashanth Sreenivasa 
36175cabbc6bSPrashanth Sreenivasa 	if (os == spa_meta_objset(spa)) {
36185cabbc6bSPrashanth Sreenivasa 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
36195cabbc6bSPrashanth Sreenivasa 	} else {
36205cabbc6bSPrashanth Sreenivasa 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
36215cabbc6bSPrashanth Sreenivasa 		    size, drica->drica_blk_birth, tx);
36225cabbc6bSPrashanth Sreenivasa 	}
36235cabbc6bSPrashanth Sreenivasa }
36245cabbc6bSPrashanth Sreenivasa 
36255cabbc6bSPrashanth Sreenivasa static void
36265cabbc6bSPrashanth Sreenivasa dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
36275cabbc6bSPrashanth Sreenivasa {
36285cabbc6bSPrashanth Sreenivasa 	blkptr_t bp_copy = *bp;
36295cabbc6bSPrashanth Sreenivasa 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
36305cabbc6bSPrashanth Sreenivasa 	dbuf_remap_impl_callback_arg_t drica;
36315cabbc6bSPrashanth Sreenivasa 
36325cabbc6bSPrashanth Sreenivasa 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
36335cabbc6bSPrashanth Sreenivasa 
36345cabbc6bSPrashanth Sreenivasa 	drica.drica_os = dn->dn_objset;
36355cabbc6bSPrashanth Sreenivasa 	drica.drica_blk_birth = bp->blk_birth;
36365cabbc6bSPrashanth Sreenivasa 	drica.drica_tx = tx;
36375cabbc6bSPrashanth Sreenivasa 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
36385cabbc6bSPrashanth Sreenivasa 	    &drica)) {
36395cabbc6bSPrashanth Sreenivasa 		/*
36405cabbc6bSPrashanth Sreenivasa 		 * The struct_rwlock prevents dbuf_read_impl() from
36415cabbc6bSPrashanth Sreenivasa 		 * dereferencing the BP while we are changing it. To
36425cabbc6bSPrashanth Sreenivasa 		 * avoid lock contention, only grab it when we are actually
36435cabbc6bSPrashanth Sreenivasa 		 * changing the BP.
36445cabbc6bSPrashanth Sreenivasa 		 */
36455cabbc6bSPrashanth Sreenivasa 		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
36465cabbc6bSPrashanth Sreenivasa 		*bp = bp_copy;
36475cabbc6bSPrashanth Sreenivasa 		rw_exit(&dn->dn_struct_rwlock);
36485cabbc6bSPrashanth Sreenivasa 	}
36495cabbc6bSPrashanth Sreenivasa }
36505cabbc6bSPrashanth Sreenivasa 
36515cabbc6bSPrashanth Sreenivasa /*
36525cabbc6bSPrashanth Sreenivasa  * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
36535cabbc6bSPrashanth Sreenivasa  * to remap a copy of every bp in the dbuf.
36545cabbc6bSPrashanth Sreenivasa  */
36555cabbc6bSPrashanth Sreenivasa boolean_t
36565cabbc6bSPrashanth Sreenivasa dbuf_can_remap(const dmu_buf_impl_t *db)
36575cabbc6bSPrashanth Sreenivasa {
36585cabbc6bSPrashanth Sreenivasa 	spa_t *spa = dmu_objset_spa(db->db_objset);
36595cabbc6bSPrashanth Sreenivasa 	blkptr_t *bp = db->db.db_data;
36605cabbc6bSPrashanth Sreenivasa 	boolean_t ret = B_FALSE;
36615cabbc6bSPrashanth Sreenivasa 
36625cabbc6bSPrashanth Sreenivasa 	ASSERT3U(db->db_level, >, 0);
36635cabbc6bSPrashanth Sreenivasa 	ASSERT3S(db->db_state, ==, DB_CACHED);
36645cabbc6bSPrashanth Sreenivasa 
36655cabbc6bSPrashanth Sreenivasa 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
36665cabbc6bSPrashanth Sreenivasa 
36675cabbc6bSPrashanth Sreenivasa 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
36685cabbc6bSPrashanth Sreenivasa 	for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
36695cabbc6bSPrashanth Sreenivasa 		blkptr_t bp_copy = bp[i];
36705cabbc6bSPrashanth Sreenivasa 		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
36715cabbc6bSPrashanth Sreenivasa 			ret = B_TRUE;
36725cabbc6bSPrashanth Sreenivasa 			break;
36735cabbc6bSPrashanth Sreenivasa 		}
36745cabbc6bSPrashanth Sreenivasa 	}
36755cabbc6bSPrashanth Sreenivasa 	spa_config_exit(spa, SCL_VDEV, FTAG);
36765cabbc6bSPrashanth Sreenivasa 
36775cabbc6bSPrashanth Sreenivasa 	return (ret);
36785cabbc6bSPrashanth Sreenivasa }
36795cabbc6bSPrashanth Sreenivasa 
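/*
 * Like dbuf_can_remap() above, but for the block pointers embedded in
 * the dnode itself (dn_blkptr[0 .. dn_nblkptr - 1]). Only copies of the
 * BPs are probed, so nothing on disk is modified here either.
 */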
36805cabbc6bSPrashanth Sreenivasa boolean_t
36815cabbc6bSPrashanth Sreenivasa dnode_needs_remap(const dnode_t *dn)
36825cabbc6bSPrashanth Sreenivasa {
36835cabbc6bSPrashanth Sreenivasa 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
36845cabbc6bSPrashanth Sreenivasa 	boolean_t ret = B_FALSE;
36855cabbc6bSPrashanth Sreenivasa 
36865cabbc6bSPrashanth Sreenivasa 	if (dn->dn_phys->dn_nlevels == 0) {
36875cabbc6bSPrashanth Sreenivasa 		return (B_FALSE);
36885cabbc6bSPrashanth Sreenivasa 	}
36895cabbc6bSPrashanth Sreenivasa 
36905cabbc6bSPrashanth Sreenivasa 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
36915cabbc6bSPrashanth Sreenivasa 
36925cabbc6bSPrashanth Sreenivasa 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
36935cabbc6bSPrashanth Sreenivasa 	for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
36945cabbc6bSPrashanth Sreenivasa 		blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
36955cabbc6bSPrashanth Sreenivasa 		if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
36965cabbc6bSPrashanth Sreenivasa 			ret = B_TRUE;
36975cabbc6bSPrashanth Sreenivasa 			break;
36985cabbc6bSPrashanth Sreenivasa 		}
36995cabbc6bSPrashanth Sreenivasa 	}
37005cabbc6bSPrashanth Sreenivasa 	spa_config_exit(spa, SCL_VDEV, FTAG);
37015cabbc6bSPrashanth Sreenivasa 
37025cabbc6bSPrashanth Sreenivasa 	return (ret);
37035cabbc6bSPrashanth Sreenivasa }
37045cabbc6bSPrashanth Sreenivasa 
37055cabbc6bSPrashanth Sreenivasa /*
37065cabbc6bSPrashanth Sreenivasa  * Remap any existing BPs to concrete vdevs, if possible.
37075cabbc6bSPrashanth Sreenivasa  */
37085cabbc6bSPrashanth Sreenivasa static void
37095cabbc6bSPrashanth Sreenivasa dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
37105cabbc6bSPrashanth Sreenivasa {
37115cabbc6bSPrashanth Sreenivasa 	spa_t *spa = dmu_objset_spa(db->db_objset);
37125cabbc6bSPrashanth Sreenivasa 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
37135cabbc6bSPrashanth Sreenivasa 
37145cabbc6bSPrashanth Sreenivasa 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
37155cabbc6bSPrashanth Sreenivasa 		return;
37165cabbc6bSPrashanth Sreenivasa 
37175cabbc6bSPrashanth Sreenivasa 	if (db->db_level > 0) {
37185cabbc6bSPrashanth Sreenivasa 		blkptr_t *bp = db->db.db_data;
37195cabbc6bSPrashanth Sreenivasa 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
37205cabbc6bSPrashanth Sreenivasa 			dbuf_remap_impl(dn, &bp[i], tx);
37215cabbc6bSPrashanth Sreenivasa 		}
37225cabbc6bSPrashanth Sreenivasa 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
37235cabbc6bSPrashanth Sreenivasa 		dnode_phys_t *dnp = db->db.db_data;
37245cabbc6bSPrashanth Sreenivasa 		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
37255cabbc6bSPrashanth Sreenivasa 		    DMU_OT_DNODE);
37265cabbc6bSPrashanth Sreenivasa 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT; i++) {
37275cabbc6bSPrashanth Sreenivasa 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
37285cabbc6bSPrashanth Sreenivasa 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx);
37295cabbc6bSPrashanth Sreenivasa 			}
37305cabbc6bSPrashanth Sreenivasa 		}
37315cabbc6bSPrashanth Sreenivasa 	}
37325cabbc6bSPrashanth Sreenivasa }
37335cabbc6bSPrashanth Sreenivasa 
37345cabbc6bSPrashanth Sreenivasa 
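/*
 * Sizing note for dbuf_remap() above: a 128K indirect dbuf holds
 * 128K >> SPA_BLKPTRSHIFT == 1024 block pointers, while a (typically
 * 16K) meta-dnode dbuf holds 16K >> DNODE_SHIFT == 32 dnode_phys_t
 * entries, each contributing up to dn_nblkptr (at most 3) block
 * pointers to remap.
 */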
37353e30c24aSWill Andrews /* Issue I/O to commit a dirty buffer to disk. */
3736b24ab676SJeff Bonwick static void
3737b24ab676SJeff Bonwick dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
3738b24ab676SJeff Bonwick {
3739b24ab676SJeff Bonwick 	dmu_buf_impl_t *db = dr->dr_dbuf;
3740744947dcSTom Erickson 	dnode_t *dn;
3741744947dcSTom Erickson 	objset_t *os;
3742b24ab676SJeff Bonwick 	dmu_buf_impl_t *parent = db->db_parent;
3743b24ab676SJeff Bonwick 	uint64_t txg = tx->tx_txg;
37447802d7bfSMatthew Ahrens 	zbookmark_phys_t zb;
3745b24ab676SJeff Bonwick 	zio_prop_t zp;
3746b24ab676SJeff Bonwick 	zio_t *zio;
37470a586ceaSMark Shellenbaum 	int wp_flag = 0;
3748b24ab676SJeff Bonwick 
374911ceac77SAlex Reece 	ASSERT(dmu_tx_is_syncing(tx));
375011ceac77SAlex Reece 
3751744947dcSTom Erickson 	DB_DNODE_ENTER(db);
3752744947dcSTom Erickson 	dn = DB_DNODE(db);
3753744947dcSTom Erickson 	os = dn->dn_objset;
3754744947dcSTom Erickson 
3755b24ab676SJeff Bonwick 	if (db->db_state != DB_NOFILL) {
3756b24ab676SJeff Bonwick 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
3757b24ab676SJeff Bonwick 			/*
3758b24ab676SJeff Bonwick 			 * Private object buffers are released here rather
3759b24ab676SJeff Bonwick 			 * than in dbuf_dirty() since they are only modified
3760b24ab676SJeff Bonwick 			 * in the syncing context and we don't want the
3761b24ab676SJeff Bonwick 			 * overhead of making multiple copies of the data.
3762b24ab676SJeff Bonwick 			 */
3763b24ab676SJeff Bonwick 			if (BP_IS_HOLE(db->db_blkptr)) {
3764b24ab676SJeff Bonwick 				arc_buf_thaw(data);
3765b24ab676SJeff Bonwick 			} else {
37663f9d6ad7SLin Ling 				dbuf_release_bp(db);
3767b24ab676SJeff Bonwick 			}
37685cabbc6bSPrashanth Sreenivasa 			dbuf_remap(dn, db, tx);
3769b24ab676SJeff Bonwick 		}
3770b24ab676SJeff Bonwick 	}
3771b24ab676SJeff Bonwick 
3772b24ab676SJeff Bonwick 	if (parent != dn->dn_dbuf) {
37733e30c24aSWill Andrews 		/* Our parent is an indirect block. */
37743e30c24aSWill Andrews 		/* We have a dirty parent that has been scheduled for write. */
3775b24ab676SJeff Bonwick 		ASSERT(parent && parent->db_data_pending);
37763e30c24aSWill Andrews 		/* Our parent's buffer is one level closer to the dnode. */
3777b24ab676SJeff Bonwick 		ASSERT(db->db_level == parent->db_level-1);
37783e30c24aSWill Andrews 		/*
37793e30c24aSWill Andrews 		 * We're about to modify our parent's db_data by modifying
37803e30c24aSWill Andrews 		 * our block pointer, so the parent must be released.
37813e30c24aSWill Andrews 		 */
3782b24ab676SJeff Bonwick 		ASSERT(arc_released(parent->db_buf));
3783b24ab676SJeff Bonwick 		zio = parent->db_data_pending->dr_zio;
3784b24ab676SJeff Bonwick 	} else {
37853e30c24aSWill Andrews 		/* Our parent is the dnode itself. */
37860a586ceaSMark Shellenbaum 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
37870a586ceaSMark Shellenbaum 		    db->db_blkid != DMU_SPILL_BLKID) ||
37880a586ceaSMark Shellenbaum 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
37890a586ceaSMark Shellenbaum 		if (db->db_blkid != DMU_SPILL_BLKID)
3790b24ab676SJeff Bonwick 			ASSERT3P(db->db_blkptr, ==,
3791b24ab676SJeff Bonwick 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
3792b24ab676SJeff Bonwick 		zio = dn->dn_zio;
3793b24ab676SJeff Bonwick 	}
3794b24ab676SJeff Bonwick 
3795b24ab676SJeff Bonwick 	ASSERT(db->db_level == 0 || data == db->db_buf);
3796b24ab676SJeff Bonwick 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
3797b24ab676SJeff Bonwick 	ASSERT(zio);
3798b24ab676SJeff Bonwick 
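	/*
	 * The bookmark names this block for error reporting and scrub:
	 * (objset, object, level, blkid). Writes against the MOS, which
	 * has no dsl_dataset, use DMU_META_OBJSET (objset 0).
	 */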
3799b24ab676SJeff Bonwick 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
3800b24ab676SJeff Bonwick 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
3801b24ab676SJeff Bonwick 	    db->db.db_object, db->db_level, db->db_blkid);
3802b24ab676SJeff Bonwick 
38030a586ceaSMark Shellenbaum 	if (db->db_blkid == DMU_SPILL_BLKID)
38040a586ceaSMark Shellenbaum 		wp_flag = WP_SPILL;
38050a586ceaSMark Shellenbaum 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
38060a586ceaSMark Shellenbaum 
3807adaec86aSMatthew Ahrens 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
3808744947dcSTom Erickson 	DB_DNODE_EXIT(db);
3809b24ab676SJeff Bonwick 
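	/*
	 * dmu_write_policy() above filled in zp (checksum, compression,
	 * number of copies, and so on) from the dnode's properties and
	 * wp_flag. Nothing else is needed from the dnode, which is why
	 * DB_DNODE_EXIT() can be called before the write is issued below.
	 */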
381011ceac77SAlex Reece 	/*
381111ceac77SAlex Reece 	 * We copy the blkptr now (rather than when we instantiate the dirty
381211ceac77SAlex Reece 	 * record), because its value can change between open context and
381311ceac77SAlex Reece 	 * syncing context. We do not need to hold dn_struct_rwlock to read
381411ceac77SAlex Reece 	 * db_blkptr because we are in syncing context.
381511ceac77SAlex Reece 	 */
381611ceac77SAlex Reece 	dr->dr_bp_copy = *db->db_blkptr;
381711ceac77SAlex Reece 
38185d7b4d43SMatthew Ahrens 	if (db->db_level == 0 &&
38195d7b4d43SMatthew Ahrens 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
38205d7b4d43SMatthew Ahrens 		/*
38215d7b4d43SMatthew Ahrens 		 * The BP for this block has been provided by open context
38225d7b4d43SMatthew Ahrens 		 * (by dmu_sync() or dmu_buf_write_embedded()).
38235d7b4d43SMatthew Ahrens 		 */
3824770499e1SDan Kimmel 		abd_t *contents = (data != NULL) ?
3825770499e1SDan Kimmel 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
38265d7b4d43SMatthew Ahrens 
38275602294fSDan Kimmel 		dr->dr_zio = zio_write(zio, os->os_spa, txg, &dr->dr_bp_copy,
38285602294fSDan Kimmel 		    contents, db->db.db_size, db->db.db_size, &zp,
38298df0bcf0SPaul Dagnelie 		    dbuf_write_override_ready, NULL, NULL,
38308df0bcf0SPaul Dagnelie 		    dbuf_write_override_done,
383169962b56SMatthew Ahrens 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3832b24ab676SJeff Bonwick 		mutex_enter(&db->db_mtx);
3833b24ab676SJeff Bonwick 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
3834b24ab676SJeff Bonwick 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
383580901aeaSGeorge Wilson 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
3836b24ab676SJeff Bonwick 		mutex_exit(&db->db_mtx);
3837b24ab676SJeff Bonwick 	} else if (db->db_state == DB_NOFILL) {
3838810e43b2SBill Pijewski 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
3839810e43b2SBill Pijewski 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
3840b24ab676SJeff Bonwick 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
38415602294fSDan Kimmel 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
38428df0bcf0SPaul Dagnelie 		    dbuf_write_nofill_ready, NULL, NULL,
38438df0bcf0SPaul Dagnelie 		    dbuf_write_nofill_done, db,
3844b24ab676SJeff Bonwick 		    ZIO_PRIORITY_ASYNC_WRITE,
3845b24ab676SJeff Bonwick 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
3846b24ab676SJeff Bonwick 	} else {
3847b24ab676SJeff Bonwick 		ASSERT(arc_released(data));
38488df0bcf0SPaul Dagnelie 
38498df0bcf0SPaul Dagnelie 		/*
38508df0bcf0SPaul Dagnelie 		 * For indirect blocks, we want to set up the children
38518df0bcf0SPaul Dagnelie 		 * ready callback so that we can properly handle an indirect
38528df0bcf0SPaul Dagnelie 		 * block that only contains holes.
38538df0bcf0SPaul Dagnelie 		 */
38548df0bcf0SPaul Dagnelie 		arc_done_func_t *children_ready_cb = NULL;
38558df0bcf0SPaul Dagnelie 		if (db->db_level != 0)
38568df0bcf0SPaul Dagnelie 			children_ready_cb = dbuf_write_children_ready;
38578df0bcf0SPaul Dagnelie 
3858b24ab676SJeff Bonwick 		dr->dr_zio = arc_write(zio, os->os_spa, txg,
385911ceac77SAlex Reece 		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
3860dcbf3bd6SGeorge Wilson 		    &zp, dbuf_write_ready, children_ready_cb,
386169962b56SMatthew Ahrens 		    dbuf_write_physdone, dbuf_write_done, db,
386269962b56SMatthew Ahrens 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
3863b24ab676SJeff Bonwick 	}
3864fa9e4066Sahrens }
3865