xref: /freebsd/sys/contrib/openzfs/module/zfs/dbuf.c (revision cfd6422a5217410fbd66f7a7a8a64d9d85e61229)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright (c) 2019, Klara Inc.
28  * Copyright (c) 2019, Allan Jude
29  */
30 
31 #include <sys/zfs_context.h>
32 #include <sys/arc.h>
33 #include <sys/dmu.h>
34 #include <sys/dmu_send.h>
35 #include <sys/dmu_impl.h>
36 #include <sys/dbuf.h>
37 #include <sys/dmu_objset.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/spa.h>
42 #include <sys/zio.h>
43 #include <sys/dmu_zfetch.h>
44 #include <sys/sa.h>
45 #include <sys/sa_impl.h>
46 #include <sys/zfeature.h>
47 #include <sys/blkptr.h>
48 #include <sys/range_tree.h>
49 #include <sys/trace_zfs.h>
50 #include <sys/callb.h>
51 #include <sys/abd.h>
52 #include <sys/vdev.h>
53 #include <cityhash.h>
54 #include <sys/spa_impl.h>
55 
56 kstat_t *dbuf_ksp;
57 
58 typedef struct dbuf_stats {
59 	/*
60 	 * Various statistics about the size of the dbuf cache.
61 	 */
62 	kstat_named_t cache_count;
63 	kstat_named_t cache_size_bytes;
64 	kstat_named_t cache_size_bytes_max;
65 	/*
66 	 * Statistics regarding the bounds on the dbuf cache size.
67 	 */
68 	kstat_named_t cache_target_bytes;
69 	kstat_named_t cache_lowater_bytes;
70 	kstat_named_t cache_hiwater_bytes;
71 	/*
72 	 * Total number of dbuf cache evictions that have occurred.
73 	 */
74 	kstat_named_t cache_total_evicts;
75 	/*
76 	 * The distribution of dbuf levels in the dbuf cache and
77 	 * the total size of all dbufs at each level.
78 	 */
79 	kstat_named_t cache_levels[DN_MAX_LEVELS];
80 	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
81 	/*
82 	 * Statistics about the dbuf hash table.
83 	 */
84 	kstat_named_t hash_hits;
85 	kstat_named_t hash_misses;
86 	kstat_named_t hash_collisions;
87 	kstat_named_t hash_elements;
88 	kstat_named_t hash_elements_max;
89 	/*
90 	 * Number of sublists containing more than one dbuf in the dbuf
91 	 * hash table. Keep track of the longest hash chain.
92 	 */
93 	kstat_named_t hash_chains;
94 	kstat_named_t hash_chain_max;
95 	/*
96 	 * Number of times a dbuf_create() discovers that a dbuf was
97 	 * already created and in the dbuf hash table.
98 	 */
99 	kstat_named_t hash_insert_race;
100 	/*
101 	 * Statistics about the size of the metadata dbuf cache.
102 	 */
103 	kstat_named_t metadata_cache_count;
104 	kstat_named_t metadata_cache_size_bytes;
105 	kstat_named_t metadata_cache_size_bytes_max;
106 	/*
107 	 * For diagnostic purposes, this is incremented whenever we can't add
108 	 * something to the metadata cache because it's full, and instead put
109 	 * the data in the regular dbuf cache.
110 	 */
111 	kstat_named_t metadata_cache_overflow;
112 } dbuf_stats_t;
113 
114 dbuf_stats_t dbuf_stats = {
115 	{ "cache_count",			KSTAT_DATA_UINT64 },
116 	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
117 	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
118 	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
119 	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
120 	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
121 	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
122 	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
123 	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
124 	{ "hash_hits",				KSTAT_DATA_UINT64 },
125 	{ "hash_misses",			KSTAT_DATA_UINT64 },
126 	{ "hash_collisions",			KSTAT_DATA_UINT64 },
127 	{ "hash_elements",			KSTAT_DATA_UINT64 },
128 	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
129 	{ "hash_chains",			KSTAT_DATA_UINT64 },
130 	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
131 	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
132 	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
133 	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
134 	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
135 	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
136 };
137 
138 #define	DBUF_STAT_INCR(stat, val)	\
139 	atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
140 #define	DBUF_STAT_DECR(stat, val)	\
141 	DBUF_STAT_INCR(stat, -(val));
142 #define	DBUF_STAT_BUMP(stat)		\
143 	DBUF_STAT_INCR(stat, 1);
144 #define	DBUF_STAT_BUMPDOWN(stat)	\
145 	DBUF_STAT_INCR(stat, -1);
146 #define	DBUF_STAT_MAX(stat, v) {					\
147 	uint64_t _m;							\
148 	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
149 	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
150 		continue;						\
151 }
152 
153 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
154 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
155 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
156 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
157 
158 extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
159     dmu_buf_evict_func_t *evict_func_sync,
160     dmu_buf_evict_func_t *evict_func_async,
161     dmu_buf_t **clear_on_evict_dbufp);
162 
163 /*
164  * Global data structures and functions for the dbuf cache.
165  */
166 static kmem_cache_t *dbuf_kmem_cache;
167 static taskq_t *dbu_evict_taskq;
168 
169 static kthread_t *dbuf_cache_evict_thread;
170 static kmutex_t dbuf_evict_lock;
171 static kcondvar_t dbuf_evict_cv;
172 static boolean_t dbuf_evict_thread_exit;
173 
174 /*
175  * There are two dbuf caches; each dbuf can only be in one of them at a time.
176  *
177  * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
178  *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
179  *    that represent the metadata that describes filesystems/snapshots/
180  *    bookmarks/properties/etc. We only evict from this cache when we export a
181  *    pool, to short-circuit as much I/O as possible for all administrative
182  *    commands that need the metadata. There is no eviction policy for this
183  *    cache, because we try to only include types in it which would occupy a
184  *    very small amount of space per object but create a large impact on the
185  *    performance of these commands. Instead, after it reaches a maximum size
186  *    (which should only happen on very small memory systems with a very large
187  *    number of filesystem objects), we stop taking new dbufs into the
188  *    metadata cache, instead putting them in the normal dbuf cache.
189  *
190  * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
191  *    are not currently held but have been recently released. These dbufs
192  *    are not eligible for arc eviction until they are aged out of the cache.
193  *    Dbufs that are aged out of the cache will be immediately destroyed and
194  *    become eligible for arc eviction.
195  *
196  * Dbufs are added to these caches once the last hold is released. If a dbuf is
197  * later accessed and still exists in the dbuf cache, then it will be removed
198  * from the cache and later re-added to the head of the cache.
199  *
200  * If a given dbuf meets the requirements for the metadata cache, it will go
201  * there, otherwise it will be considered for the generic LRU dbuf cache. The
202  * caches and the refcounts tracking their sizes are stored in an array indexed
203  * by those caches' matching enum values (from dbuf_cached_state_t).
204  */
205 typedef struct dbuf_cache {
206 	multilist_t *cache;
207 	zfs_refcount_t size;
208 } dbuf_cache_t;
209 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
210 
211 /* Size limits for the caches */
212 unsigned long dbuf_cache_max_bytes = ULONG_MAX;
213 unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
214 
215 /* Set the default sizes of the caches to log2 fraction of arc size */
216 int dbuf_cache_shift = 5;
217 int dbuf_metadata_cache_shift = 6;
218 
219 static unsigned long dbuf_cache_target_bytes(void);
220 static unsigned long dbuf_metadata_cache_target_bytes(void);
221 
222 /*
223  * The LRU dbuf cache uses a three-stage eviction policy:
224  *	- A low water marker designates when the dbuf eviction thread
225  *	should stop evicting from the dbuf cache.
226  *	- When we reach the maximum size (aka mid water mark), we
227  *	signal the eviction thread to run.
228  *	- The high water mark indicates when the eviction thread
229  *	is unable to keep up with the incoming load and eviction must
230  *	happen in the context of the calling thread.
231  *
232  * The dbuf cache:
233  *                                                 (max size)
234  *                                      low water   mid water   hi water
235  * +----------------------------------------+----------+----------+
236  * |                                        |          |          |
237  * |                                        |          |          |
238  * |                                        |          |          |
239  * |                                        |          |          |
240  * +----------------------------------------+----------+----------+
241  *                                        stop        signal     evict
242  *                                      evicting     eviction   directly
243  *                                                    thread
244  *
245  * The high and low water marks indicate the operating range for the eviction
246  * thread. The low water mark is, by default, 90% of the total size of the
247  * cache and the high water mark is at 110% (both of these percentages can be
248  * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
249  * respectively). The eviction thread will try to ensure that the cache remains
250  * within this range by waking up every second and checking if the cache is
251  * above the low water mark. The thread can also be woken up by callers adding
252  * elements into the cache if the cache is larger than the mid water (i.e max
253  * cache size). Once the eviction thread is woken up and eviction is required,
254  * it will continue evicting buffers until it's able to reduce the cache size
255  * to the low water mark. If the cache size continues to grow and hits the high
256  * water mark, then callers adding elements to the cache will begin to evict
257  * directly from the cache until the cache is no longer above the high water
258  * mark.
259  */
260 
261 /*
262  * The percentage above and below the maximum cache size.
263  */
264 uint_t dbuf_cache_hiwater_pct = 10;
265 uint_t dbuf_cache_lowater_pct = 10;
266 
267 /* ARGSUSED */
268 static int
269 dbuf_cons(void *vdb, void *unused, int kmflag)
270 {
271 	dmu_buf_impl_t *db = vdb;
272 	bzero(db, sizeof (dmu_buf_impl_t));
273 
274 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
275 	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
276 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
277 	multilist_link_init(&db->db_cache_link);
278 	zfs_refcount_create(&db->db_holds);
279 
280 	return (0);
281 }
282 
283 /* ARGSUSED */
284 static void
285 dbuf_dest(void *vdb, void *unused)
286 {
287 	dmu_buf_impl_t *db = vdb;
288 	mutex_destroy(&db->db_mtx);
289 	rw_destroy(&db->db_rwlock);
290 	cv_destroy(&db->db_changed);
291 	ASSERT(!multilist_link_active(&db->db_cache_link));
292 	zfs_refcount_destroy(&db->db_holds);
293 }
294 
295 /*
296  * dbuf hash table routines
297  */
298 static dbuf_hash_table_t dbuf_hash_table;
299 
300 static uint64_t dbuf_hash_count;
301 
302 /*
303  * We use Cityhash for this. It's fast, and has good hash properties without
304  * requiring any large static buffers.
305  */
306 static uint64_t
307 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
308 {
309 	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
310 }
311 
312 #define	DTRACE_SET_STATE(db, why) \
313 	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
314 	    const char *, why)
315 
316 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
317 	((dbuf)->db.db_object == (obj) &&		\
318 	(dbuf)->db_objset == (os) &&			\
319 	(dbuf)->db_level == (level) &&			\
320 	(dbuf)->db_blkid == (blkid))
321 
322 dmu_buf_impl_t *
323 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
324 {
325 	dbuf_hash_table_t *h = &dbuf_hash_table;
326 	uint64_t hv;
327 	uint64_t idx;
328 	dmu_buf_impl_t *db;
329 
330 	hv = dbuf_hash(os, obj, level, blkid);
331 	idx = hv & h->hash_table_mask;
332 
333 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
334 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
335 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
336 			mutex_enter(&db->db_mtx);
337 			if (db->db_state != DB_EVICTING) {
338 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
339 				return (db);
340 			}
341 			mutex_exit(&db->db_mtx);
342 		}
343 	}
344 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
345 	return (NULL);
346 }
347 
348 static dmu_buf_impl_t *
349 dbuf_find_bonus(objset_t *os, uint64_t object)
350 {
351 	dnode_t *dn;
352 	dmu_buf_impl_t *db = NULL;
353 
354 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
355 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
356 		if (dn->dn_bonus != NULL) {
357 			db = dn->dn_bonus;
358 			mutex_enter(&db->db_mtx);
359 		}
360 		rw_exit(&dn->dn_struct_rwlock);
361 		dnode_rele(dn, FTAG);
362 	}
363 	return (db);
364 }
365 
366 /*
367  * Insert an entry into the hash table.  If there is already an element
368  * equal to elem in the hash table, then the already existing element
369  * will be returned and the new element will not be inserted.
370  * Otherwise returns NULL.
371  */
372 static dmu_buf_impl_t *
373 dbuf_hash_insert(dmu_buf_impl_t *db)
374 {
375 	dbuf_hash_table_t *h = &dbuf_hash_table;
376 	objset_t *os = db->db_objset;
377 	uint64_t obj = db->db.db_object;
378 	int level = db->db_level;
379 	uint64_t blkid, hv, idx;
380 	dmu_buf_impl_t *dbf;
381 	uint32_t i;
382 
383 	blkid = db->db_blkid;
384 	hv = dbuf_hash(os, obj, level, blkid);
385 	idx = hv & h->hash_table_mask;
386 
387 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
388 	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
389 	    dbf = dbf->db_hash_next, i++) {
390 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
391 			mutex_enter(&dbf->db_mtx);
392 			if (dbf->db_state != DB_EVICTING) {
393 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
394 				return (dbf);
395 			}
396 			mutex_exit(&dbf->db_mtx);
397 		}
398 	}
399 
400 	if (i > 0) {
401 		DBUF_STAT_BUMP(hash_collisions);
402 		if (i == 1)
403 			DBUF_STAT_BUMP(hash_chains);
404 
405 		DBUF_STAT_MAX(hash_chain_max, i);
406 	}
407 
408 	mutex_enter(&db->db_mtx);
409 	db->db_hash_next = h->hash_table[idx];
410 	h->hash_table[idx] = db;
411 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
412 	atomic_inc_64(&dbuf_hash_count);
413 	DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);
414 
415 	return (NULL);
416 }
417 
418 /*
419  * This returns whether this dbuf should be stored in the metadata cache, which
420  * is based on whether it's from one of the dnode types that store data related
421  * to traversing dataset hierarchies.
422  */
423 static boolean_t
424 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
425 {
426 	DB_DNODE_ENTER(db);
427 	dmu_object_type_t type = DB_DNODE(db)->dn_type;
428 	DB_DNODE_EXIT(db);
429 
430 	/* Check if this dbuf is one of the types we care about */
431 	if (DMU_OT_IS_METADATA_CACHED(type)) {
432 		/* If we hit this, then we set something up wrong in dmu_ot */
433 		ASSERT(DMU_OT_IS_METADATA(type));
434 
435 		/*
436 		 * Sanity check for small-memory systems: don't allocate too
437 		 * much memory for this purpose.
438 		 */
439 		if (zfs_refcount_count(
440 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
441 		    dbuf_metadata_cache_target_bytes()) {
442 			DBUF_STAT_BUMP(metadata_cache_overflow);
443 			return (B_FALSE);
444 		}
445 
446 		return (B_TRUE);
447 	}
448 
449 	return (B_FALSE);
450 }
451 
452 /*
453  * Remove an entry from the hash table.  It must be in the EVICTING state.
454  */
455 static void
456 dbuf_hash_remove(dmu_buf_impl_t *db)
457 {
458 	dbuf_hash_table_t *h = &dbuf_hash_table;
459 	uint64_t hv, idx;
460 	dmu_buf_impl_t *dbf, **dbp;
461 
462 	hv = dbuf_hash(db->db_objset, db->db.db_object,
463 	    db->db_level, db->db_blkid);
464 	idx = hv & h->hash_table_mask;
465 
466 	/*
467 	 * We mustn't hold db_mtx to maintain lock ordering:
468 	 * DBUF_HASH_MUTEX > db_mtx.
469 	 */
470 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
471 	ASSERT(db->db_state == DB_EVICTING);
472 	ASSERT(!MUTEX_HELD(&db->db_mtx));
473 
474 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
475 	dbp = &h->hash_table[idx];
476 	while ((dbf = *dbp) != db) {
477 		dbp = &dbf->db_hash_next;
478 		ASSERT(dbf != NULL);
479 	}
480 	*dbp = db->db_hash_next;
481 	db->db_hash_next = NULL;
482 	if (h->hash_table[idx] &&
483 	    h->hash_table[idx]->db_hash_next == NULL)
484 		DBUF_STAT_BUMPDOWN(hash_chains);
485 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
486 	atomic_dec_64(&dbuf_hash_count);
487 }
488 
489 typedef enum {
490 	DBVU_EVICTING,
491 	DBVU_NOT_EVICTING
492 } dbvu_verify_type_t;
493 
494 static void
495 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
496 {
497 #ifdef ZFS_DEBUG
498 	int64_t holds;
499 
500 	if (db->db_user == NULL)
501 		return;
502 
503 	/* Only data blocks support the attachment of user data. */
504 	ASSERT(db->db_level == 0);
505 
506 	/* Clients must resolve a dbuf before attaching user data. */
507 	ASSERT(db->db.db_data != NULL);
508 	ASSERT3U(db->db_state, ==, DB_CACHED);
509 
510 	holds = zfs_refcount_count(&db->db_holds);
511 	if (verify_type == DBVU_EVICTING) {
512 		/*
513 		 * Immediate eviction occurs when holds == dirtycnt.
514 		 * For normal eviction buffers, holds is zero on
515 		 * eviction, except when dbuf_fix_old_data() calls
516 		 * dbuf_clear_data().  However, the hold count can grow
517 		 * during eviction even though db_mtx is held (see
518 		 * dmu_bonus_hold() for an example), so we can only
519 		 * test the generic invariant that holds >= dirtycnt.
520 		 */
521 		ASSERT3U(holds, >=, db->db_dirtycnt);
522 	} else {
523 		if (db->db_user_immediate_evict == TRUE)
524 			ASSERT3U(holds, >=, db->db_dirtycnt);
525 		else
526 			ASSERT3U(holds, >, 0);
527 	}
528 #endif
529 }
530 
531 static void
532 dbuf_evict_user(dmu_buf_impl_t *db)
533 {
534 	dmu_buf_user_t *dbu = db->db_user;
535 
536 	ASSERT(MUTEX_HELD(&db->db_mtx));
537 
538 	if (dbu == NULL)
539 		return;
540 
541 	dbuf_verify_user(db, DBVU_EVICTING);
542 	db->db_user = NULL;
543 
544 #ifdef ZFS_DEBUG
545 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
546 		*dbu->dbu_clear_on_evict_dbufp = NULL;
547 #endif
548 
549 	/*
550 	 * There are two eviction callbacks - one that we call synchronously
551 	 * and one that we invoke via a taskq.  The async one is useful for
552 	 * avoiding lock order reversals and limiting stack depth.
553 	 *
554 	 * Note that if we have a sync callback but no async callback,
555 	 * it's likely that the sync callback will free the structure
556 	 * containing the dbu.  In that case we need to take care to not
557 	 * dereference dbu after calling the sync evict func.
558 	 */
559 	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
560 
561 	if (dbu->dbu_evict_func_sync != NULL)
562 		dbu->dbu_evict_func_sync(dbu);
563 
564 	if (has_async) {
565 		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
566 		    dbu, 0, &dbu->dbu_tqent);
567 	}
568 }
569 
570 boolean_t
571 dbuf_is_metadata(dmu_buf_impl_t *db)
572 {
573 	/*
574 	 * Consider indirect blocks and spill blocks to be meta data.
575 	 */
576 	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
577 		return (B_TRUE);
578 	} else {
579 		boolean_t is_metadata;
580 
581 		DB_DNODE_ENTER(db);
582 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
583 		DB_DNODE_EXIT(db);
584 
585 		return (is_metadata);
586 	}
587 }
588 
589 
590 /*
591  * This function *must* return indices evenly distributed between all
592  * sublists of the multilist. This is needed due to how the dbuf eviction
593  * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
594  * distributed between all sublists and uses this assumption when
595  * deciding which sublist to evict from and how much to evict from it.
596  */
597 static unsigned int
598 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
599 {
600 	dmu_buf_impl_t *db = obj;
601 
602 	/*
603 	 * The assumption here, is the hash value for a given
604 	 * dmu_buf_impl_t will remain constant throughout it's lifetime
605 	 * (i.e. it's objset, object, level and blkid fields don't change).
606 	 * Thus, we don't need to store the dbuf's sublist index
607 	 * on insertion, as this index can be recalculated on removal.
608 	 *
609 	 * Also, the low order bits of the hash value are thought to be
610 	 * distributed evenly. Otherwise, in the case that the multilist
611 	 * has a power of two number of sublists, each sublists' usage
612 	 * would not be evenly distributed.
613 	 */
614 	return (dbuf_hash(db->db_objset, db->db.db_object,
615 	    db->db_level, db->db_blkid) %
616 	    multilist_get_num_sublists(ml));
617 }
618 
619 /*
620  * The target size of the dbuf cache can grow with the ARC target,
621  * unless limited by the tunable dbuf_cache_max_bytes.
622  */
623 static inline unsigned long
624 dbuf_cache_target_bytes(void)
625 {
626 	return (MIN(dbuf_cache_max_bytes,
627 	    arc_target_bytes() >> dbuf_cache_shift));
628 }
629 
630 /*
631  * The target size of the dbuf metadata cache can grow with the ARC target,
632  * unless limited by the tunable dbuf_metadata_cache_max_bytes.
633  */
634 static inline unsigned long
635 dbuf_metadata_cache_target_bytes(void)
636 {
637 	return (MIN(dbuf_metadata_cache_max_bytes,
638 	    arc_target_bytes() >> dbuf_metadata_cache_shift));
639 }
640 
641 static inline uint64_t
642 dbuf_cache_hiwater_bytes(void)
643 {
644 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
645 	return (dbuf_cache_target +
646 	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
647 }
648 
649 static inline uint64_t
650 dbuf_cache_lowater_bytes(void)
651 {
652 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
653 	return (dbuf_cache_target -
654 	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
655 }
656 
657 static inline boolean_t
658 dbuf_cache_above_lowater(void)
659 {
660 	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
661 	    dbuf_cache_lowater_bytes());
662 }
663 
664 /*
665  * Evict the oldest eligible dbuf from the dbuf cache.
666  */
667 static void
668 dbuf_evict_one(void)
669 {
670 	int idx = multilist_get_random_index(dbuf_caches[DB_DBUF_CACHE].cache);
671 	multilist_sublist_t *mls = multilist_sublist_lock(
672 	    dbuf_caches[DB_DBUF_CACHE].cache, idx);
673 
674 	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
675 
676 	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
677 	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
678 		db = multilist_sublist_prev(mls, db);
679 	}
680 
681 	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
682 	    multilist_sublist_t *, mls);
683 
684 	if (db != NULL) {
685 		multilist_sublist_remove(mls, db);
686 		multilist_sublist_unlock(mls);
687 		(void) zfs_refcount_remove_many(
688 		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
689 		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
690 		DBUF_STAT_BUMPDOWN(cache_count);
691 		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
692 		    db->db.db_size);
693 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
694 		db->db_caching_status = DB_NO_CACHE;
695 		dbuf_destroy(db);
696 		DBUF_STAT_BUMP(cache_total_evicts);
697 	} else {
698 		multilist_sublist_unlock(mls);
699 	}
700 }
701 
702 /*
703  * The dbuf evict thread is responsible for aging out dbufs from the
704  * cache. Once the cache has reached it's maximum size, dbufs are removed
705  * and destroyed. The eviction thread will continue running until the size
706  * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
707  * out of the cache it is destroyed and becomes eligible for arc eviction.
708  */
709 /* ARGSUSED */
710 static void
711 dbuf_evict_thread(void *unused)
712 {
713 	callb_cpr_t cpr;
714 
715 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
716 
717 	mutex_enter(&dbuf_evict_lock);
718 	while (!dbuf_evict_thread_exit) {
719 		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
720 			CALLB_CPR_SAFE_BEGIN(&cpr);
721 			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
722 			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
723 			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
724 		}
725 		mutex_exit(&dbuf_evict_lock);
726 
727 		/*
728 		 * Keep evicting as long as we're above the low water mark
729 		 * for the cache. We do this without holding the locks to
730 		 * minimize lock contention.
731 		 */
732 		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
733 			dbuf_evict_one();
734 		}
735 
736 		mutex_enter(&dbuf_evict_lock);
737 	}
738 
739 	dbuf_evict_thread_exit = B_FALSE;
740 	cv_broadcast(&dbuf_evict_cv);
741 	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
742 	thread_exit();
743 }
744 
745 /*
746  * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
747  * If the dbuf cache is at its high water mark, then evict a dbuf from the
748  * dbuf cache using the callers context.
749  */
750 static void
751 dbuf_evict_notify(uint64_t size)
752 {
753 	/*
754 	 * We check if we should evict without holding the dbuf_evict_lock,
755 	 * because it's OK to occasionally make the wrong decision here,
756 	 * and grabbing the lock results in massive lock contention.
757 	 */
758 	if (size > dbuf_cache_target_bytes()) {
759 		if (size > dbuf_cache_hiwater_bytes())
760 			dbuf_evict_one();
761 		cv_signal(&dbuf_evict_cv);
762 	}
763 }
764 
765 static int
766 dbuf_kstat_update(kstat_t *ksp, int rw)
767 {
768 	dbuf_stats_t *ds = ksp->ks_data;
769 
770 	if (rw == KSTAT_WRITE) {
771 		return (SET_ERROR(EACCES));
772 	} else {
773 		ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
774 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
775 		ds->cache_size_bytes.value.ui64 =
776 		    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
777 		ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
778 		ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
779 		ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
780 		ds->hash_elements.value.ui64 = dbuf_hash_count;
781 	}
782 
783 	return (0);
784 }
785 
786 void
787 dbuf_init(void)
788 {
789 	uint64_t hsize = 1ULL << 16;
790 	dbuf_hash_table_t *h = &dbuf_hash_table;
791 	int i;
792 
793 	/*
794 	 * The hash table is big enough to fill all of physical memory
795 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
796 	 * By default, the table will take up
797 	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
798 	 */
799 	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
800 		hsize <<= 1;
801 
802 retry:
803 	h->hash_table_mask = hsize - 1;
804 #if defined(_KERNEL)
805 	/*
806 	 * Large allocations which do not require contiguous pages
807 	 * should be using vmem_alloc() in the linux kernel
808 	 */
809 	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
810 #else
811 	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
812 #endif
813 	if (h->hash_table == NULL) {
814 		/* XXX - we should really return an error instead of assert */
815 		ASSERT(hsize > (1ULL << 10));
816 		hsize >>= 1;
817 		goto retry;
818 	}
819 
820 	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
821 	    sizeof (dmu_buf_impl_t),
822 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
823 
824 	for (i = 0; i < DBUF_MUTEXES; i++)
825 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
826 
827 	dbuf_stats_init(h);
828 
829 	/*
830 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
831 	 * configuration is not required.
832 	 */
833 	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
834 
835 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
836 		dbuf_caches[dcs].cache =
837 		    multilist_create(sizeof (dmu_buf_impl_t),
838 		    offsetof(dmu_buf_impl_t, db_cache_link),
839 		    dbuf_cache_multilist_index_func);
840 		zfs_refcount_create(&dbuf_caches[dcs].size);
841 	}
842 
843 	dbuf_evict_thread_exit = B_FALSE;
844 	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
845 	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
846 	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
847 	    NULL, 0, &p0, TS_RUN, minclsyspri);
848 
849 	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
850 	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
851 	    KSTAT_FLAG_VIRTUAL);
852 	if (dbuf_ksp != NULL) {
853 		for (i = 0; i < DN_MAX_LEVELS; i++) {
854 			snprintf(dbuf_stats.cache_levels[i].name,
855 			    KSTAT_STRLEN, "cache_level_%d", i);
856 			dbuf_stats.cache_levels[i].data_type =
857 			    KSTAT_DATA_UINT64;
858 			snprintf(dbuf_stats.cache_levels_bytes[i].name,
859 			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
860 			dbuf_stats.cache_levels_bytes[i].data_type =
861 			    KSTAT_DATA_UINT64;
862 		}
863 		dbuf_ksp->ks_data = &dbuf_stats;
864 		dbuf_ksp->ks_update = dbuf_kstat_update;
865 		kstat_install(dbuf_ksp);
866 	}
867 }
868 
869 void
870 dbuf_fini(void)
871 {
872 	dbuf_hash_table_t *h = &dbuf_hash_table;
873 	int i;
874 
875 	dbuf_stats_destroy();
876 
877 	for (i = 0; i < DBUF_MUTEXES; i++)
878 		mutex_destroy(&h->hash_mutexes[i]);
879 #if defined(_KERNEL)
880 	/*
881 	 * Large allocations which do not require contiguous pages
882 	 * should be using vmem_free() in the linux kernel
883 	 */
884 	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
885 #else
886 	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
887 #endif
888 	kmem_cache_destroy(dbuf_kmem_cache);
889 	taskq_destroy(dbu_evict_taskq);
890 
891 	mutex_enter(&dbuf_evict_lock);
892 	dbuf_evict_thread_exit = B_TRUE;
893 	while (dbuf_evict_thread_exit) {
894 		cv_signal(&dbuf_evict_cv);
895 		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
896 	}
897 	mutex_exit(&dbuf_evict_lock);
898 
899 	mutex_destroy(&dbuf_evict_lock);
900 	cv_destroy(&dbuf_evict_cv);
901 
902 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
903 		zfs_refcount_destroy(&dbuf_caches[dcs].size);
904 		multilist_destroy(dbuf_caches[dcs].cache);
905 	}
906 
907 	if (dbuf_ksp != NULL) {
908 		kstat_delete(dbuf_ksp);
909 		dbuf_ksp = NULL;
910 	}
911 }
912 
913 /*
914  * Other stuff.
915  */
916 
917 #ifdef ZFS_DEBUG
918 static void
919 dbuf_verify(dmu_buf_impl_t *db)
920 {
921 	dnode_t *dn;
922 	dbuf_dirty_record_t *dr;
923 	uint32_t txg_prev;
924 
925 	ASSERT(MUTEX_HELD(&db->db_mtx));
926 
927 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
928 		return;
929 
930 	ASSERT(db->db_objset != NULL);
931 	DB_DNODE_ENTER(db);
932 	dn = DB_DNODE(db);
933 	if (dn == NULL) {
934 		ASSERT(db->db_parent == NULL);
935 		ASSERT(db->db_blkptr == NULL);
936 	} else {
937 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
938 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
939 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
940 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
941 		    db->db_blkid == DMU_SPILL_BLKID ||
942 		    !avl_is_empty(&dn->dn_dbufs));
943 	}
944 	if (db->db_blkid == DMU_BONUS_BLKID) {
945 		ASSERT(dn != NULL);
946 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
947 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
948 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
949 		ASSERT(dn != NULL);
950 		ASSERT0(db->db.db_offset);
951 	} else {
952 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
953 	}
954 
955 	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
956 		ASSERT(dr->dr_dbuf == db);
957 		txg_prev = dr->dr_txg;
958 		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
959 		    dr = list_next(&db->db_dirty_records, dr)) {
960 			ASSERT(dr->dr_dbuf == db);
961 			ASSERT(txg_prev > dr->dr_txg);
962 			txg_prev = dr->dr_txg;
963 		}
964 	}
965 
966 	/*
967 	 * We can't assert that db_size matches dn_datablksz because it
968 	 * can be momentarily different when another thread is doing
969 	 * dnode_set_blksz().
970 	 */
971 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
972 		dr = db->db_data_pending;
973 		/*
974 		 * It should only be modified in syncing context, so
975 		 * make sure we only have one copy of the data.
976 		 */
977 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
978 	}
979 
980 	/* verify db->db_blkptr */
981 	if (db->db_blkptr) {
982 		if (db->db_parent == dn->dn_dbuf) {
983 			/* db is pointed to by the dnode */
984 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
985 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
986 				ASSERT(db->db_parent == NULL);
987 			else
988 				ASSERT(db->db_parent != NULL);
989 			if (db->db_blkid != DMU_SPILL_BLKID)
990 				ASSERT3P(db->db_blkptr, ==,
991 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
992 		} else {
993 			/* db is pointed to by an indirect block */
994 			int epb __maybe_unused = db->db_parent->db.db_size >>
995 			    SPA_BLKPTRSHIFT;
996 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
997 			ASSERT3U(db->db_parent->db.db_object, ==,
998 			    db->db.db_object);
999 			/*
1000 			 * dnode_grow_indblksz() can make this fail if we don't
1001 			 * have the parent's rwlock.  XXX indblksz no longer
1002 			 * grows.  safe to do this now?
1003 			 */
1004 			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1005 				ASSERT3P(db->db_blkptr, ==,
1006 				    ((blkptr_t *)db->db_parent->db.db_data +
1007 				    db->db_blkid % epb));
1008 			}
1009 		}
1010 	}
1011 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1012 	    (db->db_buf == NULL || db->db_buf->b_data) &&
1013 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1014 	    db->db_state != DB_FILL && !dn->dn_free_txg) {
1015 		/*
1016 		 * If the blkptr isn't set but they have nonzero data,
1017 		 * it had better be dirty, otherwise we'll lose that
1018 		 * data when we evict this buffer.
1019 		 *
1020 		 * There is an exception to this rule for indirect blocks; in
1021 		 * this case, if the indirect block is a hole, we fill in a few
1022 		 * fields on each of the child blocks (importantly, birth time)
1023 		 * to prevent hole birth times from being lost when you
1024 		 * partially fill in a hole.
1025 		 */
1026 		if (db->db_dirtycnt == 0) {
1027 			if (db->db_level == 0) {
1028 				uint64_t *buf = db->db.db_data;
1029 				int i;
1030 
1031 				for (i = 0; i < db->db.db_size >> 3; i++) {
1032 					ASSERT(buf[i] == 0);
1033 				}
1034 			} else {
1035 				blkptr_t *bps = db->db.db_data;
1036 				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1037 				    db->db.db_size);
1038 				/*
1039 				 * We want to verify that all the blkptrs in the
1040 				 * indirect block are holes, but we may have
1041 				 * automatically set up a few fields for them.
1042 				 * We iterate through each blkptr and verify
1043 				 * they only have those fields set.
1044 				 */
1045 				for (int i = 0;
1046 				    i < db->db.db_size / sizeof (blkptr_t);
1047 				    i++) {
1048 					blkptr_t *bp = &bps[i];
1049 					ASSERT(ZIO_CHECKSUM_IS_ZERO(
1050 					    &bp->blk_cksum));
1051 					ASSERT(
1052 					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1053 					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1054 					    DVA_IS_EMPTY(&bp->blk_dva[2]));
1055 					ASSERT0(bp->blk_fill);
1056 					ASSERT0(bp->blk_pad[0]);
1057 					ASSERT0(bp->blk_pad[1]);
1058 					ASSERT(!BP_IS_EMBEDDED(bp));
1059 					ASSERT(BP_IS_HOLE(bp));
1060 					ASSERT0(bp->blk_phys_birth);
1061 				}
1062 			}
1063 		}
1064 	}
1065 	DB_DNODE_EXIT(db);
1066 }
1067 #endif
1068 
1069 static void
1070 dbuf_clear_data(dmu_buf_impl_t *db)
1071 {
1072 	ASSERT(MUTEX_HELD(&db->db_mtx));
1073 	dbuf_evict_user(db);
1074 	ASSERT3P(db->db_buf, ==, NULL);
1075 	db->db.db_data = NULL;
1076 	if (db->db_state != DB_NOFILL) {
1077 		db->db_state = DB_UNCACHED;
1078 		DTRACE_SET_STATE(db, "clear data");
1079 	}
1080 }
1081 
1082 static void
1083 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1084 {
1085 	ASSERT(MUTEX_HELD(&db->db_mtx));
1086 	ASSERT(buf != NULL);
1087 
1088 	db->db_buf = buf;
1089 	ASSERT(buf->b_data != NULL);
1090 	db->db.db_data = buf->b_data;
1091 }
1092 
1093 static arc_buf_t *
1094 dbuf_alloc_arcbuf_from_arcbuf(dmu_buf_impl_t *db, arc_buf_t *data)
1095 {
1096 	objset_t *os = db->db_objset;
1097 	spa_t *spa = os->os_spa;
1098 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1099 	enum zio_compress compress_type;
1100 	uint8_t complevel;
1101 	int psize, lsize;
1102 
1103 	psize = arc_buf_size(data);
1104 	lsize = arc_buf_lsize(data);
1105 	compress_type = arc_get_compression(data);
1106 	complevel = arc_get_complevel(data);
1107 
1108 	if (arc_is_encrypted(data)) {
1109 		boolean_t byteorder;
1110 		uint8_t salt[ZIO_DATA_SALT_LEN];
1111 		uint8_t iv[ZIO_DATA_IV_LEN];
1112 		uint8_t mac[ZIO_DATA_MAC_LEN];
1113 		dnode_t *dn = DB_DNODE(db);
1114 
1115 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
1116 		data = arc_alloc_raw_buf(spa, db, dmu_objset_id(os),
1117 		    byteorder, salt, iv, mac, dn->dn_type, psize, lsize,
1118 		    compress_type, complevel);
1119 	} else if (compress_type != ZIO_COMPRESS_OFF) {
1120 		ASSERT3U(type, ==, ARC_BUFC_DATA);
1121 		data = arc_alloc_compressed_buf(spa, db,
1122 		    psize, lsize, compress_type, complevel);
1123 	} else {
1124 		data = arc_alloc_buf(spa, db, type, psize);
1125 	}
1126 	return (data);
1127 }
1128 
1129 static arc_buf_t *
1130 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1131 {
1132 	spa_t *spa = db->db_objset->os_spa;
1133 
1134 	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1135 }
1136 
1137 /*
1138  * Loan out an arc_buf for read.  Return the loaned arc_buf.
1139  */
1140 arc_buf_t *
1141 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1142 {
1143 	arc_buf_t *abuf;
1144 
1145 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1146 	mutex_enter(&db->db_mtx);
1147 	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1148 		int blksz = db->db.db_size;
1149 		spa_t *spa = db->db_objset->os_spa;
1150 
1151 		mutex_exit(&db->db_mtx);
1152 		abuf = arc_loan_buf(spa, B_FALSE, blksz);
1153 		bcopy(db->db.db_data, abuf->b_data, blksz);
1154 	} else {
1155 		abuf = db->db_buf;
1156 		arc_loan_inuse_buf(abuf, db);
1157 		db->db_buf = NULL;
1158 		dbuf_clear_data(db);
1159 		mutex_exit(&db->db_mtx);
1160 	}
1161 	return (abuf);
1162 }
1163 
1164 /*
1165  * Calculate which level n block references the data at the level 0 offset
1166  * provided.
1167  */
1168 uint64_t
1169 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1170 {
1171 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1172 		/*
1173 		 * The level n blkid is equal to the level 0 blkid divided by
1174 		 * the number of level 0s in a level n block.
1175 		 *
1176 		 * The level 0 blkid is offset >> datablkshift =
1177 		 * offset / 2^datablkshift.
1178 		 *
1179 		 * The number of level 0s in a level n is the number of block
1180 		 * pointers in an indirect block, raised to the power of level.
1181 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1182 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1183 		 *
1184 		 * Thus, the level n blkid is: offset /
1185 		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1186 		 * = offset / 2^(datablkshift + level *
1187 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1188 		 * = offset >> (datablkshift + level *
1189 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1190 		 */
1191 
1192 		const unsigned exp = dn->dn_datablkshift +
1193 		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1194 
1195 		if (exp >= 8 * sizeof (offset)) {
1196 			/* This only happens on the highest indirection level */
1197 			ASSERT3U(level, ==, dn->dn_nlevels - 1);
1198 			return (0);
1199 		}
1200 
1201 		ASSERT3U(exp, <, 8 * sizeof (offset));
1202 
1203 		return (offset >> exp);
1204 	} else {
1205 		ASSERT3U(offset, <, dn->dn_datablksz);
1206 		return (0);
1207 	}
1208 }
1209 
1210 /*
1211  * This function is used to lock the parent of the provided dbuf. This should be
1212  * used when modifying or reading db_blkptr.
1213  */
1214 db_lock_type_t
1215 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
1216 {
1217 	enum db_lock_type ret = DLT_NONE;
1218 	if (db->db_parent != NULL) {
1219 		rw_enter(&db->db_parent->db_rwlock, rw);
1220 		ret = DLT_PARENT;
1221 	} else if (dmu_objset_ds(db->db_objset) != NULL) {
1222 		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1223 		    tag);
1224 		ret = DLT_OBJSET;
1225 	}
1226 	/*
1227 	 * We only return a DLT_NONE lock when it's the top-most indirect block
1228 	 * of the meta-dnode of the MOS.
1229 	 */
1230 	return (ret);
1231 }
1232 
1233 /*
1234  * We need to pass the lock type in because it's possible that the block will
1235  * move from being the topmost indirect block in a dnode (and thus, have no
1236  * parent) to not the top-most via an indirection increase. This would cause a
1237  * panic if we didn't pass the lock type in.
1238  */
1239 void
1240 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
1241 {
1242 	if (type == DLT_PARENT)
1243 		rw_exit(&db->db_parent->db_rwlock);
1244 	else if (type == DLT_OBJSET)
1245 		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1246 }
1247 
1248 static void
1249 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1250     arc_buf_t *buf, void *vdb)
1251 {
1252 	dmu_buf_impl_t *db = vdb;
1253 
1254 	mutex_enter(&db->db_mtx);
1255 	ASSERT3U(db->db_state, ==, DB_READ);
1256 	/*
1257 	 * All reads are synchronous, so we must have a hold on the dbuf
1258 	 */
1259 	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1260 	ASSERT(db->db_buf == NULL);
1261 	ASSERT(db->db.db_data == NULL);
1262 	if (buf == NULL) {
1263 		/* i/o error */
1264 		ASSERT(zio == NULL || zio->io_error != 0);
1265 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1266 		ASSERT3P(db->db_buf, ==, NULL);
1267 		db->db_state = DB_UNCACHED;
1268 		DTRACE_SET_STATE(db, "i/o error");
1269 	} else if (db->db_level == 0 && db->db_freed_in_flight) {
1270 		/* freed in flight */
1271 		ASSERT(zio == NULL || zio->io_error == 0);
1272 		arc_release(buf, db);
1273 		bzero(buf->b_data, db->db.db_size);
1274 		arc_buf_freeze(buf);
1275 		db->db_freed_in_flight = FALSE;
1276 		dbuf_set_data(db, buf);
1277 		db->db_state = DB_CACHED;
1278 		DTRACE_SET_STATE(db, "freed in flight");
1279 	} else {
1280 		/* success */
1281 		ASSERT(zio == NULL || zio->io_error == 0);
1282 		dbuf_set_data(db, buf);
1283 		db->db_state = DB_CACHED;
1284 		DTRACE_SET_STATE(db, "successful read");
1285 	}
1286 	cv_broadcast(&db->db_changed);
1287 	dbuf_rele_and_unlock(db, NULL, B_FALSE);
1288 }
1289 
1290 /*
1291  * Shortcut for performing reads on bonus dbufs.  Returns
1292  * an error if we fail to verify the dnode associated with
1293  * a decrypted block. Otherwise success.
1294  */
1295 static int
1296 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1297 {
1298 	int bonuslen, max_bonuslen, err;
1299 
1300 	err = dbuf_read_verify_dnode_crypt(db, flags);
1301 	if (err)
1302 		return (err);
1303 
1304 	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1305 	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1306 	ASSERT(MUTEX_HELD(&db->db_mtx));
1307 	ASSERT(DB_DNODE_HELD(db));
1308 	ASSERT3U(bonuslen, <=, db->db.db_size);
1309 	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1310 	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1311 	if (bonuslen < max_bonuslen)
1312 		bzero(db->db.db_data, max_bonuslen);
1313 	if (bonuslen)
1314 		bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
1315 	db->db_state = DB_CACHED;
1316 	DTRACE_SET_STATE(db, "bonus buffer filled");
1317 	return (0);
1318 }
1319 
1320 static void
1321 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
1322 {
1323 	blkptr_t *bps = db->db.db_data;
1324 	uint32_t indbs = 1ULL << dn->dn_indblkshift;
1325 	int n_bps = indbs >> SPA_BLKPTRSHIFT;
1326 
1327 	for (int i = 0; i < n_bps; i++) {
1328 		blkptr_t *bp = &bps[i];
1329 
1330 		ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
1331 		BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
1332 		    dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
1333 		BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
1334 		BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
1335 		BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
1336 	}
1337 }
1338 
1339 /*
1340  * Handle reads on dbufs that are holes, if necessary.  This function
1341  * requires that the dbuf's mutex is held. Returns success (0) if action
1342  * was taken, ENOENT if no action was taken.
1343  */
1344 static int
1345 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1346 {
1347 	ASSERT(MUTEX_HELD(&db->db_mtx));
1348 
1349 	int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
1350 	/*
1351 	 * For level 0 blocks only, if the above check fails:
1352 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1353 	 * processes the delete record and clears the bp while we are waiting
1354 	 * for the dn_mtx (resulting in a "no" from block_freed).
1355 	 */
1356 	if (!is_hole && db->db_level == 0) {
1357 		is_hole = dnode_block_freed(dn, db->db_blkid) ||
1358 		    BP_IS_HOLE(db->db_blkptr);
1359 	}
1360 
1361 	if (is_hole) {
1362 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1363 		bzero(db->db.db_data, db->db.db_size);
1364 
1365 		if (db->db_blkptr != NULL && db->db_level > 0 &&
1366 		    BP_IS_HOLE(db->db_blkptr) &&
1367 		    db->db_blkptr->blk_birth != 0) {
1368 			dbuf_handle_indirect_hole(db, dn);
1369 		}
1370 		db->db_state = DB_CACHED;
1371 		DTRACE_SET_STATE(db, "hole read satisfied");
1372 		return (0);
1373 	}
1374 	return (ENOENT);
1375 }
1376 
1377 /*
1378  * This function ensures that, when doing a decrypting read of a block,
1379  * we make sure we have decrypted the dnode associated with it. We must do
1380  * this so that we ensure we are fully authenticating the checksum-of-MACs
1381  * tree from the root of the objset down to this block. Indirect blocks are
1382  * always verified against their secure checksum-of-MACs assuming that the
1383  * dnode containing them is correct. Now that we are doing a decrypting read,
1384  * we can be sure that the key is loaded and verify that assumption. This is
1385  * especially important considering that we always read encrypted dnode
1386  * blocks as raw data (without verifying their MACs) to start, and
1387  * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1388  */
1389 static int
1390 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1391 {
1392 	int err = 0;
1393 	objset_t *os = db->db_objset;
1394 	arc_buf_t *dnode_abuf;
1395 	dnode_t *dn;
1396 	zbookmark_phys_t zb;
1397 
1398 	ASSERT(MUTEX_HELD(&db->db_mtx));
1399 
1400 	if (!os->os_encrypted || os->os_raw_receive ||
1401 	    (flags & DB_RF_NO_DECRYPT) != 0)
1402 		return (0);
1403 
1404 	DB_DNODE_ENTER(db);
1405 	dn = DB_DNODE(db);
1406 	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1407 
1408 	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1409 		DB_DNODE_EXIT(db);
1410 		return (0);
1411 	}
1412 
1413 	SET_BOOKMARK(&zb, dmu_objset_id(os),
1414 	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1415 	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1416 
1417 	/*
1418 	 * An error code of EACCES tells us that the key is still not
1419 	 * available. This is ok if we are only reading authenticated
1420 	 * (and therefore non-encrypted) blocks.
1421 	 */
1422 	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1423 	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1424 	    (db->db_blkid == DMU_BONUS_BLKID &&
1425 	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1426 		err = 0;
1427 
1428 	DB_DNODE_EXIT(db);
1429 
1430 	return (err);
1431 }
1432 
1433 /*
1434  * Drops db_mtx and the parent lock specified by dblt and tag before
1435  * returning.
1436  */
1437 static int
1438 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1439     db_lock_type_t dblt, void *tag)
1440 {
1441 	dnode_t *dn;
1442 	zbookmark_phys_t zb;
1443 	uint32_t aflags = ARC_FLAG_NOWAIT;
1444 	int err, zio_flags;
1445 	boolean_t bonus_read;
1446 
1447 	err = zio_flags = 0;
1448 	bonus_read = B_FALSE;
1449 	DB_DNODE_ENTER(db);
1450 	dn = DB_DNODE(db);
1451 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1452 	ASSERT(MUTEX_HELD(&db->db_mtx));
1453 	ASSERT(db->db_state == DB_UNCACHED);
1454 	ASSERT(db->db_buf == NULL);
1455 	ASSERT(db->db_parent == NULL ||
1456 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1457 
1458 	if (db->db_blkid == DMU_BONUS_BLKID) {
1459 		err = dbuf_read_bonus(db, dn, flags);
1460 		goto early_unlock;
1461 	}
1462 
1463 	err = dbuf_read_hole(db, dn, flags);
1464 	if (err == 0)
1465 		goto early_unlock;
1466 
1467 	/*
1468 	 * Any attempt to read a redacted block should result in an error. This
1469 	 * will never happen under normal conditions, but can be useful for
1470 	 * debugging purposes.
1471 	 */
1472 	if (BP_IS_REDACTED(db->db_blkptr)) {
1473 		ASSERT(dsl_dataset_feature_is_active(
1474 		    db->db_objset->os_dsl_dataset,
1475 		    SPA_FEATURE_REDACTED_DATASETS));
1476 		err = SET_ERROR(EIO);
1477 		goto early_unlock;
1478 	}
1479 
1480 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1481 	    db->db.db_object, db->db_level, db->db_blkid);
1482 
1483 	/*
1484 	 * All bps of an encrypted os should have the encryption bit set.
1485 	 * If this is not true it indicates tampering and we report an error.
1486 	 */
1487 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
1488 		spa_log_error(db->db_objset->os_spa, &zb);
1489 		zfs_panic_recover("unencrypted block in encrypted "
1490 		    "object set %llu", dmu_objset_id(db->db_objset));
1491 		err = SET_ERROR(EIO);
1492 		goto early_unlock;
1493 	}
1494 
1495 	err = dbuf_read_verify_dnode_crypt(db, flags);
1496 	if (err != 0)
1497 		goto early_unlock;
1498 
1499 	DB_DNODE_EXIT(db);
1500 
1501 	db->db_state = DB_READ;
1502 	DTRACE_SET_STATE(db, "read issued");
1503 	mutex_exit(&db->db_mtx);
1504 
1505 	if (DBUF_IS_L2CACHEABLE(db))
1506 		aflags |= ARC_FLAG_L2CACHE;
1507 
1508 	dbuf_add_ref(db, NULL);
1509 
1510 	zio_flags = (flags & DB_RF_CANFAIL) ?
1511 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1512 
1513 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1514 		zio_flags |= ZIO_FLAG_RAW;
1515 	/*
1516 	 * The zio layer will copy the provided blkptr later, but we need to
1517 	 * do this now so that we can release the parent's rwlock. We have to
1518 	 * do that now so that if dbuf_read_done is called synchronously (on
1519 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1520 	 * parent's rwlock, which would be a lock ordering violation.
1521 	 */
1522 	blkptr_t bp = *db->db_blkptr;
1523 	dmu_buf_unlock_parent(db, dblt, tag);
1524 	(void) arc_read(zio, db->db_objset->os_spa, &bp,
1525 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1526 	    &aflags, &zb);
1527 	return (err);
1528 early_unlock:
1529 	DB_DNODE_EXIT(db);
1530 	mutex_exit(&db->db_mtx);
1531 	dmu_buf_unlock_parent(db, dblt, tag);
1532 	return (err);
1533 }
1534 
1535 /*
1536  * This is our just-in-time copy function.  It makes a copy of buffers that
1537  * have been modified in a previous transaction group before we access them in
1538  * the current active group.
1539  *
1540  * This function is used in three places: when we are dirtying a buffer for the
1541  * first time in a txg, when we are freeing a range in a dnode that includes
1542  * this buffer, and when we are accessing a buffer which was received compressed
1543  * and later referenced in a WRITE_BYREF record.
1544  *
1545  * Note that when we are called from dbuf_free_range() we do not put a hold on
1546  * the buffer, we just traverse the active dbuf list for the dnode.
1547  */
1548 static void
1549 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1550 {
1551 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1552 
1553 	ASSERT(MUTEX_HELD(&db->db_mtx));
1554 	ASSERT(db->db.db_data != NULL);
1555 	ASSERT(db->db_level == 0);
1556 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1557 
1558 	if (dr == NULL ||
1559 	    (dr->dt.dl.dr_data !=
1560 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1561 		return;
1562 
1563 	/*
1564 	 * If the last dirty record for this dbuf has not yet synced
1565 	 * and its referencing the dbuf data, either:
1566 	 *	reset the reference to point to a new copy,
1567 	 * or (if there a no active holders)
1568 	 *	just null out the current db_data pointer.
1569 	 */
1570 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1571 	if (db->db_blkid == DMU_BONUS_BLKID) {
1572 		dnode_t *dn = DB_DNODE(db);
1573 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1574 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1575 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1576 		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
1577 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1578 		arc_buf_t *buf = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf);
1579 		dr->dt.dl.dr_data = buf;
1580 		bcopy(db->db.db_data, buf->b_data, arc_buf_size(buf));
1581 	} else {
1582 		db->db_buf = NULL;
1583 		dbuf_clear_data(db);
1584 	}
1585 }
1586 
1587 int
1588 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1589 {
1590 	int err = 0;
1591 	boolean_t prefetch;
1592 	dnode_t *dn;
1593 
1594 	/*
1595 	 * We don't have to hold the mutex to check db_state because it
1596 	 * can't be freed while we have a hold on the buffer.
1597 	 */
1598 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1599 
1600 	if (db->db_state == DB_NOFILL)
1601 		return (SET_ERROR(EIO));
1602 
1603 	DB_DNODE_ENTER(db);
1604 	dn = DB_DNODE(db);
1605 
1606 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1607 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
1608 	    DBUF_IS_CACHEABLE(db);
1609 
1610 	mutex_enter(&db->db_mtx);
1611 	if (db->db_state == DB_CACHED) {
1612 		spa_t *spa = dn->dn_objset->os_spa;
1613 
1614 		/*
1615 		 * Ensure that this block's dnode has been decrypted if
1616 		 * the caller has requested decrypted data.
1617 		 */
1618 		err = dbuf_read_verify_dnode_crypt(db, flags);
1619 
1620 		/*
1621 		 * If the arc buf is compressed or encrypted and the caller
1622 		 * requested uncompressed data, we need to untransform it
1623 		 * before returning. We also call arc_untransform() on any
1624 		 * unauthenticated blocks, which will verify their MAC if
1625 		 * the key is now available.
1626 		 */
1627 		if (err == 0 && db->db_buf != NULL &&
1628 		    (flags & DB_RF_NO_DECRYPT) == 0 &&
1629 		    (arc_is_encrypted(db->db_buf) ||
1630 		    arc_is_unauthenticated(db->db_buf) ||
1631 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1632 			zbookmark_phys_t zb;
1633 
1634 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1635 			    db->db.db_object, db->db_level, db->db_blkid);
1636 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1637 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1638 			dbuf_set_data(db, db->db_buf);
1639 		}
1640 		mutex_exit(&db->db_mtx);
1641 		if (err == 0 && prefetch) {
1642 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1643 			    flags & DB_RF_HAVESTRUCT);
1644 		}
1645 		DB_DNODE_EXIT(db);
1646 		DBUF_STAT_BUMP(hash_hits);
1647 	} else if (db->db_state == DB_UNCACHED) {
1648 		spa_t *spa = dn->dn_objset->os_spa;
1649 		boolean_t need_wait = B_FALSE;
1650 
1651 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1652 
1653 		if (zio == NULL &&
1654 		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1655 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1656 			need_wait = B_TRUE;
1657 		}
1658 		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1659 		/*
1660 		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1661 		 * for us
1662 		 */
1663 		if (!err && prefetch) {
1664 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1665 			    flags & DB_RF_HAVESTRUCT);
1666 		}
1667 
1668 		DB_DNODE_EXIT(db);
1669 		DBUF_STAT_BUMP(hash_misses);
1670 
1671 		/*
1672 		 * If we created a zio_root we must execute it to avoid
1673 		 * leaking it, even if it isn't attached to any work due
1674 		 * to an error in dbuf_read_impl().
1675 		 */
1676 		if (need_wait) {
1677 			if (err == 0)
1678 				err = zio_wait(zio);
1679 			else
1680 				VERIFY0(zio_wait(zio));
1681 		}
1682 	} else {
1683 		/*
1684 		 * Another reader came in while the dbuf was in flight
1685 		 * between UNCACHED and CACHED.  Either a writer will finish
1686 		 * writing the buffer (sending the dbuf to CACHED) or the
1687 		 * first reader's request will reach the read_done callback
1688 		 * and send the dbuf to CACHED.  Otherwise, a failure
1689 		 * occurred and the dbuf went to UNCACHED.
1690 		 */
1691 		mutex_exit(&db->db_mtx);
1692 		if (prefetch) {
1693 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1694 			    flags & DB_RF_HAVESTRUCT);
1695 		}
1696 		DB_DNODE_EXIT(db);
1697 		DBUF_STAT_BUMP(hash_misses);
1698 
1699 		/* Skip the wait per the caller's request. */
1700 		if ((flags & DB_RF_NEVERWAIT) == 0) {
1701 			mutex_enter(&db->db_mtx);
1702 			while (db->db_state == DB_READ ||
1703 			    db->db_state == DB_FILL) {
1704 				ASSERT(db->db_state == DB_READ ||
1705 				    (flags & DB_RF_HAVESTRUCT) == 0);
1706 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1707 				    db, zio_t *, zio);
1708 				cv_wait(&db->db_changed, &db->db_mtx);
1709 			}
1710 			if (db->db_state == DB_UNCACHED)
1711 				err = SET_ERROR(EIO);
1712 			mutex_exit(&db->db_mtx);
1713 		}
1714 	}
1715 
1716 	return (err);
1717 }
1718 
1719 static void
1720 dbuf_noread(dmu_buf_impl_t *db)
1721 {
1722 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1723 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1724 	mutex_enter(&db->db_mtx);
1725 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1726 		cv_wait(&db->db_changed, &db->db_mtx);
1727 	if (db->db_state == DB_UNCACHED) {
1728 		ASSERT(db->db_buf == NULL);
1729 		ASSERT(db->db.db_data == NULL);
1730 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1731 		db->db_state = DB_FILL;
1732 		DTRACE_SET_STATE(db, "assigning filled buffer");
1733 	} else if (db->db_state == DB_NOFILL) {
1734 		dbuf_clear_data(db);
1735 	} else {
1736 		ASSERT3U(db->db_state, ==, DB_CACHED);
1737 	}
1738 	mutex_exit(&db->db_mtx);
1739 }
1740 
1741 void
1742 dbuf_unoverride(dbuf_dirty_record_t *dr)
1743 {
1744 	dmu_buf_impl_t *db = dr->dr_dbuf;
1745 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1746 	uint64_t txg = dr->dr_txg;
1747 
1748 	ASSERT(MUTEX_HELD(&db->db_mtx));
1749 	/*
1750 	 * This assert is valid because dmu_sync() expects to be called by
1751 	 * a zilog's get_data while holding a range lock.  This call only
1752 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1753 	 */
1754 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1755 	ASSERT(db->db_level == 0);
1756 
1757 	if (db->db_blkid == DMU_BONUS_BLKID ||
1758 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1759 		return;
1760 
1761 	ASSERT(db->db_data_pending != dr);
1762 
1763 	/* free this block */
1764 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1765 		zio_free(db->db_objset->os_spa, txg, bp);
1766 
1767 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1768 	dr->dt.dl.dr_nopwrite = B_FALSE;
1769 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1770 
1771 	/*
1772 	 * Release the already-written buffer, so we leave it in
1773 	 * a consistent dirty state.  Note that all callers are
1774 	 * modifying the buffer, so they will immediately do
1775 	 * another (redundant) arc_release().  Therefore, leave
1776 	 * the buf thawed to save the effort of freezing &
1777 	 * immediately re-thawing it.
1778 	 */
1779 	arc_release(dr->dt.dl.dr_data, db);
1780 }
1781 
1782 /*
1783  * Evict (if its unreferenced) or clear (if its referenced) any level-0
1784  * data blocks in the free range, so that any future readers will find
1785  * empty blocks.
1786  */
1787 void
1788 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1789     dmu_tx_t *tx)
1790 {
1791 	dmu_buf_impl_t *db_search;
1792 	dmu_buf_impl_t *db, *db_next;
1793 	uint64_t txg = tx->tx_txg;
1794 	avl_index_t where;
1795 	dbuf_dirty_record_t *dr;
1796 
1797 	if (end_blkid > dn->dn_maxblkid &&
1798 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1799 		end_blkid = dn->dn_maxblkid;
1800 	dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
1801 
1802 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1803 	db_search->db_level = 0;
1804 	db_search->db_blkid = start_blkid;
1805 	db_search->db_state = DB_SEARCH;
1806 
1807 	mutex_enter(&dn->dn_dbufs_mtx);
1808 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1809 	ASSERT3P(db, ==, NULL);
1810 
1811 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1812 
1813 	for (; db != NULL; db = db_next) {
1814 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1815 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1816 
1817 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
1818 			break;
1819 		}
1820 		ASSERT3U(db->db_blkid, >=, start_blkid);
1821 
1822 		/* found a level 0 buffer in the range */
1823 		mutex_enter(&db->db_mtx);
1824 		if (dbuf_undirty(db, tx)) {
1825 			/* mutex has been dropped and dbuf destroyed */
1826 			continue;
1827 		}
1828 
1829 		if (db->db_state == DB_UNCACHED ||
1830 		    db->db_state == DB_NOFILL ||
1831 		    db->db_state == DB_EVICTING) {
1832 			ASSERT(db->db.db_data == NULL);
1833 			mutex_exit(&db->db_mtx);
1834 			continue;
1835 		}
1836 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1837 			/* will be handled in dbuf_read_done or dbuf_rele */
1838 			db->db_freed_in_flight = TRUE;
1839 			mutex_exit(&db->db_mtx);
1840 			continue;
1841 		}
1842 		if (zfs_refcount_count(&db->db_holds) == 0) {
1843 			ASSERT(db->db_buf);
1844 			dbuf_destroy(db);
1845 			continue;
1846 		}
1847 		/* The dbuf is referenced */
1848 
1849 		dr = list_head(&db->db_dirty_records);
1850 		if (dr != NULL) {
1851 			if (dr->dr_txg == txg) {
1852 				/*
1853 				 * This buffer is "in-use", re-adjust the file
1854 				 * size to reflect that this buffer may
1855 				 * contain new data when we sync.
1856 				 */
1857 				if (db->db_blkid != DMU_SPILL_BLKID &&
1858 				    db->db_blkid > dn->dn_maxblkid)
1859 					dn->dn_maxblkid = db->db_blkid;
1860 				dbuf_unoverride(dr);
1861 			} else {
1862 				/*
1863 				 * This dbuf is not dirty in the open context.
1864 				 * Either uncache it (if its not referenced in
1865 				 * the open context) or reset its contents to
1866 				 * empty.
1867 				 */
1868 				dbuf_fix_old_data(db, txg);
1869 			}
1870 		}
1871 		/* clear the contents if its cached */
1872 		if (db->db_state == DB_CACHED) {
1873 			ASSERT(db->db.db_data != NULL);
1874 			arc_release(db->db_buf, db);
1875 			rw_enter(&db->db_rwlock, RW_WRITER);
1876 			bzero(db->db.db_data, db->db.db_size);
1877 			rw_exit(&db->db_rwlock);
1878 			arc_buf_freeze(db->db_buf);
1879 		}
1880 
1881 		mutex_exit(&db->db_mtx);
1882 	}
1883 
1884 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
1885 	mutex_exit(&dn->dn_dbufs_mtx);
1886 }
1887 
1888 void
1889 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1890 {
1891 	arc_buf_t *buf, *old_buf;
1892 	dbuf_dirty_record_t *dr;
1893 	int osize = db->db.db_size;
1894 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1895 	dnode_t *dn;
1896 
1897 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1898 
1899 	DB_DNODE_ENTER(db);
1900 	dn = DB_DNODE(db);
1901 
1902 	/*
1903 	 * XXX we should be doing a dbuf_read, checking the return
1904 	 * value and returning that up to our callers
1905 	 */
1906 	dmu_buf_will_dirty(&db->db, tx);
1907 
1908 	/* create the data buffer for the new block */
1909 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
1910 
1911 	/* copy old block data to the new block */
1912 	old_buf = db->db_buf;
1913 	bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
1914 	/* zero the remainder */
1915 	if (size > osize)
1916 		bzero((uint8_t *)buf->b_data + osize, size - osize);
1917 
1918 	mutex_enter(&db->db_mtx);
1919 	dbuf_set_data(db, buf);
1920 	arc_buf_destroy(old_buf, db);
1921 	db->db.db_size = size;
1922 
1923 	dr = list_head(&db->db_dirty_records);
1924 	/* dirty record added by dmu_buf_will_dirty() */
1925 	VERIFY(dr != NULL);
1926 	if (db->db_level == 0)
1927 		dr->dt.dl.dr_data = buf;
1928 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
1929 	ASSERT3U(dr->dr_accounted, ==, osize);
1930 	dr->dr_accounted = size;
1931 	mutex_exit(&db->db_mtx);
1932 
1933 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
1934 	DB_DNODE_EXIT(db);
1935 }
1936 
1937 void
1938 dbuf_release_bp(dmu_buf_impl_t *db)
1939 {
1940 	objset_t *os __maybe_unused = db->db_objset;
1941 
1942 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1943 	ASSERT(arc_released(os->os_phys_buf) ||
1944 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
1945 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1946 
1947 	(void) arc_release(db->db_buf, db);
1948 }
1949 
1950 /*
1951  * We already have a dirty record for this TXG, and we are being
1952  * dirtied again.
1953  */
1954 static void
1955 dbuf_redirty(dbuf_dirty_record_t *dr)
1956 {
1957 	dmu_buf_impl_t *db = dr->dr_dbuf;
1958 
1959 	ASSERT(MUTEX_HELD(&db->db_mtx));
1960 
1961 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1962 		/*
1963 		 * If this buffer has already been written out,
1964 		 * we now need to reset its state.
1965 		 */
1966 		dbuf_unoverride(dr);
1967 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1968 		    db->db_state != DB_NOFILL) {
1969 			/* Already released on initial dirty, so just thaw. */
1970 			ASSERT(arc_released(db->db_buf));
1971 			arc_buf_thaw(db->db_buf);
1972 		}
1973 	}
1974 }
1975 
1976 dbuf_dirty_record_t *
1977 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
1978 {
1979 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1980 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
1981 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
1982 	ASSERT(dn->dn_maxblkid >= blkid);
1983 
1984 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
1985 	list_link_init(&dr->dr_dirty_node);
1986 	list_link_init(&dr->dr_dbuf_node);
1987 	dr->dr_dnode = dn;
1988 	dr->dr_txg = tx->tx_txg;
1989 	dr->dt.dll.dr_blkid = blkid;
1990 	dr->dr_accounted = dn->dn_datablksz;
1991 
1992 	/*
1993 	 * There should not be any dbuf for the block that we're dirtying.
1994 	 * Otherwise the buffer contents could be inconsistent between the
1995 	 * dbuf and the lightweight dirty record.
1996 	 */
1997 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
1998 
1999 	mutex_enter(&dn->dn_mtx);
2000 	int txgoff = tx->tx_txg & TXG_MASK;
2001 	if (dn->dn_free_ranges[txgoff] != NULL) {
2002 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2003 	}
2004 
2005 	if (dn->dn_nlevels == 1) {
2006 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2007 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2008 		mutex_exit(&dn->dn_mtx);
2009 		rw_exit(&dn->dn_struct_rwlock);
2010 		dnode_setdirty(dn, tx);
2011 	} else {
2012 		mutex_exit(&dn->dn_mtx);
2013 
2014 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2015 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2016 		    1, blkid >> epbs, FTAG);
2017 		rw_exit(&dn->dn_struct_rwlock);
2018 		if (parent_db == NULL) {
2019 			kmem_free(dr, sizeof (*dr));
2020 			return (NULL);
2021 		}
2022 		int err = dbuf_read(parent_db, NULL,
2023 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2024 		if (err != 0) {
2025 			dbuf_rele(parent_db, FTAG);
2026 			kmem_free(dr, sizeof (*dr));
2027 			return (NULL);
2028 		}
2029 
2030 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2031 		dbuf_rele(parent_db, FTAG);
2032 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2033 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2034 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2035 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2036 		dr->dr_parent = parent_dr;
2037 	}
2038 
2039 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2040 
2041 	return (dr);
2042 }
2043 
2044 dbuf_dirty_record_t *
2045 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2046 {
2047 	dnode_t *dn;
2048 	objset_t *os;
2049 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2050 	int txgoff = tx->tx_txg & TXG_MASK;
2051 	boolean_t drop_struct_rwlock = B_FALSE;
2052 
2053 	ASSERT(tx->tx_txg != 0);
2054 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2055 	DMU_TX_DIRTY_BUF(tx, db);
2056 
2057 	DB_DNODE_ENTER(db);
2058 	dn = DB_DNODE(db);
2059 	/*
2060 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2061 	 * objects may be dirtied in syncing context, but only if they
2062 	 * were already pre-dirtied in open context.
2063 	 */
2064 #ifdef ZFS_DEBUG
2065 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2066 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2067 		    RW_READER, FTAG);
2068 	}
2069 	ASSERT(!dmu_tx_is_syncing(tx) ||
2070 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2071 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2072 	    dn->dn_objset->os_dsl_dataset == NULL);
2073 	if (dn->dn_objset->os_dsl_dataset != NULL)
2074 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2075 #endif
2076 	/*
2077 	 * We make this assert for private objects as well, but after we
2078 	 * check if we're already dirty.  They are allowed to re-dirty
2079 	 * in syncing context.
2080 	 */
2081 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2082 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2083 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2084 
2085 	mutex_enter(&db->db_mtx);
2086 	/*
2087 	 * XXX make this true for indirects too?  The problem is that
2088 	 * transactions created with dmu_tx_create_assigned() from
2089 	 * syncing context don't bother holding ahead.
2090 	 */
2091 	ASSERT(db->db_level != 0 ||
2092 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2093 	    db->db_state == DB_NOFILL);
2094 
2095 	mutex_enter(&dn->dn_mtx);
2096 	dnode_set_dirtyctx(dn, tx, db);
2097 	if (tx->tx_txg > dn->dn_dirty_txg)
2098 		dn->dn_dirty_txg = tx->tx_txg;
2099 	mutex_exit(&dn->dn_mtx);
2100 
2101 	if (db->db_blkid == DMU_SPILL_BLKID)
2102 		dn->dn_have_spill = B_TRUE;
2103 
2104 	/*
2105 	 * If this buffer is already dirty, we're done.
2106 	 */
2107 	dr_head = list_head(&db->db_dirty_records);
2108 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2109 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2110 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2111 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2112 		DB_DNODE_EXIT(db);
2113 
2114 		dbuf_redirty(dr_next);
2115 		mutex_exit(&db->db_mtx);
2116 		return (dr_next);
2117 	}
2118 
2119 	/*
2120 	 * Only valid if not already dirty.
2121 	 */
2122 	ASSERT(dn->dn_object == 0 ||
2123 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2124 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2125 
2126 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2127 
2128 	/*
2129 	 * We should only be dirtying in syncing context if it's the
2130 	 * mos or we're initializing the os or it's a special object.
2131 	 * However, we are allowed to dirty in syncing context provided
2132 	 * we already dirtied it in open context.  Hence we must make
2133 	 * this assertion only if we're not already dirty.
2134 	 */
2135 	os = dn->dn_objset;
2136 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2137 #ifdef ZFS_DEBUG
2138 	if (dn->dn_objset->os_dsl_dataset != NULL)
2139 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2140 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2141 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2142 	if (dn->dn_objset->os_dsl_dataset != NULL)
2143 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2144 #endif
2145 	ASSERT(db->db.db_size != 0);
2146 
2147 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2148 
2149 	if (db->db_blkid != DMU_BONUS_BLKID) {
2150 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2151 	}
2152 
2153 	/*
2154 	 * If this buffer is dirty in an old transaction group we need
2155 	 * to make a copy of it so that the changes we make in this
2156 	 * transaction group won't leak out when we sync the older txg.
2157 	 */
2158 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2159 	list_link_init(&dr->dr_dirty_node);
2160 	list_link_init(&dr->dr_dbuf_node);
2161 	dr->dr_dnode = dn;
2162 	if (db->db_level == 0) {
2163 		void *data_old = db->db_buf;
2164 
2165 		if (db->db_state != DB_NOFILL) {
2166 			if (db->db_blkid == DMU_BONUS_BLKID) {
2167 				dbuf_fix_old_data(db, tx->tx_txg);
2168 				data_old = db->db.db_data;
2169 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2170 				/*
2171 				 * Release the data buffer from the cache so
2172 				 * that we can modify it without impacting
2173 				 * possible other users of this cached data
2174 				 * block.  Note that indirect blocks and
2175 				 * private objects are not released until the
2176 				 * syncing state (since they are only modified
2177 				 * then).
2178 				 */
2179 				arc_release(db->db_buf, db);
2180 				dbuf_fix_old_data(db, tx->tx_txg);
2181 				data_old = db->db_buf;
2182 			}
2183 			ASSERT(data_old != NULL);
2184 		}
2185 		dr->dt.dl.dr_data = data_old;
2186 	} else {
2187 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2188 		list_create(&dr->dt.di.dr_children,
2189 		    sizeof (dbuf_dirty_record_t),
2190 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2191 	}
2192 	if (db->db_blkid != DMU_BONUS_BLKID)
2193 		dr->dr_accounted = db->db.db_size;
2194 	dr->dr_dbuf = db;
2195 	dr->dr_txg = tx->tx_txg;
2196 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2197 
2198 	/*
2199 	 * We could have been freed_in_flight between the dbuf_noread
2200 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2201 	 * happened after the free.
2202 	 */
2203 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2204 	    db->db_blkid != DMU_SPILL_BLKID) {
2205 		mutex_enter(&dn->dn_mtx);
2206 		if (dn->dn_free_ranges[txgoff] != NULL) {
2207 			range_tree_clear(dn->dn_free_ranges[txgoff],
2208 			    db->db_blkid, 1);
2209 		}
2210 		mutex_exit(&dn->dn_mtx);
2211 		db->db_freed_in_flight = FALSE;
2212 	}
2213 
2214 	/*
2215 	 * This buffer is now part of this txg
2216 	 */
2217 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2218 	db->db_dirtycnt += 1;
2219 	ASSERT3U(db->db_dirtycnt, <=, 3);
2220 
2221 	mutex_exit(&db->db_mtx);
2222 
2223 	if (db->db_blkid == DMU_BONUS_BLKID ||
2224 	    db->db_blkid == DMU_SPILL_BLKID) {
2225 		mutex_enter(&dn->dn_mtx);
2226 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2227 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2228 		mutex_exit(&dn->dn_mtx);
2229 		dnode_setdirty(dn, tx);
2230 		DB_DNODE_EXIT(db);
2231 		return (dr);
2232 	}
2233 
2234 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2235 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2236 		drop_struct_rwlock = B_TRUE;
2237 	}
2238 
2239 	/*
2240 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2241 	 * when we get to syncing context we will need to decrement its
2242 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2243 	 * syncing context won't have to wait for the i/o.
2244 	 */
2245 	if (db->db_blkptr != NULL) {
2246 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2247 		ddt_prefetch(os->os_spa, db->db_blkptr);
2248 		dmu_buf_unlock_parent(db, dblt, FTAG);
2249 	}
2250 
2251 	/*
2252 	 * We need to hold the dn_struct_rwlock to make this assertion,
2253 	 * because it protects dn_phys / dn_next_nlevels from changing.
2254 	 */
2255 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2256 	    dn->dn_phys->dn_nlevels > db->db_level ||
2257 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2258 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2259 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2260 
2261 
2262 	if (db->db_level == 0) {
2263 		ASSERT(!db->db_objset->os_raw_receive ||
2264 		    dn->dn_maxblkid >= db->db_blkid);
2265 		dnode_new_blkid(dn, db->db_blkid, tx,
2266 		    drop_struct_rwlock, B_FALSE);
2267 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2268 	}
2269 
2270 	if (db->db_level+1 < dn->dn_nlevels) {
2271 		dmu_buf_impl_t *parent = db->db_parent;
2272 		dbuf_dirty_record_t *di;
2273 		int parent_held = FALSE;
2274 
2275 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2276 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2277 			parent = dbuf_hold_level(dn, db->db_level + 1,
2278 			    db->db_blkid >> epbs, FTAG);
2279 			ASSERT(parent != NULL);
2280 			parent_held = TRUE;
2281 		}
2282 		if (drop_struct_rwlock)
2283 			rw_exit(&dn->dn_struct_rwlock);
2284 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2285 		di = dbuf_dirty(parent, tx);
2286 		if (parent_held)
2287 			dbuf_rele(parent, FTAG);
2288 
2289 		mutex_enter(&db->db_mtx);
2290 		/*
2291 		 * Since we've dropped the mutex, it's possible that
2292 		 * dbuf_undirty() might have changed this out from under us.
2293 		 */
2294 		if (list_head(&db->db_dirty_records) == dr ||
2295 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2296 			mutex_enter(&di->dt.di.dr_mtx);
2297 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2298 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2299 			list_insert_tail(&di->dt.di.dr_children, dr);
2300 			mutex_exit(&di->dt.di.dr_mtx);
2301 			dr->dr_parent = di;
2302 		}
2303 		mutex_exit(&db->db_mtx);
2304 	} else {
2305 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2306 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2307 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2308 		mutex_enter(&dn->dn_mtx);
2309 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2310 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2311 		mutex_exit(&dn->dn_mtx);
2312 		if (drop_struct_rwlock)
2313 			rw_exit(&dn->dn_struct_rwlock);
2314 	}
2315 
2316 	dnode_setdirty(dn, tx);
2317 	DB_DNODE_EXIT(db);
2318 	return (dr);
2319 }
2320 
2321 static void
2322 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2323 {
2324 	dmu_buf_impl_t *db = dr->dr_dbuf;
2325 
2326 	if (dr->dt.dl.dr_data != db->db.db_data) {
2327 		struct dnode *dn = dr->dr_dnode;
2328 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2329 
2330 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2331 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2332 	}
2333 	db->db_data_pending = NULL;
2334 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2335 	list_remove(&db->db_dirty_records, dr);
2336 	if (dr->dr_dbuf->db_level != 0) {
2337 		mutex_destroy(&dr->dt.di.dr_mtx);
2338 		list_destroy(&dr->dt.di.dr_children);
2339 	}
2340 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2341 	ASSERT3U(db->db_dirtycnt, >, 0);
2342 	db->db_dirtycnt -= 1;
2343 }
2344 
2345 /*
2346  * Undirty a buffer in the transaction group referenced by the given
2347  * transaction.  Return whether this evicted the dbuf.
2348  */
2349 static boolean_t
2350 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2351 {
2352 	uint64_t txg = tx->tx_txg;
2353 
2354 	ASSERT(txg != 0);
2355 
2356 	/*
2357 	 * Due to our use of dn_nlevels below, this can only be called
2358 	 * in open context, unless we are operating on the MOS.
2359 	 * From syncing context, dn_nlevels may be different from the
2360 	 * dn_nlevels used when dbuf was dirtied.
2361 	 */
2362 	ASSERT(db->db_objset ==
2363 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2364 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2365 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2366 	ASSERT0(db->db_level);
2367 	ASSERT(MUTEX_HELD(&db->db_mtx));
2368 
2369 	/*
2370 	 * If this buffer is not dirty, we're done.
2371 	 */
2372 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2373 	if (dr == NULL)
2374 		return (B_FALSE);
2375 	ASSERT(dr->dr_dbuf == db);
2376 
2377 	dnode_t *dn = dr->dr_dnode;
2378 
2379 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2380 
2381 	ASSERT(db->db.db_size != 0);
2382 
2383 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2384 	    dr->dr_accounted, txg);
2385 
2386 	list_remove(&db->db_dirty_records, dr);
2387 
2388 	/*
2389 	 * Note that there are three places in dbuf_dirty()
2390 	 * where this dirty record may be put on a list.
2391 	 * Make sure to do a list_remove corresponding to
2392 	 * every one of those list_insert calls.
2393 	 */
2394 	if (dr->dr_parent) {
2395 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2396 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2397 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2398 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2399 	    db->db_level + 1 == dn->dn_nlevels) {
2400 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2401 		mutex_enter(&dn->dn_mtx);
2402 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2403 		mutex_exit(&dn->dn_mtx);
2404 	}
2405 
2406 	if (db->db_state != DB_NOFILL) {
2407 		dbuf_unoverride(dr);
2408 
2409 		ASSERT(db->db_buf != NULL);
2410 		ASSERT(dr->dt.dl.dr_data != NULL);
2411 		if (dr->dt.dl.dr_data != db->db_buf)
2412 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2413 	}
2414 
2415 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2416 
2417 	ASSERT(db->db_dirtycnt > 0);
2418 	db->db_dirtycnt -= 1;
2419 
2420 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2421 		ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2422 		dbuf_destroy(db);
2423 		return (B_TRUE);
2424 	}
2425 
2426 	return (B_FALSE);
2427 }
2428 
2429 static void
2430 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2431 {
2432 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2433 
2434 	ASSERT(tx->tx_txg != 0);
2435 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2436 
2437 	/*
2438 	 * Quick check for dirtiness.  For already dirty blocks, this
2439 	 * reduces runtime of this function by >90%, and overall performance
2440 	 * by 50% for some workloads (e.g. file deletion with indirect blocks
2441 	 * cached).
2442 	 */
2443 	mutex_enter(&db->db_mtx);
2444 
2445 	if (db->db_state == DB_CACHED) {
2446 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2447 		/*
2448 		 * It's possible that it is already dirty but not cached,
2449 		 * because there are some calls to dbuf_dirty() that don't
2450 		 * go through dmu_buf_will_dirty().
2451 		 */
2452 		if (dr != NULL) {
2453 			/* This dbuf is already dirty and cached. */
2454 			dbuf_redirty(dr);
2455 			mutex_exit(&db->db_mtx);
2456 			return;
2457 		}
2458 	}
2459 	mutex_exit(&db->db_mtx);
2460 
2461 	DB_DNODE_ENTER(db);
2462 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2463 		flags |= DB_RF_HAVESTRUCT;
2464 	DB_DNODE_EXIT(db);
2465 	(void) dbuf_read(db, NULL, flags);
2466 	(void) dbuf_dirty(db, tx);
2467 }
2468 
2469 void
2470 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2471 {
2472 	dmu_buf_will_dirty_impl(db_fake,
2473 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2474 }
2475 
2476 boolean_t
2477 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2478 {
2479 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2480 	dbuf_dirty_record_t *dr;
2481 
2482 	mutex_enter(&db->db_mtx);
2483 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2484 	mutex_exit(&db->db_mtx);
2485 	return (dr != NULL);
2486 }
2487 
2488 void
2489 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2490 {
2491 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2492 
2493 	db->db_state = DB_NOFILL;
2494 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2495 	dmu_buf_will_fill(db_fake, tx);
2496 }
2497 
2498 void
2499 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2500 {
2501 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2502 
2503 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2504 	ASSERT(tx->tx_txg != 0);
2505 	ASSERT(db->db_level == 0);
2506 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2507 
2508 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2509 	    dmu_tx_private_ok(tx));
2510 
2511 	dbuf_noread(db);
2512 	(void) dbuf_dirty(db, tx);
2513 }
2514 
2515 /*
2516  * This function is effectively the same as dmu_buf_will_dirty(), but
2517  * indicates the caller expects raw encrypted data in the db, and provides
2518  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2519  * blkptr_t when this dbuf is written.  This is only used for blocks of
2520  * dnodes, during raw receive.
2521  */
2522 void
2523 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2524     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2525 {
2526 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2527 	dbuf_dirty_record_t *dr;
2528 
2529 	/*
2530 	 * dr_has_raw_params is only processed for blocks of dnodes
2531 	 * (see dbuf_sync_dnode_leaf_crypt()).
2532 	 */
2533 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2534 	ASSERT3U(db->db_level, ==, 0);
2535 	ASSERT(db->db_objset->os_raw_receive);
2536 
2537 	dmu_buf_will_dirty_impl(db_fake,
2538 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2539 
2540 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2541 
2542 	ASSERT3P(dr, !=, NULL);
2543 
2544 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2545 	dr->dt.dl.dr_byteorder = byteorder;
2546 	bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2547 	bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2548 	bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2549 }
2550 
2551 static void
2552 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2553 {
2554 	struct dirty_leaf *dl;
2555 	dbuf_dirty_record_t *dr;
2556 
2557 	dr = list_head(&db->db_dirty_records);
2558 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2559 	dl = &dr->dt.dl;
2560 	dl->dr_overridden_by = *bp;
2561 	dl->dr_override_state = DR_OVERRIDDEN;
2562 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2563 }
2564 
2565 /* ARGSUSED */
2566 void
2567 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2568 {
2569 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2570 	dbuf_states_t old_state;
2571 	mutex_enter(&db->db_mtx);
2572 	DBUF_VERIFY(db);
2573 
2574 	old_state = db->db_state;
2575 	db->db_state = DB_CACHED;
2576 	if (old_state == DB_FILL) {
2577 		if (db->db_level == 0 && db->db_freed_in_flight) {
2578 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2579 			/* we were freed while filling */
2580 			/* XXX dbuf_undirty? */
2581 			bzero(db->db.db_data, db->db.db_size);
2582 			db->db_freed_in_flight = FALSE;
2583 			DTRACE_SET_STATE(db,
2584 			    "fill done handling freed in flight");
2585 		} else {
2586 			DTRACE_SET_STATE(db, "fill done");
2587 		}
2588 		cv_broadcast(&db->db_changed);
2589 	}
2590 	mutex_exit(&db->db_mtx);
2591 }
2592 
2593 void
2594 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2595     bp_embedded_type_t etype, enum zio_compress comp,
2596     int uncompressed_size, int compressed_size, int byteorder,
2597     dmu_tx_t *tx)
2598 {
2599 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2600 	struct dirty_leaf *dl;
2601 	dmu_object_type_t type;
2602 	dbuf_dirty_record_t *dr;
2603 
2604 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2605 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2606 		    SPA_FEATURE_EMBEDDED_DATA));
2607 	}
2608 
2609 	DB_DNODE_ENTER(db);
2610 	type = DB_DNODE(db)->dn_type;
2611 	DB_DNODE_EXIT(db);
2612 
2613 	ASSERT0(db->db_level);
2614 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2615 
2616 	dmu_buf_will_not_fill(dbuf, tx);
2617 
2618 	dr = list_head(&db->db_dirty_records);
2619 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2620 	dl = &dr->dt.dl;
2621 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2622 	    data, comp, uncompressed_size, compressed_size);
2623 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2624 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2625 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2626 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2627 
2628 	dl->dr_override_state = DR_OVERRIDDEN;
2629 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2630 }
2631 
2632 void
2633 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2634 {
2635 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2636 	dmu_object_type_t type;
2637 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2638 	    SPA_FEATURE_REDACTED_DATASETS));
2639 
2640 	DB_DNODE_ENTER(db);
2641 	type = DB_DNODE(db)->dn_type;
2642 	DB_DNODE_EXIT(db);
2643 
2644 	ASSERT0(db->db_level);
2645 	dmu_buf_will_not_fill(dbuf, tx);
2646 
2647 	blkptr_t bp = { { { {0} } } };
2648 	BP_SET_TYPE(&bp, type);
2649 	BP_SET_LEVEL(&bp, 0);
2650 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2651 	BP_SET_REDACTED(&bp);
2652 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2653 
2654 	dbuf_override_impl(db, &bp, tx);
2655 }
2656 
2657 /*
2658  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2659  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2660  */
2661 void
2662 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2663 {
2664 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2665 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2666 	ASSERT(db->db_level == 0);
2667 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2668 	ASSERT(buf != NULL);
2669 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2670 	ASSERT(tx->tx_txg != 0);
2671 
2672 	arc_return_buf(buf, db);
2673 	ASSERT(arc_released(buf));
2674 
2675 	mutex_enter(&db->db_mtx);
2676 
2677 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2678 		cv_wait(&db->db_changed, &db->db_mtx);
2679 
2680 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2681 
2682 	if (db->db_state == DB_CACHED &&
2683 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2684 		/*
2685 		 * In practice, we will never have a case where we have an
2686 		 * encrypted arc buffer while additional holds exist on the
2687 		 * dbuf. We don't handle this here so we simply assert that
2688 		 * fact instead.
2689 		 */
2690 		ASSERT(!arc_is_encrypted(buf));
2691 		mutex_exit(&db->db_mtx);
2692 		(void) dbuf_dirty(db, tx);
2693 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
2694 		arc_buf_destroy(buf, db);
2695 		return;
2696 	}
2697 
2698 	if (db->db_state == DB_CACHED) {
2699 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2700 
2701 		ASSERT(db->db_buf != NULL);
2702 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2703 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2704 
2705 			if (!arc_released(db->db_buf)) {
2706 				ASSERT(dr->dt.dl.dr_override_state ==
2707 				    DR_OVERRIDDEN);
2708 				arc_release(db->db_buf, db);
2709 			}
2710 			dr->dt.dl.dr_data = buf;
2711 			arc_buf_destroy(db->db_buf, db);
2712 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2713 			arc_release(db->db_buf, db);
2714 			arc_buf_destroy(db->db_buf, db);
2715 		}
2716 		db->db_buf = NULL;
2717 	}
2718 	ASSERT(db->db_buf == NULL);
2719 	dbuf_set_data(db, buf);
2720 	db->db_state = DB_FILL;
2721 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
2722 	mutex_exit(&db->db_mtx);
2723 	(void) dbuf_dirty(db, tx);
2724 	dmu_buf_fill_done(&db->db, tx);
2725 }
2726 
2727 void
2728 dbuf_destroy(dmu_buf_impl_t *db)
2729 {
2730 	dnode_t *dn;
2731 	dmu_buf_impl_t *parent = db->db_parent;
2732 	dmu_buf_impl_t *dndb;
2733 
2734 	ASSERT(MUTEX_HELD(&db->db_mtx));
2735 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2736 
2737 	if (db->db_buf != NULL) {
2738 		arc_buf_destroy(db->db_buf, db);
2739 		db->db_buf = NULL;
2740 	}
2741 
2742 	if (db->db_blkid == DMU_BONUS_BLKID) {
2743 		int slots = DB_DNODE(db)->dn_num_slots;
2744 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2745 		if (db->db.db_data != NULL) {
2746 			kmem_free(db->db.db_data, bonuslen);
2747 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
2748 			db->db_state = DB_UNCACHED;
2749 			DTRACE_SET_STATE(db, "buffer cleared");
2750 		}
2751 	}
2752 
2753 	dbuf_clear_data(db);
2754 
2755 	if (multilist_link_active(&db->db_cache_link)) {
2756 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2757 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
2758 
2759 		multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
2760 		(void) zfs_refcount_remove_many(
2761 		    &dbuf_caches[db->db_caching_status].size,
2762 		    db->db.db_size, db);
2763 
2764 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2765 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
2766 		} else {
2767 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2768 			DBUF_STAT_BUMPDOWN(cache_count);
2769 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2770 			    db->db.db_size);
2771 		}
2772 		db->db_caching_status = DB_NO_CACHE;
2773 	}
2774 
2775 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2776 	ASSERT(db->db_data_pending == NULL);
2777 	ASSERT(list_is_empty(&db->db_dirty_records));
2778 
2779 	db->db_state = DB_EVICTING;
2780 	DTRACE_SET_STATE(db, "buffer eviction started");
2781 	db->db_blkptr = NULL;
2782 
2783 	/*
2784 	 * Now that db_state is DB_EVICTING, nobody else can find this via
2785 	 * the hash table.  We can now drop db_mtx, which allows us to
2786 	 * acquire the dn_dbufs_mtx.
2787 	 */
2788 	mutex_exit(&db->db_mtx);
2789 
2790 	DB_DNODE_ENTER(db);
2791 	dn = DB_DNODE(db);
2792 	dndb = dn->dn_dbuf;
2793 	if (db->db_blkid != DMU_BONUS_BLKID) {
2794 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2795 		if (needlock)
2796 			mutex_enter_nested(&dn->dn_dbufs_mtx,
2797 			    NESTED_SINGLE);
2798 		avl_remove(&dn->dn_dbufs, db);
2799 		membar_producer();
2800 		DB_DNODE_EXIT(db);
2801 		if (needlock)
2802 			mutex_exit(&dn->dn_dbufs_mtx);
2803 		/*
2804 		 * Decrementing the dbuf count means that the hold corresponding
2805 		 * to the removed dbuf is no longer discounted in dnode_move(),
2806 		 * so the dnode cannot be moved until after we release the hold.
2807 		 * The membar_producer() ensures visibility of the decremented
2808 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2809 		 * release any lock.
2810 		 */
2811 		mutex_enter(&dn->dn_mtx);
2812 		dnode_rele_and_unlock(dn, db, B_TRUE);
2813 		db->db_dnode_handle = NULL;
2814 
2815 		dbuf_hash_remove(db);
2816 	} else {
2817 		DB_DNODE_EXIT(db);
2818 	}
2819 
2820 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2821 
2822 	db->db_parent = NULL;
2823 
2824 	ASSERT(db->db_buf == NULL);
2825 	ASSERT(db->db.db_data == NULL);
2826 	ASSERT(db->db_hash_next == NULL);
2827 	ASSERT(db->db_blkptr == NULL);
2828 	ASSERT(db->db_data_pending == NULL);
2829 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2830 	ASSERT(!multilist_link_active(&db->db_cache_link));
2831 
2832 	kmem_cache_free(dbuf_kmem_cache, db);
2833 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2834 
2835 	/*
2836 	 * If this dbuf is referenced from an indirect dbuf,
2837 	 * decrement the ref count on the indirect dbuf.
2838 	 */
2839 	if (parent && parent != dndb) {
2840 		mutex_enter(&parent->db_mtx);
2841 		dbuf_rele_and_unlock(parent, db, B_TRUE);
2842 	}
2843 }
2844 
2845 /*
2846  * Note: While bpp will always be updated if the function returns success,
2847  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2848  * this happens when the dnode is the meta-dnode, or {user|group|project}used
2849  * object.
2850  */
2851 __attribute__((always_inline))
2852 static inline int
2853 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2854     dmu_buf_impl_t **parentp, blkptr_t **bpp)
2855 {
2856 	*parentp = NULL;
2857 	*bpp = NULL;
2858 
2859 	ASSERT(blkid != DMU_BONUS_BLKID);
2860 
2861 	if (blkid == DMU_SPILL_BLKID) {
2862 		mutex_enter(&dn->dn_mtx);
2863 		if (dn->dn_have_spill &&
2864 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2865 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2866 		else
2867 			*bpp = NULL;
2868 		dbuf_add_ref(dn->dn_dbuf, NULL);
2869 		*parentp = dn->dn_dbuf;
2870 		mutex_exit(&dn->dn_mtx);
2871 		return (0);
2872 	}
2873 
2874 	int nlevels =
2875 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
2876 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2877 
2878 	ASSERT3U(level * epbs, <, 64);
2879 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2880 	/*
2881 	 * This assertion shouldn't trip as long as the max indirect block size
2882 	 * is less than 1M.  The reason for this is that up to that point,
2883 	 * the number of levels required to address an entire object with blocks
2884 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
2885 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
2886 	 * (i.e. we can address the entire object), objects will all use at most
2887 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
2888 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
2889 	 * enough to address an entire object, so objects will have 5 levels,
2890 	 * but then this assertion will overflow.
2891 	 *
2892 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2893 	 * need to redo this logic to handle overflows.
2894 	 */
2895 	ASSERT(level >= nlevels ||
2896 	    ((nlevels - level - 1) * epbs) +
2897 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
2898 	if (level >= nlevels ||
2899 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2900 	    ((nlevels - level - 1) * epbs)) ||
2901 	    (fail_sparse &&
2902 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
2903 		/* the buffer has no parent yet */
2904 		return (SET_ERROR(ENOENT));
2905 	} else if (level < nlevels-1) {
2906 		/* this block is referenced from an indirect block */
2907 		int err;
2908 
2909 		err = dbuf_hold_impl(dn, level + 1,
2910 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2911 
2912 		if (err)
2913 			return (err);
2914 		err = dbuf_read(*parentp, NULL,
2915 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2916 		if (err) {
2917 			dbuf_rele(*parentp, NULL);
2918 			*parentp = NULL;
2919 			return (err);
2920 		}
2921 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
2922 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2923 		    (blkid & ((1ULL << epbs) - 1));
2924 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2925 			ASSERT(BP_IS_HOLE(*bpp));
2926 		rw_exit(&(*parentp)->db_rwlock);
2927 		return (0);
2928 	} else {
2929 		/* the block is referenced from the dnode */
2930 		ASSERT3U(level, ==, nlevels-1);
2931 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2932 		    blkid < dn->dn_phys->dn_nblkptr);
2933 		if (dn->dn_dbuf) {
2934 			dbuf_add_ref(dn->dn_dbuf, NULL);
2935 			*parentp = dn->dn_dbuf;
2936 		}
2937 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
2938 		return (0);
2939 	}
2940 }
2941 
2942 static dmu_buf_impl_t *
2943 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2944     dmu_buf_impl_t *parent, blkptr_t *blkptr)
2945 {
2946 	objset_t *os = dn->dn_objset;
2947 	dmu_buf_impl_t *db, *odb;
2948 
2949 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2950 	ASSERT(dn->dn_type != DMU_OT_NONE);
2951 
2952 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
2953 
2954 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
2955 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
2956 
2957 	db->db_objset = os;
2958 	db->db.db_object = dn->dn_object;
2959 	db->db_level = level;
2960 	db->db_blkid = blkid;
2961 	db->db_dirtycnt = 0;
2962 	db->db_dnode_handle = dn->dn_handle;
2963 	db->db_parent = parent;
2964 	db->db_blkptr = blkptr;
2965 
2966 	db->db_user = NULL;
2967 	db->db_user_immediate_evict = FALSE;
2968 	db->db_freed_in_flight = FALSE;
2969 	db->db_pending_evict = FALSE;
2970 
2971 	if (blkid == DMU_BONUS_BLKID) {
2972 		ASSERT3P(parent, ==, dn->dn_dbuf);
2973 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
2974 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
2975 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
2976 		db->db.db_offset = DMU_BONUS_BLKID;
2977 		db->db_state = DB_UNCACHED;
2978 		DTRACE_SET_STATE(db, "bonus buffer created");
2979 		db->db_caching_status = DB_NO_CACHE;
2980 		/* the bonus dbuf is not placed in the hash table */
2981 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2982 		return (db);
2983 	} else if (blkid == DMU_SPILL_BLKID) {
2984 		db->db.db_size = (blkptr != NULL) ?
2985 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
2986 		db->db.db_offset = 0;
2987 	} else {
2988 		int blocksize =
2989 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
2990 		db->db.db_size = blocksize;
2991 		db->db.db_offset = db->db_blkid * blocksize;
2992 	}
2993 
2994 	/*
2995 	 * Hold the dn_dbufs_mtx while we get the new dbuf
2996 	 * in the hash table *and* added to the dbufs list.
2997 	 * This prevents a possible deadlock with someone
2998 	 * trying to look up this dbuf before it's added to the
2999 	 * dn_dbufs list.
3000 	 */
3001 	mutex_enter(&dn->dn_dbufs_mtx);
3002 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3003 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3004 		/* someone else inserted it first */
3005 		kmem_cache_free(dbuf_kmem_cache, db);
3006 		mutex_exit(&dn->dn_dbufs_mtx);
3007 		DBUF_STAT_BUMP(hash_insert_race);
3008 		return (odb);
3009 	}
3010 	avl_add(&dn->dn_dbufs, db);
3011 
3012 	db->db_state = DB_UNCACHED;
3013 	DTRACE_SET_STATE(db, "regular buffer created");
3014 	db->db_caching_status = DB_NO_CACHE;
3015 	mutex_exit(&dn->dn_dbufs_mtx);
3016 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3017 
3018 	if (parent && parent != dn->dn_dbuf)
3019 		dbuf_add_ref(parent, db);
3020 
3021 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3022 	    zfs_refcount_count(&dn->dn_holds) > 0);
3023 	(void) zfs_refcount_add(&dn->dn_holds, db);
3024 
3025 	dprintf_dbuf(db, "db=%p\n", db);
3026 
3027 	return (db);
3028 }
3029 
3030 /*
3031  * This function returns a block pointer and information about the object,
3032  * given a dnode and a block.  This is a publicly accessible version of
3033  * dbuf_findbp that only returns some information, rather than the
3034  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3035  * should be locked as (at least) a reader.
3036  */
3037 int
3038 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3039     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3040 {
3041 	dmu_buf_impl_t *dbp = NULL;
3042 	blkptr_t *bp2;
3043 	int err = 0;
3044 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3045 
3046 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3047 	if (err == 0) {
3048 		*bp = *bp2;
3049 		if (dbp != NULL)
3050 			dbuf_rele(dbp, NULL);
3051 		if (datablkszsec != NULL)
3052 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3053 		if (indblkshift != NULL)
3054 			*indblkshift = dn->dn_phys->dn_indblkshift;
3055 	}
3056 
3057 	return (err);
3058 }
3059 
3060 typedef struct dbuf_prefetch_arg {
3061 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3062 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3063 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3064 	int dpa_curlevel; /* The current level that we're reading */
3065 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3066 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3067 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3068 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3069 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3070 	void *dpa_arg; /* prefetch completion arg */
3071 } dbuf_prefetch_arg_t;
3072 
3073 static void
3074 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3075 {
3076 	if (dpa->dpa_cb != NULL)
3077 		dpa->dpa_cb(dpa->dpa_arg, io_done);
3078 	kmem_free(dpa, sizeof (*dpa));
3079 }
3080 
3081 static void
3082 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3083     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3084 {
3085 	dbuf_prefetch_arg_t *dpa = private;
3086 
3087 	dbuf_prefetch_fini(dpa, B_TRUE);
3088 	if (abuf != NULL)
3089 		arc_buf_destroy(abuf, private);
3090 }
3091 
3092 /*
3093  * Actually issue the prefetch read for the block given.
3094  */
3095 static void
3096 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3097 {
3098 	ASSERT(!BP_IS_REDACTED(bp) ||
3099 	    dsl_dataset_feature_is_active(
3100 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3101 	    SPA_FEATURE_REDACTED_DATASETS));
3102 
3103 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3104 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3105 
3106 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3107 	arc_flags_t aflags =
3108 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3109 	    ARC_FLAG_NO_BUF;
3110 
3111 	/* dnodes are always read as raw and then converted later */
3112 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3113 	    dpa->dpa_curlevel == 0)
3114 		zio_flags |= ZIO_FLAG_RAW;
3115 
3116 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3117 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3118 	ASSERT(dpa->dpa_zio != NULL);
3119 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3120 	    dbuf_issue_final_prefetch_done, dpa,
3121 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3122 }
3123 
3124 /*
3125  * Called when an indirect block above our prefetch target is read in.  This
3126  * will either read in the next indirect block down the tree or issue the actual
3127  * prefetch if the next block down is our target.
3128  */
3129 static void
3130 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3131     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3132 {
3133 	dbuf_prefetch_arg_t *dpa = private;
3134 
3135 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3136 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3137 
3138 	if (abuf == NULL) {
3139 		ASSERT(zio == NULL || zio->io_error != 0);
3140 		return (dbuf_prefetch_fini(dpa, B_TRUE));
3141 	}
3142 	ASSERT(zio == NULL || zio->io_error == 0);
3143 
3144 	/*
3145 	 * The dpa_dnode is only valid if we are called with a NULL
3146 	 * zio. This indicates that the arc_read() returned without
3147 	 * first calling zio_read() to issue a physical read. Once
3148 	 * a physical read is made the dpa_dnode must be invalidated
3149 	 * as the locks guarding it may have been dropped. If the
3150 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3151 	 * cache. To do so, we must hold the dbuf associated with the block
3152 	 * we just prefetched, read its contents so that we associate it
3153 	 * with an arc_buf_t, and then release it.
3154 	 */
3155 	if (zio != NULL) {
3156 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3157 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3158 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3159 		} else {
3160 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3161 		}
3162 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3163 
3164 		dpa->dpa_dnode = NULL;
3165 	} else if (dpa->dpa_dnode != NULL) {
3166 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3167 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3168 		    dpa->dpa_zb.zb_level));
3169 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3170 		    dpa->dpa_curlevel, curblkid, FTAG);
3171 		if (db == NULL) {
3172 			arc_buf_destroy(abuf, private);
3173 			return (dbuf_prefetch_fini(dpa, B_TRUE));
3174 		}
3175 		(void) dbuf_read(db, NULL,
3176 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3177 		dbuf_rele(db, FTAG);
3178 	}
3179 
3180 	dpa->dpa_curlevel--;
3181 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3182 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3183 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3184 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3185 
3186 	ASSERT(!BP_IS_REDACTED(bp) ||
3187 	    dsl_dataset_feature_is_active(
3188 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3189 	    SPA_FEATURE_REDACTED_DATASETS));
3190 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3191 		dbuf_prefetch_fini(dpa, B_TRUE);
3192 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3193 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3194 		dbuf_issue_final_prefetch(dpa, bp);
3195 	} else {
3196 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3197 		zbookmark_phys_t zb;
3198 
3199 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3200 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3201 			iter_aflags |= ARC_FLAG_L2CACHE;
3202 
3203 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3204 
3205 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3206 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3207 
3208 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3209 		    bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
3210 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3211 		    &iter_aflags, &zb);
3212 	}
3213 
3214 	arc_buf_destroy(abuf, private);
3215 }
3216 
3217 /*
3218  * Issue prefetch reads for the given block on the given level.  If the indirect
3219  * blocks above that block are not in memory, we will read them in
3220  * asynchronously.  As a result, this call never blocks waiting for a read to
3221  * complete. Note that the prefetch might fail if the dataset is encrypted and
3222  * the encryption key is unmapped before the IO completes.
3223  */
3224 int
3225 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3226     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3227     void *arg)
3228 {
3229 	blkptr_t bp;
3230 	int epbs, nlevels, curlevel;
3231 	uint64_t curblkid;
3232 
3233 	ASSERT(blkid != DMU_BONUS_BLKID);
3234 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3235 
3236 	if (blkid > dn->dn_maxblkid)
3237 		goto no_issue;
3238 
3239 	if (level == 0 && dnode_block_freed(dn, blkid))
3240 		goto no_issue;
3241 
3242 	/*
3243 	 * This dnode hasn't been written to disk yet, so there's nothing to
3244 	 * prefetch.
3245 	 */
3246 	nlevels = dn->dn_phys->dn_nlevels;
3247 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3248 		goto no_issue;
3249 
3250 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3251 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3252 		goto no_issue;
3253 
3254 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3255 	    level, blkid);
3256 	if (db != NULL) {
3257 		mutex_exit(&db->db_mtx);
3258 		/*
3259 		 * This dbuf already exists.  It is either CACHED, or
3260 		 * (we assume) about to be read or filled.
3261 		 */
3262 		goto no_issue;
3263 	}
3264 
3265 	/*
3266 	 * Find the closest ancestor (indirect block) of the target block
3267 	 * that is present in the cache.  In this indirect block, we will
3268 	 * find the bp that is at curlevel, curblkid.
3269 	 */
3270 	curlevel = level;
3271 	curblkid = blkid;
3272 	while (curlevel < nlevels - 1) {
3273 		int parent_level = curlevel + 1;
3274 		uint64_t parent_blkid = curblkid >> epbs;
3275 		dmu_buf_impl_t *db;
3276 
3277 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3278 		    FALSE, TRUE, FTAG, &db) == 0) {
3279 			blkptr_t *bpp = db->db_buf->b_data;
3280 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3281 			dbuf_rele(db, FTAG);
3282 			break;
3283 		}
3284 
3285 		curlevel = parent_level;
3286 		curblkid = parent_blkid;
3287 	}
3288 
3289 	if (curlevel == nlevels - 1) {
3290 		/* No cached indirect blocks found. */
3291 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3292 		bp = dn->dn_phys->dn_blkptr[curblkid];
3293 	}
3294 	ASSERT(!BP_IS_REDACTED(&bp) ||
3295 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3296 	    SPA_FEATURE_REDACTED_DATASETS));
3297 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3298 		goto no_issue;
3299 
3300 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3301 
3302 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3303 	    ZIO_FLAG_CANFAIL);
3304 
3305 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3306 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3307 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3308 	    dn->dn_object, level, blkid);
3309 	dpa->dpa_curlevel = curlevel;
3310 	dpa->dpa_prio = prio;
3311 	dpa->dpa_aflags = aflags;
3312 	dpa->dpa_spa = dn->dn_objset->os_spa;
3313 	dpa->dpa_dnode = dn;
3314 	dpa->dpa_epbs = epbs;
3315 	dpa->dpa_zio = pio;
3316 	dpa->dpa_cb = cb;
3317 	dpa->dpa_arg = arg;
3318 
3319 	/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3320 	if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3321 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3322 
3323 	/*
3324 	 * If we have the indirect just above us, no need to do the asynchronous
3325 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3326 	 * a higher level, though, we want to issue the prefetches for all the
3327 	 * indirect blocks asynchronously, so we can go on with whatever we were
3328 	 * doing.
3329 	 */
3330 	if (curlevel == level) {
3331 		ASSERT3U(curblkid, ==, blkid);
3332 		dbuf_issue_final_prefetch(dpa, &bp);
3333 	} else {
3334 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3335 		zbookmark_phys_t zb;
3336 
3337 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3338 		if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3339 			iter_aflags |= ARC_FLAG_L2CACHE;
3340 
3341 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3342 		    dn->dn_object, curlevel, curblkid);
3343 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3344 		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
3345 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3346 		    &iter_aflags, &zb);
3347 	}
3348 	/*
3349 	 * We use pio here instead of dpa_zio since it's possible that
3350 	 * dpa may have already been freed.
3351 	 */
3352 	zio_nowait(pio);
3353 	return (1);
3354 no_issue:
3355 	if (cb != NULL)
3356 		cb(arg, B_FALSE);
3357 	return (0);
3358 }
3359 
3360 int
3361 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3362     arc_flags_t aflags)
3363 {
3364 
3365 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3366 }
3367 
3368 /*
3369  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3370  * the case of encrypted, compressed and uncompressed buffers by
3371  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3372  * arc_alloc_compressed_buf() or arc_alloc_buf().*
3373  *
3374  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3375  */
3376 noinline static void
3377 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3378 {
3379 	dbuf_dirty_record_t *dr = db->db_data_pending;
3380 	arc_buf_t *newdata, *data = dr->dt.dl.dr_data;
3381 
3382 	newdata = dbuf_alloc_arcbuf_from_arcbuf(db, data);
3383 	dbuf_set_data(db, newdata);
3384 	rw_enter(&db->db_rwlock, RW_WRITER);
3385 	bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
3386 	rw_exit(&db->db_rwlock);
3387 }
3388 
3389 /*
3390  * Returns with db_holds incremented, and db_mtx not held.
3391  * Note: dn_struct_rwlock must be held.
3392  */
3393 int
3394 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3395     boolean_t fail_sparse, boolean_t fail_uncached,
3396     void *tag, dmu_buf_impl_t **dbp)
3397 {
3398 	dmu_buf_impl_t *db, *parent = NULL;
3399 
3400 	/* If the pool has been created, verify the tx_sync_lock is not held */
3401 	spa_t *spa = dn->dn_objset->os_spa;
3402 	dsl_pool_t *dp = spa->spa_dsl_pool;
3403 	if (dp != NULL) {
3404 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3405 	}
3406 
3407 	ASSERT(blkid != DMU_BONUS_BLKID);
3408 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3409 	ASSERT3U(dn->dn_nlevels, >, level);
3410 
3411 	*dbp = NULL;
3412 
3413 	/* dbuf_find() returns with db_mtx held */
3414 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
3415 
3416 	if (db == NULL) {
3417 		blkptr_t *bp = NULL;
3418 		int err;
3419 
3420 		if (fail_uncached)
3421 			return (SET_ERROR(ENOENT));
3422 
3423 		ASSERT3P(parent, ==, NULL);
3424 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3425 		if (fail_sparse) {
3426 			if (err == 0 && bp && BP_IS_HOLE(bp))
3427 				err = SET_ERROR(ENOENT);
3428 			if (err) {
3429 				if (parent)
3430 					dbuf_rele(parent, NULL);
3431 				return (err);
3432 			}
3433 		}
3434 		if (err && err != ENOENT)
3435 			return (err);
3436 		db = dbuf_create(dn, level, blkid, parent, bp);
3437 	}
3438 
3439 	if (fail_uncached && db->db_state != DB_CACHED) {
3440 		mutex_exit(&db->db_mtx);
3441 		return (SET_ERROR(ENOENT));
3442 	}
3443 
3444 	if (db->db_buf != NULL) {
3445 		arc_buf_access(db->db_buf);
3446 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3447 	}
3448 
3449 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3450 
3451 	/*
3452 	 * If this buffer is currently syncing out, and we are
3453 	 * still referencing it from db_data, we need to make a copy
3454 	 * of it in case we decide we want to dirty it again in this txg.
3455 	 */
3456 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3457 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3458 	    db->db_state == DB_CACHED && db->db_data_pending) {
3459 		dbuf_dirty_record_t *dr = db->db_data_pending;
3460 		if (dr->dt.dl.dr_data == db->db_buf)
3461 			dbuf_hold_copy(dn, db);
3462 	}
3463 
3464 	if (multilist_link_active(&db->db_cache_link)) {
3465 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3466 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3467 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3468 
3469 		multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
3470 		(void) zfs_refcount_remove_many(
3471 		    &dbuf_caches[db->db_caching_status].size,
3472 		    db->db.db_size, db);
3473 
3474 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3475 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3476 		} else {
3477 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3478 			DBUF_STAT_BUMPDOWN(cache_count);
3479 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3480 			    db->db.db_size);
3481 		}
3482 		db->db_caching_status = DB_NO_CACHE;
3483 	}
3484 	(void) zfs_refcount_add(&db->db_holds, tag);
3485 	DBUF_VERIFY(db);
3486 	mutex_exit(&db->db_mtx);
3487 
3488 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3489 	if (parent)
3490 		dbuf_rele(parent, NULL);
3491 
3492 	ASSERT3P(DB_DNODE(db), ==, dn);
3493 	ASSERT3U(db->db_blkid, ==, blkid);
3494 	ASSERT3U(db->db_level, ==, level);
3495 	*dbp = db;
3496 
3497 	return (0);
3498 }
3499 
3500 dmu_buf_impl_t *
3501 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3502 {
3503 	return (dbuf_hold_level(dn, 0, blkid, tag));
3504 }
3505 
3506 dmu_buf_impl_t *
3507 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3508 {
3509 	dmu_buf_impl_t *db;
3510 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3511 	return (err ? NULL : db);
3512 }
3513 
3514 void
3515 dbuf_create_bonus(dnode_t *dn)
3516 {
3517 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3518 
3519 	ASSERT(dn->dn_bonus == NULL);
3520 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3521 }
3522 
3523 int
3524 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3525 {
3526 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3527 
3528 	if (db->db_blkid != DMU_SPILL_BLKID)
3529 		return (SET_ERROR(ENOTSUP));
3530 	if (blksz == 0)
3531 		blksz = SPA_MINBLOCKSIZE;
3532 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3533 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3534 
3535 	dbuf_new_size(db, blksz, tx);
3536 
3537 	return (0);
3538 }
3539 
3540 void
3541 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3542 {
3543 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3544 }
3545 
3546 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3547 void
3548 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3549 {
3550 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3551 	VERIFY3S(holds, >, 1);
3552 }
3553 
3554 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3555 boolean_t
3556 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3557     void *tag)
3558 {
3559 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3560 	dmu_buf_impl_t *found_db;
3561 	boolean_t result = B_FALSE;
3562 
3563 	if (blkid == DMU_BONUS_BLKID)
3564 		found_db = dbuf_find_bonus(os, obj);
3565 	else
3566 		found_db = dbuf_find(os, obj, 0, blkid);
3567 
3568 	if (found_db != NULL) {
3569 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3570 			(void) zfs_refcount_add(&db->db_holds, tag);
3571 			result = B_TRUE;
3572 		}
3573 		mutex_exit(&found_db->db_mtx);
3574 	}
3575 	return (result);
3576 }
3577 
3578 /*
3579  * If you call dbuf_rele() you had better not be referencing the dnode handle
3580  * unless you have some other direct or indirect hold on the dnode. (An indirect
3581  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3582  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3583  * dnode's parent dbuf evicting its dnode handles.
3584  */
3585 void
3586 dbuf_rele(dmu_buf_impl_t *db, void *tag)
3587 {
3588 	mutex_enter(&db->db_mtx);
3589 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3590 }
3591 
3592 void
3593 dmu_buf_rele(dmu_buf_t *db, void *tag)
3594 {
3595 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3596 }
3597 
3598 /*
3599  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3600  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3601  * argument should be set if we are already in the dbuf-evicting code
3602  * path, in which case we don't want to recursively evict.  This allows us to
3603  * avoid deeply nested stacks that would have a call flow similar to this:
3604  *
3605  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3606  *	^						|
3607  *	|						|
3608  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3609  *
3610  */
3611 void
3612 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
3613 {
3614 	int64_t holds;
3615 	uint64_t size;
3616 
3617 	ASSERT(MUTEX_HELD(&db->db_mtx));
3618 	DBUF_VERIFY(db);
3619 
3620 	/*
3621 	 * Remove the reference to the dbuf before removing its hold on the
3622 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3623 	 * buffer has a corresponding dnode hold.
3624 	 */
3625 	holds = zfs_refcount_remove(&db->db_holds, tag);
3626 	ASSERT(holds >= 0);
3627 
3628 	/*
3629 	 * We can't freeze indirects if there is a possibility that they
3630 	 * may be modified in the current syncing context.
3631 	 */
3632 	if (db->db_buf != NULL &&
3633 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3634 		arc_buf_freeze(db->db_buf);
3635 	}
3636 
3637 	if (holds == db->db_dirtycnt &&
3638 	    db->db_level == 0 && db->db_user_immediate_evict)
3639 		dbuf_evict_user(db);
3640 
3641 	if (holds == 0) {
3642 		if (db->db_blkid == DMU_BONUS_BLKID) {
3643 			dnode_t *dn;
3644 			boolean_t evict_dbuf = db->db_pending_evict;
3645 
3646 			/*
3647 			 * If the dnode moves here, we cannot cross this
3648 			 * barrier until the move completes.
3649 			 */
3650 			DB_DNODE_ENTER(db);
3651 
3652 			dn = DB_DNODE(db);
3653 			atomic_dec_32(&dn->dn_dbufs_count);
3654 
3655 			/*
3656 			 * Decrementing the dbuf count means that the bonus
3657 			 * buffer's dnode hold is no longer discounted in
3658 			 * dnode_move(). The dnode cannot move until after
3659 			 * the dnode_rele() below.
3660 			 */
3661 			DB_DNODE_EXIT(db);
3662 
3663 			/*
3664 			 * Do not reference db after its lock is dropped.
3665 			 * Another thread may evict it.
3666 			 */
3667 			mutex_exit(&db->db_mtx);
3668 
3669 			if (evict_dbuf)
3670 				dnode_evict_bonus(dn);
3671 
3672 			dnode_rele(dn, db);
3673 		} else if (db->db_buf == NULL) {
3674 			/*
3675 			 * This is a special case: we never associated this
3676 			 * dbuf with any data allocated from the ARC.
3677 			 */
3678 			ASSERT(db->db_state == DB_UNCACHED ||
3679 			    db->db_state == DB_NOFILL);
3680 			dbuf_destroy(db);
3681 		} else if (arc_released(db->db_buf)) {
3682 			/*
3683 			 * This dbuf has anonymous data associated with it.
3684 			 */
3685 			dbuf_destroy(db);
3686 		} else {
3687 			boolean_t do_arc_evict = B_FALSE;
3688 			blkptr_t bp;
3689 			spa_t *spa = dmu_objset_spa(db->db_objset);
3690 
3691 			if (!DBUF_IS_CACHEABLE(db) &&
3692 			    db->db_blkptr != NULL &&
3693 			    !BP_IS_HOLE(db->db_blkptr) &&
3694 			    !BP_IS_EMBEDDED(db->db_blkptr)) {
3695 				do_arc_evict = B_TRUE;
3696 				bp = *db->db_blkptr;
3697 			}
3698 
3699 			if (!DBUF_IS_CACHEABLE(db) ||
3700 			    db->db_pending_evict) {
3701 				dbuf_destroy(db);
3702 			} else if (!multilist_link_active(&db->db_cache_link)) {
3703 				ASSERT3U(db->db_caching_status, ==,
3704 				    DB_NO_CACHE);
3705 
3706 				dbuf_cached_state_t dcs =
3707 				    dbuf_include_in_metadata_cache(db) ?
3708 				    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3709 				db->db_caching_status = dcs;
3710 
3711 				multilist_insert(dbuf_caches[dcs].cache, db);
3712 				size = zfs_refcount_add_many(
3713 				    &dbuf_caches[dcs].size,
3714 				    db->db.db_size, db);
3715 
3716 				if (dcs == DB_DBUF_METADATA_CACHE) {
3717 					DBUF_STAT_BUMP(metadata_cache_count);
3718 					DBUF_STAT_MAX(
3719 					    metadata_cache_size_bytes_max,
3720 					    size);
3721 				} else {
3722 					DBUF_STAT_BUMP(
3723 					    cache_levels[db->db_level]);
3724 					DBUF_STAT_BUMP(cache_count);
3725 					DBUF_STAT_INCR(
3726 					    cache_levels_bytes[db->db_level],
3727 					    db->db.db_size);
3728 					DBUF_STAT_MAX(cache_size_bytes_max,
3729 					    size);
3730 				}
3731 				mutex_exit(&db->db_mtx);
3732 
3733 				if (dcs == DB_DBUF_CACHE && !evicting)
3734 					dbuf_evict_notify(size);
3735 			}
3736 
3737 			if (do_arc_evict)
3738 				arc_freed(spa, &bp);
3739 		}
3740 	} else {
3741 		mutex_exit(&db->db_mtx);
3742 	}
3743 
3744 }
3745 
3746 #pragma weak dmu_buf_refcount = dbuf_refcount
3747 uint64_t
3748 dbuf_refcount(dmu_buf_impl_t *db)
3749 {
3750 	return (zfs_refcount_count(&db->db_holds));
3751 }
3752 
3753 uint64_t
3754 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3755 {
3756 	uint64_t holds;
3757 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3758 
3759 	mutex_enter(&db->db_mtx);
3760 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3761 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3762 	mutex_exit(&db->db_mtx);
3763 
3764 	return (holds);
3765 }
3766 
3767 void *
3768 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3769     dmu_buf_user_t *new_user)
3770 {
3771 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3772 
3773 	mutex_enter(&db->db_mtx);
3774 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3775 	if (db->db_user == old_user)
3776 		db->db_user = new_user;
3777 	else
3778 		old_user = db->db_user;
3779 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3780 	mutex_exit(&db->db_mtx);
3781 
3782 	return (old_user);
3783 }
3784 
3785 void *
3786 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3787 {
3788 	return (dmu_buf_replace_user(db_fake, NULL, user));
3789 }
3790 
3791 void *
3792 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3793 {
3794 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3795 
3796 	db->db_user_immediate_evict = TRUE;
3797 	return (dmu_buf_set_user(db_fake, user));
3798 }
3799 
3800 void *
3801 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3802 {
3803 	return (dmu_buf_replace_user(db_fake, user, NULL));
3804 }
3805 
3806 void *
3807 dmu_buf_get_user(dmu_buf_t *db_fake)
3808 {
3809 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3810 
3811 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3812 	return (db->db_user);
3813 }
3814 
3815 void
3816 dmu_buf_user_evict_wait()
3817 {
3818 	taskq_wait(dbu_evict_taskq);
3819 }
3820 
3821 blkptr_t *
3822 dmu_buf_get_blkptr(dmu_buf_t *db)
3823 {
3824 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3825 	return (dbi->db_blkptr);
3826 }
3827 
3828 objset_t *
3829 dmu_buf_get_objset(dmu_buf_t *db)
3830 {
3831 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3832 	return (dbi->db_objset);
3833 }
3834 
3835 dnode_t *
3836 dmu_buf_dnode_enter(dmu_buf_t *db)
3837 {
3838 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3839 	DB_DNODE_ENTER(dbi);
3840 	return (DB_DNODE(dbi));
3841 }
3842 
3843 void
3844 dmu_buf_dnode_exit(dmu_buf_t *db)
3845 {
3846 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3847 	DB_DNODE_EXIT(dbi);
3848 }
3849 
3850 static void
3851 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3852 {
3853 	/* ASSERT(dmu_tx_is_syncing(tx) */
3854 	ASSERT(MUTEX_HELD(&db->db_mtx));
3855 
3856 	if (db->db_blkptr != NULL)
3857 		return;
3858 
3859 	if (db->db_blkid == DMU_SPILL_BLKID) {
3860 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
3861 		BP_ZERO(db->db_blkptr);
3862 		return;
3863 	}
3864 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3865 		/*
3866 		 * This buffer was allocated at a time when there was
3867 		 * no available blkptrs from the dnode, or it was
3868 		 * inappropriate to hook it in (i.e., nlevels mismatch).
3869 		 */
3870 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3871 		ASSERT(db->db_parent == NULL);
3872 		db->db_parent = dn->dn_dbuf;
3873 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3874 		DBUF_VERIFY(db);
3875 	} else {
3876 		dmu_buf_impl_t *parent = db->db_parent;
3877 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3878 
3879 		ASSERT(dn->dn_phys->dn_nlevels > 1);
3880 		if (parent == NULL) {
3881 			mutex_exit(&db->db_mtx);
3882 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
3883 			parent = dbuf_hold_level(dn, db->db_level + 1,
3884 			    db->db_blkid >> epbs, db);
3885 			rw_exit(&dn->dn_struct_rwlock);
3886 			mutex_enter(&db->db_mtx);
3887 			db->db_parent = parent;
3888 		}
3889 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
3890 		    (db->db_blkid & ((1ULL << epbs) - 1));
3891 		DBUF_VERIFY(db);
3892 	}
3893 }
3894 
3895 static void
3896 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3897 {
3898 	dmu_buf_impl_t *db = dr->dr_dbuf;
3899 	void *data = dr->dt.dl.dr_data;
3900 
3901 	ASSERT0(db->db_level);
3902 	ASSERT(MUTEX_HELD(&db->db_mtx));
3903 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
3904 	ASSERT(data != NULL);
3905 
3906 	dnode_t *dn = dr->dr_dnode;
3907 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
3908 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
3909 	bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
3910 
3911 	dbuf_sync_leaf_verify_bonus_dnode(dr);
3912 
3913 	dbuf_undirty_bonus(dr);
3914 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
3915 }
3916 
3917 /*
3918  * When syncing out a blocks of dnodes, adjust the block to deal with
3919  * encryption.  Normally, we make sure the block is decrypted before writing
3920  * it.  If we have crypt params, then we are writing a raw (encrypted) block,
3921  * from a raw receive.  In this case, set the ARC buf's crypt params so
3922  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
3923  */
3924 static void
3925 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
3926 {
3927 	int err;
3928 	dmu_buf_impl_t *db = dr->dr_dbuf;
3929 
3930 	ASSERT(MUTEX_HELD(&db->db_mtx));
3931 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
3932 	ASSERT3U(db->db_level, ==, 0);
3933 
3934 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
3935 		zbookmark_phys_t zb;
3936 
3937 		/*
3938 		 * Unfortunately, there is currently no mechanism for
3939 		 * syncing context to handle decryption errors. An error
3940 		 * here is only possible if an attacker maliciously
3941 		 * changed a dnode block and updated the associated
3942 		 * checksums going up the block tree.
3943 		 */
3944 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
3945 		    db->db.db_object, db->db_level, db->db_blkid);
3946 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
3947 		    &zb, B_TRUE);
3948 		if (err)
3949 			panic("Invalid dnode block MAC");
3950 	} else if (dr->dt.dl.dr_has_raw_params) {
3951 		(void) arc_release(dr->dt.dl.dr_data, db);
3952 		arc_convert_to_raw(dr->dt.dl.dr_data,
3953 		    dmu_objset_id(db->db_objset),
3954 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
3955 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
3956 	}
3957 }
3958 
3959 /*
3960  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
3961  * is critical the we not allow the compiler to inline this function in to
3962  * dbuf_sync_list() thereby drastically bloating the stack usage.
3963  */
3964 noinline static void
3965 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3966 {
3967 	dmu_buf_impl_t *db = dr->dr_dbuf;
3968 	dnode_t *dn = dr->dr_dnode;
3969 
3970 	ASSERT(dmu_tx_is_syncing(tx));
3971 
3972 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
3973 
3974 	mutex_enter(&db->db_mtx);
3975 
3976 	ASSERT(db->db_level > 0);
3977 	DBUF_VERIFY(db);
3978 
3979 	/* Read the block if it hasn't been read yet. */
3980 	if (db->db_buf == NULL) {
3981 		mutex_exit(&db->db_mtx);
3982 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
3983 		mutex_enter(&db->db_mtx);
3984 	}
3985 	ASSERT3U(db->db_state, ==, DB_CACHED);
3986 	ASSERT(db->db_buf != NULL);
3987 
3988 	/* Indirect block size must match what the dnode thinks it is. */
3989 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
3990 	dbuf_check_blkptr(dn, db);
3991 
3992 	/* Provide the pending dirty record to child dbufs */
3993 	db->db_data_pending = dr;
3994 
3995 	mutex_exit(&db->db_mtx);
3996 
3997 	dbuf_write(dr, db->db_buf, tx);
3998 
3999 	zio_t *zio = dr->dr_zio;
4000 	mutex_enter(&dr->dt.di.dr_mtx);
4001 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4002 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4003 	mutex_exit(&dr->dt.di.dr_mtx);
4004 	zio_nowait(zio);
4005 }
4006 
4007 /*
4008  * Verify that the size of the data in our bonus buffer does not exceed
4009  * its recorded size.
4010  *
4011  * The purpose of this verification is to catch any cases in development
4012  * where the size of a phys structure (i.e space_map_phys_t) grows and,
4013  * due to incorrect feature management, older pools expect to read more
4014  * data even though they didn't actually write it to begin with.
4015  *
4016  * For a example, this would catch an error in the feature logic where we
4017  * open an older pool and we expect to write the space map histogram of
4018  * a space map with size SPACE_MAP_SIZE_V0.
4019  */
4020 static void
4021 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4022 {
4023 #ifdef ZFS_DEBUG
4024 	dnode_t *dn = dr->dr_dnode;
4025 
4026 	/*
4027 	 * Encrypted bonus buffers can have data past their bonuslen.
4028 	 * Skip the verification of these blocks.
4029 	 */
4030 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4031 		return;
4032 
4033 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4034 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4035 	ASSERT3U(bonuslen, <=, maxbonuslen);
4036 
4037 	arc_buf_t *datap = dr->dt.dl.dr_data;
4038 	char *datap_end = ((char *)datap) + bonuslen;
4039 	char *datap_max = ((char *)datap) + maxbonuslen;
4040 
4041 	/* ensure that everything is zero after our data */
4042 	for (; datap_end < datap_max; datap_end++)
4043 		ASSERT(*datap_end == 0);
4044 #endif
4045 }
4046 
4047 static blkptr_t *
4048 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4049 {
4050 	/* This must be a lightweight dirty record. */
4051 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4052 	dnode_t *dn = dr->dr_dnode;
4053 
4054 	if (dn->dn_phys->dn_nlevels == 1) {
4055 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4056 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4057 	} else {
4058 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4059 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4060 		VERIFY3U(parent_db->db_level, ==, 1);
4061 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4062 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4063 		blkptr_t *bp = parent_db->db.db_data;
4064 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4065 	}
4066 }
4067 
4068 static void
4069 dbuf_lightweight_ready(zio_t *zio)
4070 {
4071 	dbuf_dirty_record_t *dr = zio->io_private;
4072 	blkptr_t *bp = zio->io_bp;
4073 
4074 	if (zio->io_error != 0)
4075 		return;
4076 
4077 	dnode_t *dn = dr->dr_dnode;
4078 
4079 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4080 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4081 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4082 	    bp_get_dsize_sync(spa, bp_orig);
4083 	dnode_diduse_space(dn, delta);
4084 
4085 	uint64_t blkid = dr->dt.dll.dr_blkid;
4086 	mutex_enter(&dn->dn_mtx);
4087 	if (blkid > dn->dn_phys->dn_maxblkid) {
4088 		ASSERT0(dn->dn_objset->os_raw_receive);
4089 		dn->dn_phys->dn_maxblkid = blkid;
4090 	}
4091 	mutex_exit(&dn->dn_mtx);
4092 
4093 	if (!BP_IS_EMBEDDED(bp)) {
4094 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4095 		BP_SET_FILL(bp, fill);
4096 	}
4097 
4098 	dmu_buf_impl_t *parent_db;
4099 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4100 	if (dr->dr_parent == NULL) {
4101 		parent_db = dn->dn_dbuf;
4102 	} else {
4103 		parent_db = dr->dr_parent->dr_dbuf;
4104 	}
4105 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4106 	*bp_orig = *bp;
4107 	rw_exit(&parent_db->db_rwlock);
4108 }
4109 
4110 static void
4111 dbuf_lightweight_physdone(zio_t *zio)
4112 {
4113 	dbuf_dirty_record_t *dr = zio->io_private;
4114 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4115 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4116 
4117 	/*
4118 	 * The callback will be called io_phys_children times.  Retire one
4119 	 * portion of our dirty space each time we are called.  Any rounding
4120 	 * error will be cleaned up by dbuf_lightweight_done().
4121 	 */
4122 	int delta = dr->dr_accounted / zio->io_phys_children;
4123 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4124 }
4125 
4126 static void
4127 dbuf_lightweight_done(zio_t *zio)
4128 {
4129 	dbuf_dirty_record_t *dr = zio->io_private;
4130 
4131 	VERIFY0(zio->io_error);
4132 
4133 	objset_t *os = dr->dr_dnode->dn_objset;
4134 	dmu_tx_t *tx = os->os_synctx;
4135 
4136 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4137 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4138 	} else {
4139 		dsl_dataset_t *ds = os->os_dsl_dataset;
4140 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4141 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4142 	}
4143 
4144 	/*
4145 	 * See comment in dbuf_write_done().
4146 	 */
4147 	if (zio->io_phys_children == 0) {
4148 		dsl_pool_undirty_space(dmu_objset_pool(os),
4149 		    dr->dr_accounted, zio->io_txg);
4150 	} else {
4151 		dsl_pool_undirty_space(dmu_objset_pool(os),
4152 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4153 	}
4154 
4155 	abd_free(dr->dt.dll.dr_abd);
4156 	kmem_free(dr, sizeof (*dr));
4157 }
4158 
4159 noinline static void
4160 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4161 {
4162 	dnode_t *dn = dr->dr_dnode;
4163 	zio_t *pio;
4164 	if (dn->dn_phys->dn_nlevels == 1) {
4165 		pio = dn->dn_zio;
4166 	} else {
4167 		pio = dr->dr_parent->dr_zio;
4168 	}
4169 
4170 	zbookmark_phys_t zb = {
4171 		.zb_objset = dmu_objset_id(dn->dn_objset),
4172 		.zb_object = dn->dn_object,
4173 		.zb_level = 0,
4174 		.zb_blkid = dr->dt.dll.dr_blkid,
4175 	};
4176 
4177 	/*
4178 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4179 	 * will have the old BP in dbuf_lightweight_done().
4180 	 */
4181 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4182 
4183 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4184 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4185 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4186 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4187 	    dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4188 	    ZIO_PRIORITY_ASYNC_WRITE,
4189 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4190 
4191 	zio_nowait(dr->dr_zio);
4192 }
4193 
4194 /*
4195  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4196  * critical the we not allow the compiler to inline this function in to
4197  * dbuf_sync_list() thereby drastically bloating the stack usage.
4198  */
4199 noinline static void
4200 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4201 {
4202 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4203 	dmu_buf_impl_t *db = dr->dr_dbuf;
4204 	dnode_t *dn = dr->dr_dnode;
4205 	objset_t *os;
4206 	uint64_t txg = tx->tx_txg;
4207 
4208 	ASSERT(dmu_tx_is_syncing(tx));
4209 
4210 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4211 
4212 	mutex_enter(&db->db_mtx);
4213 	/*
4214 	 * To be synced, we must be dirtied.  But we
4215 	 * might have been freed after the dirty.
4216 	 */
4217 	if (db->db_state == DB_UNCACHED) {
4218 		/* This buffer has been freed since it was dirtied */
4219 		ASSERT(db->db.db_data == NULL);
4220 	} else if (db->db_state == DB_FILL) {
4221 		/* This buffer was freed and is now being re-filled */
4222 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4223 	} else {
4224 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4225 	}
4226 	DBUF_VERIFY(db);
4227 
4228 	if (db->db_blkid == DMU_SPILL_BLKID) {
4229 		mutex_enter(&dn->dn_mtx);
4230 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4231 			/*
4232 			 * In the previous transaction group, the bonus buffer
4233 			 * was entirely used to store the attributes for the
4234 			 * dnode which overrode the dn_spill field.  However,
4235 			 * when adding more attributes to the file a spill
4236 			 * block was required to hold the extra attributes.
4237 			 *
4238 			 * Make sure to clear the garbage left in the dn_spill
4239 			 * field from the previous attributes in the bonus
4240 			 * buffer.  Otherwise, after writing out the spill
4241 			 * block to the new allocated dva, it will free
4242 			 * the old block pointed to by the invalid dn_spill.
4243 			 */
4244 			db->db_blkptr = NULL;
4245 		}
4246 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4247 		mutex_exit(&dn->dn_mtx);
4248 	}
4249 
4250 	/*
4251 	 * If this is a bonus buffer, simply copy the bonus data into the
4252 	 * dnode.  It will be written out when the dnode is synced (and it
4253 	 * will be synced, since it must have been dirty for dbuf_sync to
4254 	 * be called).
4255 	 */
4256 	if (db->db_blkid == DMU_BONUS_BLKID) {
4257 		ASSERT(dr->dr_dbuf == db);
4258 		dbuf_sync_bonus(dr, tx);
4259 		return;
4260 	}
4261 
4262 	os = dn->dn_objset;
4263 
4264 	/*
4265 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4266 	 * operation to sneak in. As a result, we need to ensure that we
4267 	 * don't check the dr_override_state until we have returned from
4268 	 * dbuf_check_blkptr.
4269 	 */
4270 	dbuf_check_blkptr(dn, db);
4271 
4272 	/*
4273 	 * If this buffer is in the middle of an immediate write,
4274 	 * wait for the synchronous IO to complete.
4275 	 */
4276 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4277 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4278 		cv_wait(&db->db_changed, &db->db_mtx);
4279 		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4280 	}
4281 
4282 	/*
4283 	 * If this is a dnode block, ensure it is appropriately encrypted
4284 	 * or decrypted, depending on what we are writing to it this txg.
4285 	 */
4286 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4287 		dbuf_prepare_encrypted_dnode_leaf(dr);
4288 
4289 	if (db->db_state != DB_NOFILL &&
4290 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4291 	    zfs_refcount_count(&db->db_holds) > 1 &&
4292 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4293 	    *datap == db->db_buf) {
4294 		/*
4295 		 * If this buffer is currently "in use" (i.e., there
4296 		 * are active holds and db_data still references it),
4297 		 * then make a copy before we start the write so that
4298 		 * any modifications from the open txg will not leak
4299 		 * into this write.
4300 		 *
4301 		 * NOTE: this copy does not need to be made for
4302 		 * objects only modified in the syncing context (e.g.
4303 		 * DNONE_DNODE blocks).
4304 		 */
4305 		*datap = dbuf_alloc_arcbuf_from_arcbuf(db, db->db_buf);
4306 		bcopy(db->db.db_data, (*datap)->b_data, arc_buf_size(*datap));
4307 	}
4308 	db->db_data_pending = dr;
4309 
4310 	mutex_exit(&db->db_mtx);
4311 
4312 	dbuf_write(dr, *datap, tx);
4313 
4314 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4315 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4316 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4317 	} else {
4318 		zio_nowait(dr->dr_zio);
4319 	}
4320 }
4321 
4322 void
4323 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4324 {
4325 	dbuf_dirty_record_t *dr;
4326 
4327 	while ((dr = list_head(list))) {
4328 		if (dr->dr_zio != NULL) {
4329 			/*
4330 			 * If we find an already initialized zio then we
4331 			 * are processing the meta-dnode, and we have finished.
4332 			 * The dbufs for all dnodes are put back on the list
4333 			 * during processing, so that we can zio_wait()
4334 			 * these IOs after initiating all child IOs.
4335 			 */
4336 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4337 			    DMU_META_DNODE_OBJECT);
4338 			break;
4339 		}
4340 		list_remove(list, dr);
4341 		if (dr->dr_dbuf == NULL) {
4342 			dbuf_sync_lightweight(dr, tx);
4343 		} else {
4344 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4345 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4346 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4347 			}
4348 			if (dr->dr_dbuf->db_level > 0)
4349 				dbuf_sync_indirect(dr, tx);
4350 			else
4351 				dbuf_sync_leaf(dr, tx);
4352 		}
4353 	}
4354 }
4355 
4356 /* ARGSUSED */
4357 static void
4358 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4359 {
4360 	dmu_buf_impl_t *db = vdb;
4361 	dnode_t *dn;
4362 	blkptr_t *bp = zio->io_bp;
4363 	blkptr_t *bp_orig = &zio->io_bp_orig;
4364 	spa_t *spa = zio->io_spa;
4365 	int64_t delta;
4366 	uint64_t fill = 0;
4367 	int i;
4368 
4369 	ASSERT3P(db->db_blkptr, !=, NULL);
4370 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4371 
4372 	DB_DNODE_ENTER(db);
4373 	dn = DB_DNODE(db);
4374 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4375 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4376 	zio->io_prev_space_delta = delta;
4377 
4378 	if (bp->blk_birth != 0) {
4379 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4380 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4381 		    (db->db_blkid == DMU_SPILL_BLKID &&
4382 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4383 		    BP_IS_EMBEDDED(bp));
4384 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4385 	}
4386 
4387 	mutex_enter(&db->db_mtx);
4388 
4389 #ifdef ZFS_DEBUG
4390 	if (db->db_blkid == DMU_SPILL_BLKID) {
4391 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4392 		ASSERT(!(BP_IS_HOLE(bp)) &&
4393 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4394 	}
4395 #endif
4396 
4397 	if (db->db_level == 0) {
4398 		mutex_enter(&dn->dn_mtx);
4399 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4400 		    db->db_blkid != DMU_SPILL_BLKID) {
4401 			ASSERT0(db->db_objset->os_raw_receive);
4402 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4403 		}
4404 		mutex_exit(&dn->dn_mtx);
4405 
4406 		if (dn->dn_type == DMU_OT_DNODE) {
4407 			i = 0;
4408 			while (i < db->db.db_size) {
4409 				dnode_phys_t *dnp =
4410 				    (void *)(((char *)db->db.db_data) + i);
4411 
4412 				i += DNODE_MIN_SIZE;
4413 				if (dnp->dn_type != DMU_OT_NONE) {
4414 					fill++;
4415 					i += dnp->dn_extra_slots *
4416 					    DNODE_MIN_SIZE;
4417 				}
4418 			}
4419 		} else {
4420 			if (BP_IS_HOLE(bp)) {
4421 				fill = 0;
4422 			} else {
4423 				fill = 1;
4424 			}
4425 		}
4426 	} else {
4427 		blkptr_t *ibp = db->db.db_data;
4428 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4429 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4430 			if (BP_IS_HOLE(ibp))
4431 				continue;
4432 			fill += BP_GET_FILL(ibp);
4433 		}
4434 	}
4435 	DB_DNODE_EXIT(db);
4436 
4437 	if (!BP_IS_EMBEDDED(bp))
4438 		BP_SET_FILL(bp, fill);
4439 
4440 	mutex_exit(&db->db_mtx);
4441 
4442 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4443 	*db->db_blkptr = *bp;
4444 	dmu_buf_unlock_parent(db, dblt, FTAG);
4445 }
4446 
4447 /* ARGSUSED */
4448 /*
4449  * This function gets called just prior to running through the compression
4450  * stage of the zio pipeline. If we're an indirect block comprised of only
4451  * holes, then we want this indirect to be compressed away to a hole. In
4452  * order to do that we must zero out any information about the holes that
4453  * this indirect points to prior to before we try to compress it.
4454  */
4455 static void
4456 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4457 {
4458 	dmu_buf_impl_t *db = vdb;
4459 	dnode_t *dn;
4460 	blkptr_t *bp;
4461 	unsigned int epbs, i;
4462 
4463 	ASSERT3U(db->db_level, >, 0);
4464 	DB_DNODE_ENTER(db);
4465 	dn = DB_DNODE(db);
4466 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4467 	ASSERT3U(epbs, <, 31);
4468 
4469 	/* Determine if all our children are holes */
4470 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4471 		if (!BP_IS_HOLE(bp))
4472 			break;
4473 	}
4474 
4475 	/*
4476 	 * If all the children are holes, then zero them all out so that
4477 	 * we may get compressed away.
4478 	 */
4479 	if (i == 1ULL << epbs) {
4480 		/*
4481 		 * We only found holes. Grab the rwlock to prevent
4482 		 * anybody from reading the blocks we're about to
4483 		 * zero out.
4484 		 */
4485 		rw_enter(&db->db_rwlock, RW_WRITER);
4486 		bzero(db->db.db_data, db->db.db_size);
4487 		rw_exit(&db->db_rwlock);
4488 	}
4489 	DB_DNODE_EXIT(db);
4490 }
4491 
4492 /*
4493  * The SPA will call this callback several times for each zio - once
4494  * for every physical child i/o (zio->io_phys_children times).  This
4495  * allows the DMU to monitor the progress of each logical i/o.  For example,
4496  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4497  * block.  There may be a long delay before all copies/fragments are completed,
4498  * so this callback allows us to retire dirty space gradually, as the physical
4499  * i/os complete.
4500  */
4501 /* ARGSUSED */
4502 static void
4503 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4504 {
4505 	dmu_buf_impl_t *db = arg;
4506 	objset_t *os = db->db_objset;
4507 	dsl_pool_t *dp = dmu_objset_pool(os);
4508 	dbuf_dirty_record_t *dr;
4509 	int delta = 0;
4510 
4511 	dr = db->db_data_pending;
4512 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4513 
4514 	/*
4515 	 * The callback will be called io_phys_children times.  Retire one
4516 	 * portion of our dirty space each time we are called.  Any rounding
4517 	 * error will be cleaned up by dbuf_write_done().
4518 	 */
4519 	delta = dr->dr_accounted / zio->io_phys_children;
4520 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4521 }
4522 
4523 /* ARGSUSED */
4524 static void
4525 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4526 {
4527 	dmu_buf_impl_t *db = vdb;
4528 	blkptr_t *bp_orig = &zio->io_bp_orig;
4529 	blkptr_t *bp = db->db_blkptr;
4530 	objset_t *os = db->db_objset;
4531 	dmu_tx_t *tx = os->os_synctx;
4532 
4533 	ASSERT0(zio->io_error);
4534 	ASSERT(db->db_blkptr == bp);
4535 
4536 	/*
4537 	 * For nopwrites and rewrites we ensure that the bp matches our
4538 	 * original and bypass all the accounting.
4539 	 */
4540 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4541 		ASSERT(BP_EQUAL(bp, bp_orig));
4542 	} else {
4543 		dsl_dataset_t *ds = os->os_dsl_dataset;
4544 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4545 		dsl_dataset_block_born(ds, bp, tx);
4546 	}
4547 
4548 	mutex_enter(&db->db_mtx);
4549 
4550 	DBUF_VERIFY(db);
4551 
4552 	dbuf_dirty_record_t *dr = db->db_data_pending;
4553 	dnode_t *dn = dr->dr_dnode;
4554 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4555 	ASSERT(dr->dr_dbuf == db);
4556 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4557 	list_remove(&db->db_dirty_records, dr);
4558 
4559 #ifdef ZFS_DEBUG
4560 	if (db->db_blkid == DMU_SPILL_BLKID) {
4561 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4562 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4563 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4564 	}
4565 #endif
4566 
4567 	if (db->db_level == 0) {
4568 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4569 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4570 		if (db->db_state != DB_NOFILL) {
4571 			if (dr->dt.dl.dr_data != db->db_buf)
4572 				arc_buf_destroy(dr->dt.dl.dr_data, db);
4573 		}
4574 	} else {
4575 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4576 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4577 		if (!BP_IS_HOLE(db->db_blkptr)) {
4578 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4579 			    SPA_BLKPTRSHIFT;
4580 			ASSERT3U(db->db_blkid, <=,
4581 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4582 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4583 			    db->db.db_size);
4584 		}
4585 		mutex_destroy(&dr->dt.di.dr_mtx);
4586 		list_destroy(&dr->dt.di.dr_children);
4587 	}
4588 
4589 	cv_broadcast(&db->db_changed);
4590 	ASSERT(db->db_dirtycnt > 0);
4591 	db->db_dirtycnt -= 1;
4592 	db->db_data_pending = NULL;
4593 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4594 
4595 	/*
4596 	 * If we didn't do a physical write in this ZIO and we
4597 	 * still ended up here, it means that the space of the
4598 	 * dbuf that we just released (and undirtied) above hasn't
4599 	 * been marked as undirtied in the pool's accounting.
4600 	 *
4601 	 * Thus, we undirty that space in the pool's view of the
4602 	 * world here. For physical writes this type of update
4603 	 * happens in dbuf_write_physdone().
4604 	 *
4605 	 * If we did a physical write, cleanup any rounding errors
4606 	 * that came up due to writing multiple copies of a block
4607 	 * on disk [see dbuf_write_physdone()].
4608 	 */
4609 	if (zio->io_phys_children == 0) {
4610 		dsl_pool_undirty_space(dmu_objset_pool(os),
4611 		    dr->dr_accounted, zio->io_txg);
4612 	} else {
4613 		dsl_pool_undirty_space(dmu_objset_pool(os),
4614 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4615 	}
4616 
4617 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
4618 }
4619 
4620 static void
4621 dbuf_write_nofill_ready(zio_t *zio)
4622 {
4623 	dbuf_write_ready(zio, NULL, zio->io_private);
4624 }
4625 
4626 static void
4627 dbuf_write_nofill_done(zio_t *zio)
4628 {
4629 	dbuf_write_done(zio, NULL, zio->io_private);
4630 }
4631 
4632 static void
4633 dbuf_write_override_ready(zio_t *zio)
4634 {
4635 	dbuf_dirty_record_t *dr = zio->io_private;
4636 	dmu_buf_impl_t *db = dr->dr_dbuf;
4637 
4638 	dbuf_write_ready(zio, NULL, db);
4639 }
4640 
4641 static void
4642 dbuf_write_override_done(zio_t *zio)
4643 {
4644 	dbuf_dirty_record_t *dr = zio->io_private;
4645 	dmu_buf_impl_t *db = dr->dr_dbuf;
4646 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4647 
4648 	mutex_enter(&db->db_mtx);
4649 	if (!BP_EQUAL(zio->io_bp, obp)) {
4650 		if (!BP_IS_HOLE(obp))
4651 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4652 		arc_release(dr->dt.dl.dr_data, db);
4653 	}
4654 	mutex_exit(&db->db_mtx);
4655 
4656 	dbuf_write_done(zio, NULL, db);
4657 
4658 	if (zio->io_abd != NULL)
4659 		abd_put(zio->io_abd);
4660 }
4661 
4662 typedef struct dbuf_remap_impl_callback_arg {
4663 	objset_t	*drica_os;
4664 	uint64_t	drica_blk_birth;
4665 	dmu_tx_t	*drica_tx;
4666 } dbuf_remap_impl_callback_arg_t;
4667 
4668 static void
4669 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4670     void *arg)
4671 {
4672 	dbuf_remap_impl_callback_arg_t *drica = arg;
4673 	objset_t *os = drica->drica_os;
4674 	spa_t *spa = dmu_objset_spa(os);
4675 	dmu_tx_t *tx = drica->drica_tx;
4676 
4677 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4678 
4679 	if (os == spa_meta_objset(spa)) {
4680 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4681 	} else {
4682 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4683 		    size, drica->drica_blk_birth, tx);
4684 	}
4685 }
4686 
4687 static void
4688 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4689 {
4690 	blkptr_t bp_copy = *bp;
4691 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4692 	dbuf_remap_impl_callback_arg_t drica;
4693 
4694 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4695 
4696 	drica.drica_os = dn->dn_objset;
4697 	drica.drica_blk_birth = bp->blk_birth;
4698 	drica.drica_tx = tx;
4699 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4700 	    &drica)) {
4701 		/*
4702 		 * If the blkptr being remapped is tracked by a livelist,
4703 		 * then we need to make sure the livelist reflects the update.
4704 		 * First, cancel out the old blkptr by appending a 'FREE'
4705 		 * entry. Next, add an 'ALLOC' to track the new version. This
4706 		 * way we avoid trying to free an inaccurate blkptr at delete.
4707 		 * Note that embedded blkptrs are not tracked in livelists.
4708 		 */
4709 		if (dn->dn_objset != spa_meta_objset(spa)) {
4710 			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4711 			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4712 			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4713 				ASSERT(!BP_IS_EMBEDDED(bp));
4714 				ASSERT(dsl_dir_is_clone(ds->ds_dir));
4715 				ASSERT(spa_feature_is_enabled(spa,
4716 				    SPA_FEATURE_LIVELIST));
4717 				bplist_append(&ds->ds_dir->dd_pending_frees,
4718 				    bp);
4719 				bplist_append(&ds->ds_dir->dd_pending_allocs,
4720 				    &bp_copy);
4721 			}
4722 		}
4723 
4724 		/*
4725 		 * The db_rwlock prevents dbuf_read_impl() from
4726 		 * dereferencing the BP while we are changing it.  To
4727 		 * avoid lock contention, only grab it when we are actually
4728 		 * changing the BP.
4729 		 */
4730 		if (rw != NULL)
4731 			rw_enter(rw, RW_WRITER);
4732 		*bp = bp_copy;
4733 		if (rw != NULL)
4734 			rw_exit(rw);
4735 	}
4736 }
4737 
4738 /*
4739  * Remap any existing BP's to concrete vdevs, if possible.
4740  */
4741 static void
4742 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4743 {
4744 	spa_t *spa = dmu_objset_spa(db->db_objset);
4745 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4746 
4747 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4748 		return;
4749 
4750 	if (db->db_level > 0) {
4751 		blkptr_t *bp = db->db.db_data;
4752 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4753 			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4754 		}
4755 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4756 		dnode_phys_t *dnp = db->db.db_data;
4757 		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4758 		    DMU_OT_DNODE);
4759 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4760 		    i += dnp[i].dn_extra_slots + 1) {
4761 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4762 				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4763 				    &dn->dn_dbuf->db_rwlock);
4764 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4765 				    tx);
4766 			}
4767 		}
4768 	}
4769 }
4770 
4771 
4772 /* Issue I/O to commit a dirty buffer to disk. */
4773 static void
4774 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4775 {
4776 	dmu_buf_impl_t *db = dr->dr_dbuf;
4777 	dnode_t *dn = dr->dr_dnode;
4778 	objset_t *os;
4779 	dmu_buf_impl_t *parent = db->db_parent;
4780 	uint64_t txg = tx->tx_txg;
4781 	zbookmark_phys_t zb;
4782 	zio_prop_t zp;
4783 	zio_t *pio; /* parent I/O */
4784 	int wp_flag = 0;
4785 
4786 	ASSERT(dmu_tx_is_syncing(tx));
4787 
4788 	os = dn->dn_objset;
4789 
4790 	if (db->db_state != DB_NOFILL) {
4791 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4792 			/*
4793 			 * Private object buffers are released here rather
4794 			 * than in dbuf_dirty() since they are only modified
4795 			 * in the syncing context and we don't want the
4796 			 * overhead of making multiple copies of the data.
4797 			 */
4798 			if (BP_IS_HOLE(db->db_blkptr)) {
4799 				arc_buf_thaw(data);
4800 			} else {
4801 				dbuf_release_bp(db);
4802 			}
4803 			dbuf_remap(dn, db, tx);
4804 		}
4805 	}
4806 
4807 	if (parent != dn->dn_dbuf) {
4808 		/* Our parent is an indirect block. */
4809 		/* We have a dirty parent that has been scheduled for write. */
4810 		ASSERT(parent && parent->db_data_pending);
4811 		/* Our parent's buffer is one level closer to the dnode. */
4812 		ASSERT(db->db_level == parent->db_level-1);
4813 		/*
4814 		 * We're about to modify our parent's db_data by modifying
4815 		 * our block pointer, so the parent must be released.
4816 		 */
4817 		ASSERT(arc_released(parent->db_buf));
4818 		pio = parent->db_data_pending->dr_zio;
4819 	} else {
4820 		/* Our parent is the dnode itself. */
4821 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
4822 		    db->db_blkid != DMU_SPILL_BLKID) ||
4823 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
4824 		if (db->db_blkid != DMU_SPILL_BLKID)
4825 			ASSERT3P(db->db_blkptr, ==,
4826 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
4827 		pio = dn->dn_zio;
4828 	}
4829 
4830 	ASSERT(db->db_level == 0 || data == db->db_buf);
4831 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
4832 	ASSERT(pio);
4833 
4834 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
4835 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
4836 	    db->db.db_object, db->db_level, db->db_blkid);
4837 
4838 	if (db->db_blkid == DMU_SPILL_BLKID)
4839 		wp_flag = WP_SPILL;
4840 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
4841 
4842 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
4843 
4844 	/*
4845 	 * We copy the blkptr now (rather than when we instantiate the dirty
4846 	 * record), because its value can change between open context and
4847 	 * syncing context. We do not need to hold dn_struct_rwlock to read
4848 	 * db_blkptr because we are in syncing context.
4849 	 */
4850 	dr->dr_bp_copy = *db->db_blkptr;
4851 
4852 	if (db->db_level == 0 &&
4853 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
4854 		/*
4855 		 * The BP for this block has been provided by open context
4856 		 * (by dmu_sync() or dmu_buf_write_embedded()).
4857 		 */
4858 		abd_t *contents = (data != NULL) ?
4859 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
4860 
4861 		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
4862 		    contents, db->db.db_size, db->db.db_size, &zp,
4863 		    dbuf_write_override_ready, NULL, NULL,
4864 		    dbuf_write_override_done,
4865 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
4866 		mutex_enter(&db->db_mtx);
4867 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
4868 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
4869 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
4870 		mutex_exit(&db->db_mtx);
4871 	} else if (db->db_state == DB_NOFILL) {
4872 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
4873 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
4874 		dr->dr_zio = zio_write(pio, os->os_spa, txg,
4875 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
4876 		    dbuf_write_nofill_ready, NULL, NULL,
4877 		    dbuf_write_nofill_done, db,
4878 		    ZIO_PRIORITY_ASYNC_WRITE,
4879 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
4880 	} else {
4881 		ASSERT(arc_released(data));
4882 
4883 		/*
4884 		 * For indirect blocks, we want to setup the children
4885 		 * ready callback so that we can properly handle an indirect
4886 		 * block that only contains holes.
4887 		 */
4888 		arc_write_done_func_t *children_ready_cb = NULL;
4889 		if (db->db_level != 0)
4890 			children_ready_cb = dbuf_write_children_ready;
4891 
4892 		dr->dr_zio = arc_write(pio, os->os_spa, txg,
4893 		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
4894 		    &zp, dbuf_write_ready,
4895 		    children_ready_cb, dbuf_write_physdone,
4896 		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
4897 		    ZIO_FLAG_MUSTSUCCEED, &zb);
4898 	}
4899 }
4900 
4901 EXPORT_SYMBOL(dbuf_find);
4902 EXPORT_SYMBOL(dbuf_is_metadata);
4903 EXPORT_SYMBOL(dbuf_destroy);
4904 EXPORT_SYMBOL(dbuf_loan_arcbuf);
4905 EXPORT_SYMBOL(dbuf_whichblock);
4906 EXPORT_SYMBOL(dbuf_read);
4907 EXPORT_SYMBOL(dbuf_unoverride);
4908 EXPORT_SYMBOL(dbuf_free_range);
4909 EXPORT_SYMBOL(dbuf_new_size);
4910 EXPORT_SYMBOL(dbuf_release_bp);
4911 EXPORT_SYMBOL(dbuf_dirty);
4912 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
4913 EXPORT_SYMBOL(dmu_buf_will_dirty);
4914 EXPORT_SYMBOL(dmu_buf_is_dirty);
4915 EXPORT_SYMBOL(dmu_buf_will_not_fill);
4916 EXPORT_SYMBOL(dmu_buf_will_fill);
4917 EXPORT_SYMBOL(dmu_buf_fill_done);
4918 EXPORT_SYMBOL(dmu_buf_rele);
4919 EXPORT_SYMBOL(dbuf_assign_arcbuf);
4920 EXPORT_SYMBOL(dbuf_prefetch);
4921 EXPORT_SYMBOL(dbuf_hold_impl);
4922 EXPORT_SYMBOL(dbuf_hold);
4923 EXPORT_SYMBOL(dbuf_hold_level);
4924 EXPORT_SYMBOL(dbuf_create_bonus);
4925 EXPORT_SYMBOL(dbuf_spill_set_blksz);
4926 EXPORT_SYMBOL(dbuf_rm_spill);
4927 EXPORT_SYMBOL(dbuf_add_ref);
4928 EXPORT_SYMBOL(dbuf_rele);
4929 EXPORT_SYMBOL(dbuf_rele_and_unlock);
4930 EXPORT_SYMBOL(dbuf_refcount);
4931 EXPORT_SYMBOL(dbuf_sync_list);
4932 EXPORT_SYMBOL(dmu_buf_set_user);
4933 EXPORT_SYMBOL(dmu_buf_set_user_ie);
4934 EXPORT_SYMBOL(dmu_buf_get_user);
4935 EXPORT_SYMBOL(dmu_buf_get_blkptr);
4936 
4937 /* BEGIN CSTYLED */
4938 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
4939 	"Maximum size in bytes of the dbuf cache.");
4940 
4941 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
4942 	"Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
4943 	"directly.");
4944 
4945 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
4946 	"Percentage below dbuf_cache_max_bytes when the evict thread stops "
4947 	"evicting dbufs.");
4948 
4949 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
4950 	"Maximum size in bytes of the dbuf metadata cache.");
4951 
4952 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
4953 	"Set the size of the dbuf cache to a log2 fraction of arc size.");
4954 
4955 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
4956 	"Set the size of the dbuf metadata cache to a log2 fraction of arc "
4957 	"size.");
4958 /* END CSTYLED */
4959