xref: /freebsd/sys/contrib/openzfs/module/zfs/dbuf.c (revision 5fb307d29b364982acbde82cbf77db3cae486f8c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright (c) 2019, Klara Inc.
28  * Copyright (c) 2019, Allan Jude
29  * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
30  */
31 
32 #include <sys/zfs_context.h>
33 #include <sys/arc.h>
34 #include <sys/dmu.h>
35 #include <sys/dmu_send.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dbuf.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/spa.h>
43 #include <sys/zio.h>
44 #include <sys/dmu_zfetch.h>
45 #include <sys/sa.h>
46 #include <sys/sa_impl.h>
47 #include <sys/zfeature.h>
48 #include <sys/blkptr.h>
49 #include <sys/range_tree.h>
50 #include <sys/trace_zfs.h>
51 #include <sys/callb.h>
52 #include <sys/abd.h>
53 #include <sys/brt.h>
54 #include <sys/vdev.h>
55 #include <cityhash.h>
56 #include <sys/spa_impl.h>
57 #include <sys/wmsum.h>
58 #include <sys/vdev_impl.h>
59 
60 static kstat_t *dbuf_ksp;
61 
62 typedef struct dbuf_stats {
63 	/*
64 	 * Various statistics about the size of the dbuf cache.
65 	 */
66 	kstat_named_t cache_count;
67 	kstat_named_t cache_size_bytes;
68 	kstat_named_t cache_size_bytes_max;
69 	/*
70 	 * Statistics regarding the bounds on the dbuf cache size.
71 	 */
72 	kstat_named_t cache_target_bytes;
73 	kstat_named_t cache_lowater_bytes;
74 	kstat_named_t cache_hiwater_bytes;
75 	/*
76 	 * Total number of dbuf cache evictions that have occurred.
77 	 */
78 	kstat_named_t cache_total_evicts;
79 	/*
80 	 * The distribution of dbuf levels in the dbuf cache and
81 	 * the total size of all dbufs at each level.
82 	 */
83 	kstat_named_t cache_levels[DN_MAX_LEVELS];
84 	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
85 	/*
86 	 * Statistics about the dbuf hash table.
87 	 */
88 	kstat_named_t hash_hits;
89 	kstat_named_t hash_misses;
90 	kstat_named_t hash_collisions;
91 	kstat_named_t hash_elements;
92 	kstat_named_t hash_elements_max;
93 	/*
94 	 * Number of sublists containing more than one dbuf in the dbuf
95 	 * hash table. Keep track of the longest hash chain.
96 	 */
97 	kstat_named_t hash_chains;
98 	kstat_named_t hash_chain_max;
99 	/*
100 	 * Number of times a dbuf_create() discovers that a dbuf was
101 	 * already created and in the dbuf hash table.
102 	 */
103 	kstat_named_t hash_insert_race;
104 	/*
105 	 * Number of entries in the hash table dbuf and mutex arrays.
106 	 */
107 	kstat_named_t hash_table_count;
108 	kstat_named_t hash_mutex_count;
109 	/*
110 	 * Statistics about the size of the metadata dbuf cache.
111 	 */
112 	kstat_named_t metadata_cache_count;
113 	kstat_named_t metadata_cache_size_bytes;
114 	kstat_named_t metadata_cache_size_bytes_max;
115 	/*
116 	 * For diagnostic purposes, this is incremented whenever we can't add
117 	 * something to the metadata cache because it's full, and instead put
118 	 * the data in the regular dbuf cache.
119 	 */
120 	kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
122 
123 dbuf_stats_t dbuf_stats = {
124 	{ "cache_count",			KSTAT_DATA_UINT64 },
125 	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
126 	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
127 	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
128 	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
129 	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
130 	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
131 	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
132 	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
133 	{ "hash_hits",				KSTAT_DATA_UINT64 },
134 	{ "hash_misses",			KSTAT_DATA_UINT64 },
135 	{ "hash_collisions",			KSTAT_DATA_UINT64 },
136 	{ "hash_elements",			KSTAT_DATA_UINT64 },
137 	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
138 	{ "hash_chains",			KSTAT_DATA_UINT64 },
139 	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
140 	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
141 	{ "hash_table_count",			KSTAT_DATA_UINT64 },
142 	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
143 	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
144 	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
145 	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
146 	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
147 };
148 
149 struct {
150 	wmsum_t cache_count;
151 	wmsum_t cache_total_evicts;
152 	wmsum_t cache_levels[DN_MAX_LEVELS];
153 	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 	wmsum_t hash_hits;
155 	wmsum_t hash_misses;
156 	wmsum_t hash_collisions;
157 	wmsum_t hash_chains;
158 	wmsum_t hash_insert_race;
159 	wmsum_t metadata_cache_count;
160 	wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
162 
163 #define	DBUF_STAT_INCR(stat, val)	\
164 	wmsum_add(&dbuf_sums.stat, val);
165 #define	DBUF_STAT_DECR(stat, val)	\
166 	DBUF_STAT_INCR(stat, -(val));
167 #define	DBUF_STAT_BUMP(stat)		\
168 	DBUF_STAT_INCR(stat, 1);
169 #define	DBUF_STAT_BUMPDOWN(stat)	\
170 	DBUF_STAT_INCR(stat, -1);
171 #define	DBUF_STAT_MAX(stat, v) {					\
172 	uint64_t _m;							\
173 	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
174 	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 		continue;						\
176 }
177 
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
181 
182 /*
183  * Global data structures and functions for the dbuf cache.
184  */
185 static kmem_cache_t *dbuf_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
187 
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
192 
193 /*
194  * There are two dbuf caches; each dbuf can only be in one of them at a time.
195  *
196  * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197  *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198  *    that represent the metadata that describes filesystems/snapshots/
199  *    bookmarks/properties/etc. We only evict from this cache when we export a
200  *    pool, to short-circuit as much I/O as possible for all administrative
201  *    commands that need the metadata. There is no eviction policy for this
202  *    cache, because we try to only include types in it which would occupy a
203  *    very small amount of space per object but create a large impact on the
204  *    performance of these commands. Instead, after it reaches a maximum size
205  *    (which should only happen on very small memory systems with a very large
206  *    number of filesystem objects), we stop taking new dbufs into the
207  *    metadata cache, instead putting them in the normal dbuf cache.
208  *
209  * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210  *    are not currently held but have been recently released. These dbufs
211  *    are not eligible for arc eviction until they are aged out of the cache.
212  *    Dbufs that are aged out of the cache will be immediately destroyed and
213  *    become eligible for arc eviction.
214  *
215  * Dbufs are added to these caches once the last hold is released. If a dbuf is
216  * later accessed and still exists in the dbuf cache, then it will be removed
217  * from the cache and later re-added to the head of the cache.
218  *
219  * If a given dbuf meets the requirements for the metadata cache, it will go
220  * there, otherwise it will be considered for the generic LRU dbuf cache. The
221  * caches and the refcounts tracking their sizes are stored in an array indexed
222  * by those caches' matching enum values (from dbuf_cached_state_t).
223  */
224 typedef struct dbuf_cache {
225 	multilist_t cache;
226 	zfs_refcount_t size ____cacheline_aligned;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229 
230 /* Size limits for the caches */
231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
233 
234 /* Set the default sizes of the caches to log2 fraction of arc size */
235 static uint_t dbuf_cache_shift = 5;
236 static uint_t dbuf_metadata_cache_shift = 6;
237 
238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
239 static uint_t dbuf_mutex_cache_shift = 0;
240 
241 static unsigned long dbuf_cache_target_bytes(void);
242 static unsigned long dbuf_metadata_cache_target_bytes(void);
243 
244 /*
245  * The LRU dbuf cache uses a three-stage eviction policy:
246  *	- A low water marker designates when the dbuf eviction thread
247  *	should stop evicting from the dbuf cache.
248  *	- When we reach the maximum size (aka mid water mark), we
249  *	signal the eviction thread to run.
250  *	- The high water mark indicates when the eviction thread
251  *	is unable to keep up with the incoming load and eviction must
252  *	happen in the context of the calling thread.
253  *
254  * The dbuf cache:
255  *                                                 (max size)
256  *                                      low water   mid water   hi water
257  * +----------------------------------------+----------+----------+
258  * |                                        |          |          |
259  * |                                        |          |          |
260  * |                                        |          |          |
261  * |                                        |          |          |
262  * +----------------------------------------+----------+----------+
263  *                                        stop        signal     evict
264  *                                      evicting     eviction   directly
265  *                                                    thread
266  *
267  * The high and low water marks indicate the operating range for the eviction
268  * thread. The low water mark is, by default, 90% of the total size of the
269  * cache and the high water mark is at 110% (both of these percentages can be
270  * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
271  * respectively). The eviction thread will try to ensure that the cache remains
272  * within this range by waking up every second and checking if the cache is
273  * above the low water mark. The thread can also be woken up by callers adding
274  * elements into the cache if the cache is larger than the mid water (i.e max
275  * cache size). Once the eviction thread is woken up and eviction is required,
276  * it will continue evicting buffers until it's able to reduce the cache size
277  * to the low water mark. If the cache size continues to grow and hits the high
278  * water mark, then callers adding elements to the cache will begin to evict
279  * directly from the cache until the cache is no longer above the high water
280  * mark.
281  */
282 
283 /*
284  * The percentage above and below the maximum cache size.
285  */
286 static uint_t dbuf_cache_hiwater_pct = 10;
287 static uint_t dbuf_cache_lowater_pct = 10;
288 
289 static int
290 dbuf_cons(void *vdb, void *unused, int kmflag)
291 {
292 	(void) unused, (void) kmflag;
293 	dmu_buf_impl_t *db = vdb;
294 	memset(db, 0, sizeof (dmu_buf_impl_t));
295 
296 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
297 	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
298 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
299 	multilist_link_init(&db->db_cache_link);
300 	zfs_refcount_create(&db->db_holds);
301 
302 	return (0);
303 }
304 
305 static void
306 dbuf_dest(void *vdb, void *unused)
307 {
308 	(void) unused;
309 	dmu_buf_impl_t *db = vdb;
310 	mutex_destroy(&db->db_mtx);
311 	rw_destroy(&db->db_rwlock);
312 	cv_destroy(&db->db_changed);
313 	ASSERT(!multilist_link_active(&db->db_cache_link));
314 	zfs_refcount_destroy(&db->db_holds);
315 }
316 
317 /*
318  * dbuf hash table routines
319  */
320 static dbuf_hash_table_t dbuf_hash_table;
321 
322 /*
323  * We use Cityhash for this. It's fast, and has good hash properties without
324  * requiring any large static buffers.
325  */
326 static uint64_t
327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
328 {
329 	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
330 }
331 
332 #define	DTRACE_SET_STATE(db, why) \
333 	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
334 	    const char *, why)
335 
336 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
337 	((dbuf)->db.db_object == (obj) &&		\
338 	(dbuf)->db_objset == (os) &&			\
339 	(dbuf)->db_level == (level) &&			\
340 	(dbuf)->db_blkid == (blkid))
341 
342 dmu_buf_impl_t *
343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
344     uint64_t *hash_out)
345 {
346 	dbuf_hash_table_t *h = &dbuf_hash_table;
347 	uint64_t hv;
348 	uint64_t idx;
349 	dmu_buf_impl_t *db;
350 
351 	hv = dbuf_hash(os, obj, level, blkid);
352 	idx = hv & h->hash_table_mask;
353 
354 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
355 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
356 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
357 			mutex_enter(&db->db_mtx);
358 			if (db->db_state != DB_EVICTING) {
359 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 				return (db);
361 			}
362 			mutex_exit(&db->db_mtx);
363 		}
364 	}
365 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
366 	if (hash_out != NULL)
367 		*hash_out = hv;
368 	return (NULL);
369 }
370 
371 static dmu_buf_impl_t *
372 dbuf_find_bonus(objset_t *os, uint64_t object)
373 {
374 	dnode_t *dn;
375 	dmu_buf_impl_t *db = NULL;
376 
377 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
378 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
379 		if (dn->dn_bonus != NULL) {
380 			db = dn->dn_bonus;
381 			mutex_enter(&db->db_mtx);
382 		}
383 		rw_exit(&dn->dn_struct_rwlock);
384 		dnode_rele(dn, FTAG);
385 	}
386 	return (db);
387 }
388 
389 /*
390  * Insert an entry into the hash table.  If there is already an element
391  * equal to elem in the hash table, then the already existing element
392  * will be returned and the new element will not be inserted.
393  * Otherwise returns NULL.
394  */
395 static dmu_buf_impl_t *
396 dbuf_hash_insert(dmu_buf_impl_t *db)
397 {
398 	dbuf_hash_table_t *h = &dbuf_hash_table;
399 	objset_t *os = db->db_objset;
400 	uint64_t obj = db->db.db_object;
401 	int level = db->db_level;
402 	uint64_t blkid, idx;
403 	dmu_buf_impl_t *dbf;
404 	uint32_t i;
405 
406 	blkid = db->db_blkid;
407 	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
408 	idx = db->db_hash & h->hash_table_mask;
409 
410 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
411 	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
412 	    dbf = dbf->db_hash_next, i++) {
413 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
414 			mutex_enter(&dbf->db_mtx);
415 			if (dbf->db_state != DB_EVICTING) {
416 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
417 				return (dbf);
418 			}
419 			mutex_exit(&dbf->db_mtx);
420 		}
421 	}
422 
423 	if (i > 0) {
424 		DBUF_STAT_BUMP(hash_collisions);
425 		if (i == 1)
426 			DBUF_STAT_BUMP(hash_chains);
427 
428 		DBUF_STAT_MAX(hash_chain_max, i);
429 	}
430 
431 	mutex_enter(&db->db_mtx);
432 	db->db_hash_next = h->hash_table[idx];
433 	h->hash_table[idx] = db;
434 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
435 	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
436 	DBUF_STAT_MAX(hash_elements_max, he);
437 
438 	return (NULL);
439 }
440 
441 /*
442  * This returns whether this dbuf should be stored in the metadata cache, which
443  * is based on whether it's from one of the dnode types that store data related
444  * to traversing dataset hierarchies.
445  */
446 static boolean_t
447 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
448 {
449 	DB_DNODE_ENTER(db);
450 	dmu_object_type_t type = DB_DNODE(db)->dn_type;
451 	DB_DNODE_EXIT(db);
452 
453 	/* Check if this dbuf is one of the types we care about */
454 	if (DMU_OT_IS_METADATA_CACHED(type)) {
455 		/* If we hit this, then we set something up wrong in dmu_ot */
456 		ASSERT(DMU_OT_IS_METADATA(type));
457 
458 		/*
459 		 * Sanity check for small-memory systems: don't allocate too
460 		 * much memory for this purpose.
461 		 */
462 		if (zfs_refcount_count(
463 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
464 		    dbuf_metadata_cache_target_bytes()) {
465 			DBUF_STAT_BUMP(metadata_cache_overflow);
466 			return (B_FALSE);
467 		}
468 
469 		return (B_TRUE);
470 	}
471 
472 	return (B_FALSE);
473 }
474 
475 /*
476  * Remove an entry from the hash table.  It must be in the EVICTING state.
477  */
478 static void
479 dbuf_hash_remove(dmu_buf_impl_t *db)
480 {
481 	dbuf_hash_table_t *h = &dbuf_hash_table;
482 	uint64_t idx;
483 	dmu_buf_impl_t *dbf, **dbp;
484 
485 	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
486 	    db->db_blkid), ==, db->db_hash);
487 	idx = db->db_hash & h->hash_table_mask;
488 
489 	/*
490 	 * We mustn't hold db_mtx to maintain lock ordering:
491 	 * DBUF_HASH_MUTEX > db_mtx.
492 	 */
493 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
494 	ASSERT(db->db_state == DB_EVICTING);
495 	ASSERT(!MUTEX_HELD(&db->db_mtx));
496 
497 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
498 	dbp = &h->hash_table[idx];
499 	while ((dbf = *dbp) != db) {
500 		dbp = &dbf->db_hash_next;
501 		ASSERT(dbf != NULL);
502 	}
503 	*dbp = db->db_hash_next;
504 	db->db_hash_next = NULL;
505 	if (h->hash_table[idx] &&
506 	    h->hash_table[idx]->db_hash_next == NULL)
507 		DBUF_STAT_BUMPDOWN(hash_chains);
508 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
509 	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
510 }
511 
512 typedef enum {
513 	DBVU_EVICTING,
514 	DBVU_NOT_EVICTING
515 } dbvu_verify_type_t;
516 
517 static void
518 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
519 {
520 #ifdef ZFS_DEBUG
521 	int64_t holds;
522 
523 	if (db->db_user == NULL)
524 		return;
525 
526 	/* Only data blocks support the attachment of user data. */
527 	ASSERT(db->db_level == 0);
528 
529 	/* Clients must resolve a dbuf before attaching user data. */
530 	ASSERT(db->db.db_data != NULL);
531 	ASSERT3U(db->db_state, ==, DB_CACHED);
532 
533 	holds = zfs_refcount_count(&db->db_holds);
534 	if (verify_type == DBVU_EVICTING) {
535 		/*
536 		 * Immediate eviction occurs when holds == dirtycnt.
537 		 * For normal eviction buffers, holds is zero on
538 		 * eviction, except when dbuf_fix_old_data() calls
539 		 * dbuf_clear_data().  However, the hold count can grow
540 		 * during eviction even though db_mtx is held (see
541 		 * dmu_bonus_hold() for an example), so we can only
542 		 * test the generic invariant that holds >= dirtycnt.
543 		 */
544 		ASSERT3U(holds, >=, db->db_dirtycnt);
545 	} else {
546 		if (db->db_user_immediate_evict == TRUE)
547 			ASSERT3U(holds, >=, db->db_dirtycnt);
548 		else
549 			ASSERT3U(holds, >, 0);
550 	}
551 #endif
552 }
553 
554 static void
555 dbuf_evict_user(dmu_buf_impl_t *db)
556 {
557 	dmu_buf_user_t *dbu = db->db_user;
558 
559 	ASSERT(MUTEX_HELD(&db->db_mtx));
560 
561 	if (dbu == NULL)
562 		return;
563 
564 	dbuf_verify_user(db, DBVU_EVICTING);
565 	db->db_user = NULL;
566 
567 #ifdef ZFS_DEBUG
568 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
569 		*dbu->dbu_clear_on_evict_dbufp = NULL;
570 #endif
571 
572 	if (db->db_caching_status != DB_NO_CACHE) {
573 		/*
574 		 * This is a cached dbuf, so the size of the user data is
575 		 * included in its cached amount. We adjust it here because the
576 		 * user data has already been detached from the dbuf, and the
577 		 * sync functions are not supposed to touch it (the dbuf might
578 		 * not exist anymore by the time the sync functions run.
579 		 */
580 		uint64_t size = dbu->dbu_size;
581 		(void) zfs_refcount_remove_many(
582 		    &dbuf_caches[db->db_caching_status].size, size, db);
583 		if (db->db_caching_status == DB_DBUF_CACHE)
584 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
585 	}
586 
587 	/*
588 	 * There are two eviction callbacks - one that we call synchronously
589 	 * and one that we invoke via a taskq.  The async one is useful for
590 	 * avoiding lock order reversals and limiting stack depth.
591 	 *
592 	 * Note that if we have a sync callback but no async callback,
593 	 * it's likely that the sync callback will free the structure
594 	 * containing the dbu.  In that case we need to take care to not
595 	 * dereference dbu after calling the sync evict func.
596 	 */
597 	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
598 
599 	if (dbu->dbu_evict_func_sync != NULL)
600 		dbu->dbu_evict_func_sync(dbu);
601 
602 	if (has_async) {
603 		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
604 		    dbu, 0, &dbu->dbu_tqent);
605 	}
606 }
607 
608 boolean_t
609 dbuf_is_metadata(dmu_buf_impl_t *db)
610 {
611 	/*
612 	 * Consider indirect blocks and spill blocks to be meta data.
613 	 */
614 	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
615 		return (B_TRUE);
616 	} else {
617 		boolean_t is_metadata;
618 
619 		DB_DNODE_ENTER(db);
620 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
621 		DB_DNODE_EXIT(db);
622 
623 		return (is_metadata);
624 	}
625 }
626 
627 /*
628  * We want to exclude buffers that are on a special allocation class from
629  * L2ARC.
630  */
631 boolean_t
632 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
633 {
634 	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
635 	    (db->db_objset->os_secondary_cache ==
636 	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
637 		if (l2arc_exclude_special == 0)
638 			return (B_TRUE);
639 
640 		blkptr_t *bp = db->db_blkptr;
641 		if (bp == NULL || BP_IS_HOLE(bp))
642 			return (B_FALSE);
643 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
644 		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
645 		vdev_t *vd = NULL;
646 
647 		if (vdev < rvd->vdev_children)
648 			vd = rvd->vdev_child[vdev];
649 
650 		if (vd == NULL)
651 			return (B_TRUE);
652 
653 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
654 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
655 			return (B_TRUE);
656 	}
657 	return (B_FALSE);
658 }
659 
660 static inline boolean_t
661 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
662 {
663 	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
664 	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
665 	    (level > 0 ||
666 	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
667 		if (l2arc_exclude_special == 0)
668 			return (B_TRUE);
669 
670 		if (bp == NULL || BP_IS_HOLE(bp))
671 			return (B_FALSE);
672 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
673 		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
674 		vdev_t *vd = NULL;
675 
676 		if (vdev < rvd->vdev_children)
677 			vd = rvd->vdev_child[vdev];
678 
679 		if (vd == NULL)
680 			return (B_TRUE);
681 
682 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
683 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
684 			return (B_TRUE);
685 	}
686 	return (B_FALSE);
687 }
688 
689 
690 /*
691  * This function *must* return indices evenly distributed between all
692  * sublists of the multilist. This is needed due to how the dbuf eviction
693  * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
694  * distributed between all sublists and uses this assumption when
695  * deciding which sublist to evict from and how much to evict from it.
696  */
697 static unsigned int
698 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
699 {
700 	dmu_buf_impl_t *db = obj;
701 
702 	/*
703 	 * The assumption here, is the hash value for a given
704 	 * dmu_buf_impl_t will remain constant throughout it's lifetime
705 	 * (i.e. it's objset, object, level and blkid fields don't change).
706 	 * Thus, we don't need to store the dbuf's sublist index
707 	 * on insertion, as this index can be recalculated on removal.
708 	 *
709 	 * Also, the low order bits of the hash value are thought to be
710 	 * distributed evenly. Otherwise, in the case that the multilist
711 	 * has a power of two number of sublists, each sublists' usage
712 	 * would not be evenly distributed. In this context full 64bit
713 	 * division would be a waste of time, so limit it to 32 bits.
714 	 */
715 	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
716 	    db->db_level, db->db_blkid) %
717 	    multilist_get_num_sublists(ml));
718 }
719 
720 /*
721  * The target size of the dbuf cache can grow with the ARC target,
722  * unless limited by the tunable dbuf_cache_max_bytes.
723  */
724 static inline unsigned long
725 dbuf_cache_target_bytes(void)
726 {
727 	return (MIN(dbuf_cache_max_bytes,
728 	    arc_target_bytes() >> dbuf_cache_shift));
729 }
730 
731 /*
732  * The target size of the dbuf metadata cache can grow with the ARC target,
733  * unless limited by the tunable dbuf_metadata_cache_max_bytes.
734  */
735 static inline unsigned long
736 dbuf_metadata_cache_target_bytes(void)
737 {
738 	return (MIN(dbuf_metadata_cache_max_bytes,
739 	    arc_target_bytes() >> dbuf_metadata_cache_shift));
740 }
741 
742 static inline uint64_t
743 dbuf_cache_hiwater_bytes(void)
744 {
745 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
746 	return (dbuf_cache_target +
747 	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
748 }
749 
750 static inline uint64_t
751 dbuf_cache_lowater_bytes(void)
752 {
753 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
754 	return (dbuf_cache_target -
755 	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
756 }
757 
758 static inline boolean_t
759 dbuf_cache_above_lowater(void)
760 {
761 	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
762 	    dbuf_cache_lowater_bytes());
763 }
764 
765 /*
766  * Evict the oldest eligible dbuf from the dbuf cache.
767  */
768 static void
769 dbuf_evict_one(void)
770 {
771 	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
772 	multilist_sublist_t *mls = multilist_sublist_lock(
773 	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);
774 
775 	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
776 
777 	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
778 	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
779 		db = multilist_sublist_prev(mls, db);
780 	}
781 
782 	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
783 	    multilist_sublist_t *, mls);
784 
785 	if (db != NULL) {
786 		multilist_sublist_remove(mls, db);
787 		multilist_sublist_unlock(mls);
788 		uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
789 		(void) zfs_refcount_remove_many(
790 		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
791 		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
792 		DBUF_STAT_BUMPDOWN(cache_count);
793 		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
794 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
795 		db->db_caching_status = DB_NO_CACHE;
796 		dbuf_destroy(db);
797 		DBUF_STAT_BUMP(cache_total_evicts);
798 	} else {
799 		multilist_sublist_unlock(mls);
800 	}
801 }
802 
803 /*
804  * The dbuf evict thread is responsible for aging out dbufs from the
805  * cache. Once the cache has reached it's maximum size, dbufs are removed
806  * and destroyed. The eviction thread will continue running until the size
807  * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
808  * out of the cache it is destroyed and becomes eligible for arc eviction.
809  */
810 static __attribute__((noreturn)) void
811 dbuf_evict_thread(void *unused)
812 {
813 	(void) unused;
814 	callb_cpr_t cpr;
815 
816 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
817 
818 	mutex_enter(&dbuf_evict_lock);
819 	while (!dbuf_evict_thread_exit) {
820 		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
821 			CALLB_CPR_SAFE_BEGIN(&cpr);
822 			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
823 			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
824 			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
825 		}
826 		mutex_exit(&dbuf_evict_lock);
827 
828 		/*
829 		 * Keep evicting as long as we're above the low water mark
830 		 * for the cache. We do this without holding the locks to
831 		 * minimize lock contention.
832 		 */
833 		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
834 			dbuf_evict_one();
835 		}
836 
837 		mutex_enter(&dbuf_evict_lock);
838 	}
839 
840 	dbuf_evict_thread_exit = B_FALSE;
841 	cv_broadcast(&dbuf_evict_cv);
842 	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
843 	thread_exit();
844 }
845 
846 /*
847  * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
848  * If the dbuf cache is at its high water mark, then evict a dbuf from the
849  * dbuf cache using the caller's context.
850  */
851 static void
852 dbuf_evict_notify(uint64_t size)
853 {
854 	/*
855 	 * We check if we should evict without holding the dbuf_evict_lock,
856 	 * because it's OK to occasionally make the wrong decision here,
857 	 * and grabbing the lock results in massive lock contention.
858 	 */
859 	if (size > dbuf_cache_target_bytes()) {
860 		if (size > dbuf_cache_hiwater_bytes())
861 			dbuf_evict_one();
862 		cv_signal(&dbuf_evict_cv);
863 	}
864 }
865 
866 static int
867 dbuf_kstat_update(kstat_t *ksp, int rw)
868 {
869 	dbuf_stats_t *ds = ksp->ks_data;
870 	dbuf_hash_table_t *h = &dbuf_hash_table;
871 
872 	if (rw == KSTAT_WRITE)
873 		return (SET_ERROR(EACCES));
874 
875 	ds->cache_count.value.ui64 =
876 	    wmsum_value(&dbuf_sums.cache_count);
877 	ds->cache_size_bytes.value.ui64 =
878 	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
879 	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
880 	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
881 	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
882 	ds->cache_total_evicts.value.ui64 =
883 	    wmsum_value(&dbuf_sums.cache_total_evicts);
884 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
885 		ds->cache_levels[i].value.ui64 =
886 		    wmsum_value(&dbuf_sums.cache_levels[i]);
887 		ds->cache_levels_bytes[i].value.ui64 =
888 		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
889 	}
890 	ds->hash_hits.value.ui64 =
891 	    wmsum_value(&dbuf_sums.hash_hits);
892 	ds->hash_misses.value.ui64 =
893 	    wmsum_value(&dbuf_sums.hash_misses);
894 	ds->hash_collisions.value.ui64 =
895 	    wmsum_value(&dbuf_sums.hash_collisions);
896 	ds->hash_chains.value.ui64 =
897 	    wmsum_value(&dbuf_sums.hash_chains);
898 	ds->hash_insert_race.value.ui64 =
899 	    wmsum_value(&dbuf_sums.hash_insert_race);
900 	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
901 	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
902 	ds->metadata_cache_count.value.ui64 =
903 	    wmsum_value(&dbuf_sums.metadata_cache_count);
904 	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
905 	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
906 	ds->metadata_cache_overflow.value.ui64 =
907 	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
908 	return (0);
909 }
910 
911 void
912 dbuf_init(void)
913 {
914 	uint64_t hmsize, hsize = 1ULL << 16;
915 	dbuf_hash_table_t *h = &dbuf_hash_table;
916 
917 	/*
918 	 * The hash table is big enough to fill one eighth of physical memory
919 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
920 	 * By default, the table will take up
921 	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
922 	 */
923 	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
924 		hsize <<= 1;
925 
926 	h->hash_table = NULL;
927 	while (h->hash_table == NULL) {
928 		h->hash_table_mask = hsize - 1;
929 
930 		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
931 		if (h->hash_table == NULL)
932 			hsize >>= 1;
933 
934 		ASSERT3U(hsize, >=, 1ULL << 10);
935 	}
936 
937 	/*
938 	 * The hash table buckets are protected by an array of mutexes where
939 	 * each mutex is reponsible for protecting 128 buckets.  A minimum
940 	 * array size of 8192 is targeted to avoid contention.
941 	 */
942 	if (dbuf_mutex_cache_shift == 0)
943 		hmsize = MAX(hsize >> 7, 1ULL << 13);
944 	else
945 		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
946 
947 	h->hash_mutexes = NULL;
948 	while (h->hash_mutexes == NULL) {
949 		h->hash_mutex_mask = hmsize - 1;
950 
951 		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
952 		    KM_SLEEP);
953 		if (h->hash_mutexes == NULL)
954 			hmsize >>= 1;
955 	}
956 
957 	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
958 	    sizeof (dmu_buf_impl_t),
959 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
960 
961 	for (int i = 0; i < hmsize; i++)
962 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
963 
964 	dbuf_stats_init(h);
965 
966 	/*
967 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
968 	 * configuration is not required.
969 	 */
970 	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
971 
972 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
973 		multilist_create(&dbuf_caches[dcs].cache,
974 		    sizeof (dmu_buf_impl_t),
975 		    offsetof(dmu_buf_impl_t, db_cache_link),
976 		    dbuf_cache_multilist_index_func);
977 		zfs_refcount_create(&dbuf_caches[dcs].size);
978 	}
979 
980 	dbuf_evict_thread_exit = B_FALSE;
981 	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
982 	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
983 	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
984 	    NULL, 0, &p0, TS_RUN, minclsyspri);
985 
986 	wmsum_init(&dbuf_sums.cache_count, 0);
987 	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
988 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
989 		wmsum_init(&dbuf_sums.cache_levels[i], 0);
990 		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
991 	}
992 	wmsum_init(&dbuf_sums.hash_hits, 0);
993 	wmsum_init(&dbuf_sums.hash_misses, 0);
994 	wmsum_init(&dbuf_sums.hash_collisions, 0);
995 	wmsum_init(&dbuf_sums.hash_chains, 0);
996 	wmsum_init(&dbuf_sums.hash_insert_race, 0);
997 	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
998 	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
999 
1000 	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
1001 	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
1002 	    KSTAT_FLAG_VIRTUAL);
1003 	if (dbuf_ksp != NULL) {
1004 		for (int i = 0; i < DN_MAX_LEVELS; i++) {
1005 			snprintf(dbuf_stats.cache_levels[i].name,
1006 			    KSTAT_STRLEN, "cache_level_%d", i);
1007 			dbuf_stats.cache_levels[i].data_type =
1008 			    KSTAT_DATA_UINT64;
1009 			snprintf(dbuf_stats.cache_levels_bytes[i].name,
1010 			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
1011 			dbuf_stats.cache_levels_bytes[i].data_type =
1012 			    KSTAT_DATA_UINT64;
1013 		}
1014 		dbuf_ksp->ks_data = &dbuf_stats;
1015 		dbuf_ksp->ks_update = dbuf_kstat_update;
1016 		kstat_install(dbuf_ksp);
1017 	}
1018 }
1019 
1020 void
1021 dbuf_fini(void)
1022 {
1023 	dbuf_hash_table_t *h = &dbuf_hash_table;
1024 
1025 	dbuf_stats_destroy();
1026 
1027 	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1028 		mutex_destroy(&h->hash_mutexes[i]);
1029 
1030 	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1031 	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1032 	    sizeof (kmutex_t));
1033 
1034 	kmem_cache_destroy(dbuf_kmem_cache);
1035 	taskq_destroy(dbu_evict_taskq);
1036 
1037 	mutex_enter(&dbuf_evict_lock);
1038 	dbuf_evict_thread_exit = B_TRUE;
1039 	while (dbuf_evict_thread_exit) {
1040 		cv_signal(&dbuf_evict_cv);
1041 		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1042 	}
1043 	mutex_exit(&dbuf_evict_lock);
1044 
1045 	mutex_destroy(&dbuf_evict_lock);
1046 	cv_destroy(&dbuf_evict_cv);
1047 
1048 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1049 		zfs_refcount_destroy(&dbuf_caches[dcs].size);
1050 		multilist_destroy(&dbuf_caches[dcs].cache);
1051 	}
1052 
1053 	if (dbuf_ksp != NULL) {
1054 		kstat_delete(dbuf_ksp);
1055 		dbuf_ksp = NULL;
1056 	}
1057 
1058 	wmsum_fini(&dbuf_sums.cache_count);
1059 	wmsum_fini(&dbuf_sums.cache_total_evicts);
1060 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
1061 		wmsum_fini(&dbuf_sums.cache_levels[i]);
1062 		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1063 	}
1064 	wmsum_fini(&dbuf_sums.hash_hits);
1065 	wmsum_fini(&dbuf_sums.hash_misses);
1066 	wmsum_fini(&dbuf_sums.hash_collisions);
1067 	wmsum_fini(&dbuf_sums.hash_chains);
1068 	wmsum_fini(&dbuf_sums.hash_insert_race);
1069 	wmsum_fini(&dbuf_sums.metadata_cache_count);
1070 	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1071 }
1072 
1073 /*
1074  * Other stuff.
1075  */
1076 
1077 #ifdef ZFS_DEBUG
1078 static void
1079 dbuf_verify(dmu_buf_impl_t *db)
1080 {
1081 	dnode_t *dn;
1082 	dbuf_dirty_record_t *dr;
1083 	uint32_t txg_prev;
1084 
1085 	ASSERT(MUTEX_HELD(&db->db_mtx));
1086 
1087 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1088 		return;
1089 
1090 	ASSERT(db->db_objset != NULL);
1091 	DB_DNODE_ENTER(db);
1092 	dn = DB_DNODE(db);
1093 	if (dn == NULL) {
1094 		ASSERT(db->db_parent == NULL);
1095 		ASSERT(db->db_blkptr == NULL);
1096 	} else {
1097 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
1098 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
1099 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
1100 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1101 		    db->db_blkid == DMU_SPILL_BLKID ||
1102 		    !avl_is_empty(&dn->dn_dbufs));
1103 	}
1104 	if (db->db_blkid == DMU_BONUS_BLKID) {
1105 		ASSERT(dn != NULL);
1106 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1107 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1108 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
1109 		ASSERT(dn != NULL);
1110 		ASSERT0(db->db.db_offset);
1111 	} else {
1112 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1113 	}
1114 
1115 	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1116 		ASSERT(dr->dr_dbuf == db);
1117 		txg_prev = dr->dr_txg;
1118 		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1119 		    dr = list_next(&db->db_dirty_records, dr)) {
1120 			ASSERT(dr->dr_dbuf == db);
1121 			ASSERT(txg_prev > dr->dr_txg);
1122 			txg_prev = dr->dr_txg;
1123 		}
1124 	}
1125 
1126 	/*
1127 	 * We can't assert that db_size matches dn_datablksz because it
1128 	 * can be momentarily different when another thread is doing
1129 	 * dnode_set_blksz().
1130 	 */
1131 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1132 		dr = db->db_data_pending;
1133 		/*
1134 		 * It should only be modified in syncing context, so
1135 		 * make sure we only have one copy of the data.
1136 		 */
1137 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1138 	}
1139 
1140 	/* verify db->db_blkptr */
1141 	if (db->db_blkptr) {
1142 		if (db->db_parent == dn->dn_dbuf) {
1143 			/* db is pointed to by the dnode */
1144 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1145 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1146 				ASSERT(db->db_parent == NULL);
1147 			else
1148 				ASSERT(db->db_parent != NULL);
1149 			if (db->db_blkid != DMU_SPILL_BLKID)
1150 				ASSERT3P(db->db_blkptr, ==,
1151 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
1152 		} else {
1153 			/* db is pointed to by an indirect block */
1154 			int epb __maybe_unused = db->db_parent->db.db_size >>
1155 			    SPA_BLKPTRSHIFT;
1156 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1157 			ASSERT3U(db->db_parent->db.db_object, ==,
1158 			    db->db.db_object);
1159 			/*
1160 			 * dnode_grow_indblksz() can make this fail if we don't
1161 			 * have the parent's rwlock.  XXX indblksz no longer
1162 			 * grows.  safe to do this now?
1163 			 */
1164 			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1165 				ASSERT3P(db->db_blkptr, ==,
1166 				    ((blkptr_t *)db->db_parent->db.db_data +
1167 				    db->db_blkid % epb));
1168 			}
1169 		}
1170 	}
1171 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1172 	    (db->db_buf == NULL || db->db_buf->b_data) &&
1173 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1174 	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1175 		/*
1176 		 * If the blkptr isn't set but they have nonzero data,
1177 		 * it had better be dirty, otherwise we'll lose that
1178 		 * data when we evict this buffer.
1179 		 *
1180 		 * There is an exception to this rule for indirect blocks; in
1181 		 * this case, if the indirect block is a hole, we fill in a few
1182 		 * fields on each of the child blocks (importantly, birth time)
1183 		 * to prevent hole birth times from being lost when you
1184 		 * partially fill in a hole.
1185 		 */
1186 		if (db->db_dirtycnt == 0) {
1187 			if (db->db_level == 0) {
1188 				uint64_t *buf = db->db.db_data;
1189 				int i;
1190 
1191 				for (i = 0; i < db->db.db_size >> 3; i++) {
1192 					ASSERT(buf[i] == 0);
1193 				}
1194 			} else {
1195 				blkptr_t *bps = db->db.db_data;
1196 				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1197 				    db->db.db_size);
1198 				/*
1199 				 * We want to verify that all the blkptrs in the
1200 				 * indirect block are holes, but we may have
1201 				 * automatically set up a few fields for them.
1202 				 * We iterate through each blkptr and verify
1203 				 * they only have those fields set.
1204 				 */
1205 				for (int i = 0;
1206 				    i < db->db.db_size / sizeof (blkptr_t);
1207 				    i++) {
1208 					blkptr_t *bp = &bps[i];
1209 					ASSERT(ZIO_CHECKSUM_IS_ZERO(
1210 					    &bp->blk_cksum));
1211 					ASSERT(
1212 					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1213 					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1214 					    DVA_IS_EMPTY(&bp->blk_dva[2]));
1215 					ASSERT0(bp->blk_fill);
1216 					ASSERT0(bp->blk_pad[0]);
1217 					ASSERT0(bp->blk_pad[1]);
1218 					ASSERT(!BP_IS_EMBEDDED(bp));
1219 					ASSERT(BP_IS_HOLE(bp));
1220 					ASSERT0(bp->blk_phys_birth);
1221 				}
1222 			}
1223 		}
1224 	}
1225 	DB_DNODE_EXIT(db);
1226 }
1227 #endif
1228 
1229 static void
1230 dbuf_clear_data(dmu_buf_impl_t *db)
1231 {
1232 	ASSERT(MUTEX_HELD(&db->db_mtx));
1233 	dbuf_evict_user(db);
1234 	ASSERT3P(db->db_buf, ==, NULL);
1235 	db->db.db_data = NULL;
1236 	if (db->db_state != DB_NOFILL) {
1237 		db->db_state = DB_UNCACHED;
1238 		DTRACE_SET_STATE(db, "clear data");
1239 	}
1240 }
1241 
1242 static void
1243 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1244 {
1245 	ASSERT(MUTEX_HELD(&db->db_mtx));
1246 	ASSERT(buf != NULL);
1247 
1248 	db->db_buf = buf;
1249 	ASSERT(buf->b_data != NULL);
1250 	db->db.db_data = buf->b_data;
1251 }
1252 
1253 static arc_buf_t *
1254 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1255 {
1256 	spa_t *spa = db->db_objset->os_spa;
1257 
1258 	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1259 }
1260 
1261 /*
1262  * Loan out an arc_buf for read.  Return the loaned arc_buf.
1263  */
1264 arc_buf_t *
1265 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1266 {
1267 	arc_buf_t *abuf;
1268 
1269 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1270 	mutex_enter(&db->db_mtx);
1271 	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1272 		int blksz = db->db.db_size;
1273 		spa_t *spa = db->db_objset->os_spa;
1274 
1275 		mutex_exit(&db->db_mtx);
1276 		abuf = arc_loan_buf(spa, B_FALSE, blksz);
1277 		memcpy(abuf->b_data, db->db.db_data, blksz);
1278 	} else {
1279 		abuf = db->db_buf;
1280 		arc_loan_inuse_buf(abuf, db);
1281 		db->db_buf = NULL;
1282 		dbuf_clear_data(db);
1283 		mutex_exit(&db->db_mtx);
1284 	}
1285 	return (abuf);
1286 }
1287 
1288 /*
1289  * Calculate which level n block references the data at the level 0 offset
1290  * provided.
1291  */
1292 uint64_t
1293 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1294 {
1295 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1296 		/*
1297 		 * The level n blkid is equal to the level 0 blkid divided by
1298 		 * the number of level 0s in a level n block.
1299 		 *
1300 		 * The level 0 blkid is offset >> datablkshift =
1301 		 * offset / 2^datablkshift.
1302 		 *
1303 		 * The number of level 0s in a level n is the number of block
1304 		 * pointers in an indirect block, raised to the power of level.
1305 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1306 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1307 		 *
1308 		 * Thus, the level n blkid is: offset /
1309 		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1310 		 * = offset / 2^(datablkshift + level *
1311 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1312 		 * = offset >> (datablkshift + level *
1313 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1314 		 */
1315 
1316 		const unsigned exp = dn->dn_datablkshift +
1317 		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1318 
1319 		if (exp >= 8 * sizeof (offset)) {
1320 			/* This only happens on the highest indirection level */
1321 			ASSERT3U(level, ==, dn->dn_nlevels - 1);
1322 			return (0);
1323 		}
1324 
1325 		ASSERT3U(exp, <, 8 * sizeof (offset));
1326 
1327 		return (offset >> exp);
1328 	} else {
1329 		ASSERT3U(offset, <, dn->dn_datablksz);
1330 		return (0);
1331 	}
1332 }
1333 
1334 /*
1335  * This function is used to lock the parent of the provided dbuf. This should be
1336  * used when modifying or reading db_blkptr.
1337  */
1338 db_lock_type_t
1339 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1340 {
1341 	enum db_lock_type ret = DLT_NONE;
1342 	if (db->db_parent != NULL) {
1343 		rw_enter(&db->db_parent->db_rwlock, rw);
1344 		ret = DLT_PARENT;
1345 	} else if (dmu_objset_ds(db->db_objset) != NULL) {
1346 		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1347 		    tag);
1348 		ret = DLT_OBJSET;
1349 	}
1350 	/*
1351 	 * We only return a DLT_NONE lock when it's the top-most indirect block
1352 	 * of the meta-dnode of the MOS.
1353 	 */
1354 	return (ret);
1355 }
1356 
1357 /*
1358  * We need to pass the lock type in because it's possible that the block will
1359  * move from being the topmost indirect block in a dnode (and thus, have no
1360  * parent) to not the top-most via an indirection increase. This would cause a
1361  * panic if we didn't pass the lock type in.
1362  */
1363 void
1364 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1365 {
1366 	if (type == DLT_PARENT)
1367 		rw_exit(&db->db_parent->db_rwlock);
1368 	else if (type == DLT_OBJSET)
1369 		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1370 }
1371 
1372 static void
1373 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1374     arc_buf_t *buf, void *vdb)
1375 {
1376 	(void) zb, (void) bp;
1377 	dmu_buf_impl_t *db = vdb;
1378 
1379 	mutex_enter(&db->db_mtx);
1380 	ASSERT3U(db->db_state, ==, DB_READ);
1381 	/*
1382 	 * All reads are synchronous, so we must have a hold on the dbuf
1383 	 */
1384 	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1385 	ASSERT(db->db_buf == NULL);
1386 	ASSERT(db->db.db_data == NULL);
1387 	if (buf == NULL) {
1388 		/* i/o error */
1389 		ASSERT(zio == NULL || zio->io_error != 0);
1390 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1391 		ASSERT3P(db->db_buf, ==, NULL);
1392 		db->db_state = DB_UNCACHED;
1393 		DTRACE_SET_STATE(db, "i/o error");
1394 	} else if (db->db_level == 0 && db->db_freed_in_flight) {
1395 		/* freed in flight */
1396 		ASSERT(zio == NULL || zio->io_error == 0);
1397 		arc_release(buf, db);
1398 		memset(buf->b_data, 0, db->db.db_size);
1399 		arc_buf_freeze(buf);
1400 		db->db_freed_in_flight = FALSE;
1401 		dbuf_set_data(db, buf);
1402 		db->db_state = DB_CACHED;
1403 		DTRACE_SET_STATE(db, "freed in flight");
1404 	} else {
1405 		/* success */
1406 		ASSERT(zio == NULL || zio->io_error == 0);
1407 		dbuf_set_data(db, buf);
1408 		db->db_state = DB_CACHED;
1409 		DTRACE_SET_STATE(db, "successful read");
1410 	}
1411 	cv_broadcast(&db->db_changed);
1412 	dbuf_rele_and_unlock(db, NULL, B_FALSE);
1413 }
1414 
1415 /*
1416  * Shortcut for performing reads on bonus dbufs.  Returns
1417  * an error if we fail to verify the dnode associated with
1418  * a decrypted block. Otherwise success.
1419  */
1420 static int
1421 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1422 {
1423 	int bonuslen, max_bonuslen, err;
1424 
1425 	err = dbuf_read_verify_dnode_crypt(db, flags);
1426 	if (err)
1427 		return (err);
1428 
1429 	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1430 	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1431 	ASSERT(MUTEX_HELD(&db->db_mtx));
1432 	ASSERT(DB_DNODE_HELD(db));
1433 	ASSERT3U(bonuslen, <=, db->db.db_size);
1434 	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1435 	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1436 	if (bonuslen < max_bonuslen)
1437 		memset(db->db.db_data, 0, max_bonuslen);
1438 	if (bonuslen)
1439 		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1440 	db->db_state = DB_CACHED;
1441 	DTRACE_SET_STATE(db, "bonus buffer filled");
1442 	return (0);
1443 }
1444 
1445 static void
1446 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1447 {
1448 	blkptr_t *bps = db->db.db_data;
1449 	uint32_t indbs = 1ULL << dn->dn_indblkshift;
1450 	int n_bps = indbs >> SPA_BLKPTRSHIFT;
1451 
1452 	for (int i = 0; i < n_bps; i++) {
1453 		blkptr_t *bp = &bps[i];
1454 
1455 		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1456 		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1457 		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1458 		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1459 		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1460 		BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
1461 	}
1462 }
1463 
1464 /*
1465  * Handle reads on dbufs that are holes, if necessary.  This function
1466  * requires that the dbuf's mutex is held. Returns success (0) if action
1467  * was taken, ENOENT if no action was taken.
1468  */
1469 static int
1470 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1471 {
1472 	ASSERT(MUTEX_HELD(&db->db_mtx));
1473 
1474 	int is_hole = bp == NULL || BP_IS_HOLE(bp);
1475 	/*
1476 	 * For level 0 blocks only, if the above check fails:
1477 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1478 	 * processes the delete record and clears the bp while we are waiting
1479 	 * for the dn_mtx (resulting in a "no" from block_freed).
1480 	 */
1481 	if (!is_hole && db->db_level == 0)
1482 		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1483 
1484 	if (is_hole) {
1485 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1486 		memset(db->db.db_data, 0, db->db.db_size);
1487 
1488 		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1489 		    bp->blk_birth != 0) {
1490 			dbuf_handle_indirect_hole(db, dn, bp);
1491 		}
1492 		db->db_state = DB_CACHED;
1493 		DTRACE_SET_STATE(db, "hole read satisfied");
1494 		return (0);
1495 	}
1496 	return (ENOENT);
1497 }
1498 
1499 /*
1500  * This function ensures that, when doing a decrypting read of a block,
1501  * we make sure we have decrypted the dnode associated with it. We must do
1502  * this so that we ensure we are fully authenticating the checksum-of-MACs
1503  * tree from the root of the objset down to this block. Indirect blocks are
1504  * always verified against their secure checksum-of-MACs assuming that the
1505  * dnode containing them is correct. Now that we are doing a decrypting read,
1506  * we can be sure that the key is loaded and verify that assumption. This is
1507  * especially important considering that we always read encrypted dnode
1508  * blocks as raw data (without verifying their MACs) to start, and
1509  * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1510  */
1511 static int
1512 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1513 {
1514 	int err = 0;
1515 	objset_t *os = db->db_objset;
1516 	arc_buf_t *dnode_abuf;
1517 	dnode_t *dn;
1518 	zbookmark_phys_t zb;
1519 
1520 	ASSERT(MUTEX_HELD(&db->db_mtx));
1521 
1522 	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1523 	    !os->os_encrypted || os->os_raw_receive)
1524 		return (0);
1525 
1526 	DB_DNODE_ENTER(db);
1527 	dn = DB_DNODE(db);
1528 	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1529 
1530 	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1531 		DB_DNODE_EXIT(db);
1532 		return (0);
1533 	}
1534 
1535 	SET_BOOKMARK(&zb, dmu_objset_id(os),
1536 	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1537 	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1538 
1539 	/*
1540 	 * An error code of EACCES tells us that the key is still not
1541 	 * available. This is ok if we are only reading authenticated
1542 	 * (and therefore non-encrypted) blocks.
1543 	 */
1544 	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1545 	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1546 	    (db->db_blkid == DMU_BONUS_BLKID &&
1547 	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1548 		err = 0;
1549 
1550 	DB_DNODE_EXIT(db);
1551 
1552 	return (err);
1553 }
1554 
1555 /*
1556  * Drops db_mtx and the parent lock specified by dblt and tag before
1557  * returning.
1558  */
1559 static int
1560 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1561     db_lock_type_t dblt, const void *tag)
1562 {
1563 	dnode_t *dn;
1564 	zbookmark_phys_t zb;
1565 	uint32_t aflags = ARC_FLAG_NOWAIT;
1566 	int err, zio_flags;
1567 	blkptr_t bp, *bpp;
1568 
1569 	DB_DNODE_ENTER(db);
1570 	dn = DB_DNODE(db);
1571 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1572 	ASSERT(MUTEX_HELD(&db->db_mtx));
1573 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1574 	ASSERT(db->db_buf == NULL);
1575 	ASSERT(db->db_parent == NULL ||
1576 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1577 
1578 	if (db->db_blkid == DMU_BONUS_BLKID) {
1579 		err = dbuf_read_bonus(db, dn, flags);
1580 		goto early_unlock;
1581 	}
1582 
1583 	if (db->db_state == DB_UNCACHED) {
1584 		if (db->db_blkptr == NULL) {
1585 			bpp = NULL;
1586 		} else {
1587 			bp = *db->db_blkptr;
1588 			bpp = &bp;
1589 		}
1590 	} else {
1591 		dbuf_dirty_record_t *dr;
1592 
1593 		ASSERT3S(db->db_state, ==, DB_NOFILL);
1594 
1595 		/*
1596 		 * Block cloning: If we have a pending block clone,
1597 		 * we don't want to read the underlying block, but the content
1598 		 * of the block being cloned, so we have the most recent data.
1599 		 */
1600 		dr = list_head(&db->db_dirty_records);
1601 		if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
1602 			err = EIO;
1603 			goto early_unlock;
1604 		}
1605 		bp = dr->dt.dl.dr_overridden_by;
1606 		bpp = &bp;
1607 	}
1608 
1609 	err = dbuf_read_hole(db, dn, bpp);
1610 	if (err == 0)
1611 		goto early_unlock;
1612 
1613 	ASSERT(bpp != NULL);
1614 
1615 	/*
1616 	 * Any attempt to read a redacted block should result in an error. This
1617 	 * will never happen under normal conditions, but can be useful for
1618 	 * debugging purposes.
1619 	 */
1620 	if (BP_IS_REDACTED(bpp)) {
1621 		ASSERT(dsl_dataset_feature_is_active(
1622 		    db->db_objset->os_dsl_dataset,
1623 		    SPA_FEATURE_REDACTED_DATASETS));
1624 		err = SET_ERROR(EIO);
1625 		goto early_unlock;
1626 	}
1627 
1628 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1629 	    db->db.db_object, db->db_level, db->db_blkid);
1630 
1631 	/*
1632 	 * All bps of an encrypted os should have the encryption bit set.
1633 	 * If this is not true it indicates tampering and we report an error.
1634 	 */
1635 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1636 		spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
1637 		zfs_panic_recover("unencrypted block in encrypted "
1638 		    "object set %llu", dmu_objset_id(db->db_objset));
1639 		err = SET_ERROR(EIO);
1640 		goto early_unlock;
1641 	}
1642 
1643 	err = dbuf_read_verify_dnode_crypt(db, flags);
1644 	if (err != 0)
1645 		goto early_unlock;
1646 
1647 	DB_DNODE_EXIT(db);
1648 
1649 	db->db_state = DB_READ;
1650 	DTRACE_SET_STATE(db, "read issued");
1651 	mutex_exit(&db->db_mtx);
1652 
1653 	if (!DBUF_IS_CACHEABLE(db))
1654 		aflags |= ARC_FLAG_UNCACHED;
1655 	else if (dbuf_is_l2cacheable(db))
1656 		aflags |= ARC_FLAG_L2CACHE;
1657 
1658 	dbuf_add_ref(db, NULL);
1659 
1660 	zio_flags = (flags & DB_RF_CANFAIL) ?
1661 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1662 
1663 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1664 		zio_flags |= ZIO_FLAG_RAW;
1665 	/*
1666 	 * The zio layer will copy the provided blkptr later, but we have our
1667 	 * own copy so that we can release the parent's rwlock. We have to
1668 	 * do that so that if dbuf_read_done is called synchronously (on
1669 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1670 	 * parent's rwlock, which would be a lock ordering violation.
1671 	 */
1672 	dmu_buf_unlock_parent(db, dblt, tag);
1673 	(void) arc_read(zio, db->db_objset->os_spa, bpp,
1674 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1675 	    &aflags, &zb);
1676 	return (err);
1677 early_unlock:
1678 	DB_DNODE_EXIT(db);
1679 	mutex_exit(&db->db_mtx);
1680 	dmu_buf_unlock_parent(db, dblt, tag);
1681 	return (err);
1682 }
1683 
1684 /*
1685  * This is our just-in-time copy function.  It makes a copy of buffers that
1686  * have been modified in a previous transaction group before we access them in
1687  * the current active group.
1688  *
1689  * This function is used in three places: when we are dirtying a buffer for the
1690  * first time in a txg, when we are freeing a range in a dnode that includes
1691  * this buffer, and when we are accessing a buffer which was received compressed
1692  * and later referenced in a WRITE_BYREF record.
1693  *
1694  * Note that when we are called from dbuf_free_range() we do not put a hold on
1695  * the buffer, we just traverse the active dbuf list for the dnode.
1696  */
1697 static void
1698 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1699 {
1700 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1701 
1702 	ASSERT(MUTEX_HELD(&db->db_mtx));
1703 	ASSERT(db->db.db_data != NULL);
1704 	ASSERT(db->db_level == 0);
1705 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1706 
1707 	if (dr == NULL ||
1708 	    (dr->dt.dl.dr_data !=
1709 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1710 		return;
1711 
1712 	/*
1713 	 * If the last dirty record for this dbuf has not yet synced
1714 	 * and its referencing the dbuf data, either:
1715 	 *	reset the reference to point to a new copy,
1716 	 * or (if there a no active holders)
1717 	 *	just null out the current db_data pointer.
1718 	 */
1719 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1720 	if (db->db_blkid == DMU_BONUS_BLKID) {
1721 		dnode_t *dn = DB_DNODE(db);
1722 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1723 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1724 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1725 		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1726 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1727 		dnode_t *dn = DB_DNODE(db);
1728 		int size = arc_buf_size(db->db_buf);
1729 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1730 		spa_t *spa = db->db_objset->os_spa;
1731 		enum zio_compress compress_type =
1732 		    arc_get_compression(db->db_buf);
1733 		uint8_t complevel = arc_get_complevel(db->db_buf);
1734 
1735 		if (arc_is_encrypted(db->db_buf)) {
1736 			boolean_t byteorder;
1737 			uint8_t salt[ZIO_DATA_SALT_LEN];
1738 			uint8_t iv[ZIO_DATA_IV_LEN];
1739 			uint8_t mac[ZIO_DATA_MAC_LEN];
1740 
1741 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1742 			    iv, mac);
1743 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1744 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1745 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1746 			    compress_type, complevel);
1747 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1748 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1749 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1750 			    size, arc_buf_lsize(db->db_buf), compress_type,
1751 			    complevel);
1752 		} else {
1753 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1754 		}
1755 		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1756 	} else {
1757 		db->db_buf = NULL;
1758 		dbuf_clear_data(db);
1759 	}
1760 }
1761 
1762 int
1763 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1764 {
1765 	int err = 0;
1766 	boolean_t prefetch;
1767 	dnode_t *dn;
1768 
1769 	/*
1770 	 * We don't have to hold the mutex to check db_state because it
1771 	 * can't be freed while we have a hold on the buffer.
1772 	 */
1773 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1774 
1775 	DB_DNODE_ENTER(db);
1776 	dn = DB_DNODE(db);
1777 
1778 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1779 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1780 
1781 	mutex_enter(&db->db_mtx);
1782 	if (flags & DB_RF_PARTIAL_FIRST)
1783 		db->db_partial_read = B_TRUE;
1784 	else if (!(flags & DB_RF_PARTIAL_MORE))
1785 		db->db_partial_read = B_FALSE;
1786 	if (db->db_state == DB_CACHED) {
1787 		/*
1788 		 * Ensure that this block's dnode has been decrypted if
1789 		 * the caller has requested decrypted data.
1790 		 */
1791 		err = dbuf_read_verify_dnode_crypt(db, flags);
1792 
1793 		/*
1794 		 * If the arc buf is compressed or encrypted and the caller
1795 		 * requested uncompressed data, we need to untransform it
1796 		 * before returning. We also call arc_untransform() on any
1797 		 * unauthenticated blocks, which will verify their MAC if
1798 		 * the key is now available.
1799 		 */
1800 		if (err == 0 && db->db_buf != NULL &&
1801 		    (flags & DB_RF_NO_DECRYPT) == 0 &&
1802 		    (arc_is_encrypted(db->db_buf) ||
1803 		    arc_is_unauthenticated(db->db_buf) ||
1804 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1805 			spa_t *spa = dn->dn_objset->os_spa;
1806 			zbookmark_phys_t zb;
1807 
1808 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1809 			    db->db.db_object, db->db_level, db->db_blkid);
1810 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1811 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1812 			dbuf_set_data(db, db->db_buf);
1813 		}
1814 		mutex_exit(&db->db_mtx);
1815 		if (err == 0 && prefetch) {
1816 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1817 			    B_FALSE, flags & DB_RF_HAVESTRUCT);
1818 		}
1819 		DB_DNODE_EXIT(db);
1820 		DBUF_STAT_BUMP(hash_hits);
1821 	} else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1822 		boolean_t need_wait = B_FALSE;
1823 
1824 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1825 
1826 		if (zio == NULL && (db->db_state == DB_NOFILL ||
1827 		    (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1828 			spa_t *spa = dn->dn_objset->os_spa;
1829 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1830 			need_wait = B_TRUE;
1831 		}
1832 		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1833 		/*
1834 		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1835 		 * for us
1836 		 */
1837 		if (!err && prefetch) {
1838 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1839 			    db->db_state != DB_CACHED,
1840 			    flags & DB_RF_HAVESTRUCT);
1841 		}
1842 
1843 		DB_DNODE_EXIT(db);
1844 		DBUF_STAT_BUMP(hash_misses);
1845 
1846 		/*
1847 		 * If we created a zio_root we must execute it to avoid
1848 		 * leaking it, even if it isn't attached to any work due
1849 		 * to an error in dbuf_read_impl().
1850 		 */
1851 		if (need_wait) {
1852 			if (err == 0)
1853 				err = zio_wait(zio);
1854 			else
1855 				VERIFY0(zio_wait(zio));
1856 		}
1857 	} else {
1858 		/*
1859 		 * Another reader came in while the dbuf was in flight
1860 		 * between UNCACHED and CACHED.  Either a writer will finish
1861 		 * writing the buffer (sending the dbuf to CACHED) or the
1862 		 * first reader's request will reach the read_done callback
1863 		 * and send the dbuf to CACHED.  Otherwise, a failure
1864 		 * occurred and the dbuf went to UNCACHED.
1865 		 */
1866 		mutex_exit(&db->db_mtx);
1867 		if (prefetch) {
1868 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1869 			    B_TRUE, flags & DB_RF_HAVESTRUCT);
1870 		}
1871 		DB_DNODE_EXIT(db);
1872 		DBUF_STAT_BUMP(hash_misses);
1873 
1874 		/* Skip the wait per the caller's request. */
1875 		if ((flags & DB_RF_NEVERWAIT) == 0) {
1876 			mutex_enter(&db->db_mtx);
1877 			while (db->db_state == DB_READ ||
1878 			    db->db_state == DB_FILL) {
1879 				ASSERT(db->db_state == DB_READ ||
1880 				    (flags & DB_RF_HAVESTRUCT) == 0);
1881 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1882 				    db, zio_t *, zio);
1883 				cv_wait(&db->db_changed, &db->db_mtx);
1884 			}
1885 			if (db->db_state == DB_UNCACHED)
1886 				err = SET_ERROR(EIO);
1887 			mutex_exit(&db->db_mtx);
1888 		}
1889 	}
1890 
1891 	return (err);
1892 }
1893 
1894 static void
1895 dbuf_noread(dmu_buf_impl_t *db)
1896 {
1897 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1898 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1899 	mutex_enter(&db->db_mtx);
1900 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1901 		cv_wait(&db->db_changed, &db->db_mtx);
1902 	if (db->db_state == DB_UNCACHED) {
1903 		ASSERT(db->db_buf == NULL);
1904 		ASSERT(db->db.db_data == NULL);
1905 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1906 		db->db_state = DB_FILL;
1907 		DTRACE_SET_STATE(db, "assigning filled buffer");
1908 	} else if (db->db_state == DB_NOFILL) {
1909 		dbuf_clear_data(db);
1910 	} else {
1911 		ASSERT3U(db->db_state, ==, DB_CACHED);
1912 	}
1913 	mutex_exit(&db->db_mtx);
1914 }
1915 
1916 void
1917 dbuf_unoverride(dbuf_dirty_record_t *dr)
1918 {
1919 	dmu_buf_impl_t *db = dr->dr_dbuf;
1920 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1921 	uint64_t txg = dr->dr_txg;
1922 
1923 	ASSERT(MUTEX_HELD(&db->db_mtx));
1924 	/*
1925 	 * This assert is valid because dmu_sync() expects to be called by
1926 	 * a zilog's get_data while holding a range lock.  This call only
1927 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1928 	 */
1929 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1930 	ASSERT(db->db_level == 0);
1931 
1932 	if (db->db_blkid == DMU_BONUS_BLKID ||
1933 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1934 		return;
1935 
1936 	ASSERT(db->db_data_pending != dr);
1937 
1938 	/* free this block */
1939 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1940 		zio_free(db->db_objset->os_spa, txg, bp);
1941 
1942 	if (dr->dt.dl.dr_brtwrite) {
1943 		ASSERT0P(dr->dt.dl.dr_data);
1944 		dr->dt.dl.dr_data = db->db_buf;
1945 	}
1946 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1947 	dr->dt.dl.dr_nopwrite = B_FALSE;
1948 	dr->dt.dl.dr_brtwrite = B_FALSE;
1949 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1950 
1951 	/*
1952 	 * Release the already-written buffer, so we leave it in
1953 	 * a consistent dirty state.  Note that all callers are
1954 	 * modifying the buffer, so they will immediately do
1955 	 * another (redundant) arc_release().  Therefore, leave
1956 	 * the buf thawed to save the effort of freezing &
1957 	 * immediately re-thawing it.
1958 	 */
1959 	if (dr->dt.dl.dr_data)
1960 		arc_release(dr->dt.dl.dr_data, db);
1961 }
1962 
1963 /*
1964  * Evict (if its unreferenced) or clear (if its referenced) any level-0
1965  * data blocks in the free range, so that any future readers will find
1966  * empty blocks.
1967  */
1968 void
1969 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1970     dmu_tx_t *tx)
1971 {
1972 	dmu_buf_impl_t *db_search;
1973 	dmu_buf_impl_t *db, *db_next;
1974 	uint64_t txg = tx->tx_txg;
1975 	avl_index_t where;
1976 	dbuf_dirty_record_t *dr;
1977 
1978 	if (end_blkid > dn->dn_maxblkid &&
1979 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1980 		end_blkid = dn->dn_maxblkid;
1981 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1982 	    (u_longlong_t)end_blkid);
1983 
1984 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1985 	db_search->db_level = 0;
1986 	db_search->db_blkid = start_blkid;
1987 	db_search->db_state = DB_SEARCH;
1988 
1989 	mutex_enter(&dn->dn_dbufs_mtx);
1990 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1991 	ASSERT3P(db, ==, NULL);
1992 
1993 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1994 
1995 	for (; db != NULL; db = db_next) {
1996 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1997 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1998 
1999 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
2000 			break;
2001 		}
2002 		ASSERT3U(db->db_blkid, >=, start_blkid);
2003 
2004 		/* found a level 0 buffer in the range */
2005 		mutex_enter(&db->db_mtx);
2006 		if (dbuf_undirty(db, tx)) {
2007 			/* mutex has been dropped and dbuf destroyed */
2008 			continue;
2009 		}
2010 
2011 		if (db->db_state == DB_UNCACHED ||
2012 		    db->db_state == DB_NOFILL ||
2013 		    db->db_state == DB_EVICTING) {
2014 			ASSERT(db->db.db_data == NULL);
2015 			mutex_exit(&db->db_mtx);
2016 			continue;
2017 		}
2018 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2019 			/* will be handled in dbuf_read_done or dbuf_rele */
2020 			db->db_freed_in_flight = TRUE;
2021 			mutex_exit(&db->db_mtx);
2022 			continue;
2023 		}
2024 		if (zfs_refcount_count(&db->db_holds) == 0) {
2025 			ASSERT(db->db_buf);
2026 			dbuf_destroy(db);
2027 			continue;
2028 		}
2029 		/* The dbuf is referenced */
2030 
2031 		dr = list_head(&db->db_dirty_records);
2032 		if (dr != NULL) {
2033 			if (dr->dr_txg == txg) {
2034 				/*
2035 				 * This buffer is "in-use", re-adjust the file
2036 				 * size to reflect that this buffer may
2037 				 * contain new data when we sync.
2038 				 */
2039 				if (db->db_blkid != DMU_SPILL_BLKID &&
2040 				    db->db_blkid > dn->dn_maxblkid)
2041 					dn->dn_maxblkid = db->db_blkid;
2042 				dbuf_unoverride(dr);
2043 			} else {
2044 				/*
2045 				 * This dbuf is not dirty in the open context.
2046 				 * Either uncache it (if its not referenced in
2047 				 * the open context) or reset its contents to
2048 				 * empty.
2049 				 */
2050 				dbuf_fix_old_data(db, txg);
2051 			}
2052 		}
2053 		/* clear the contents if its cached */
2054 		if (db->db_state == DB_CACHED) {
2055 			ASSERT(db->db.db_data != NULL);
2056 			arc_release(db->db_buf, db);
2057 			rw_enter(&db->db_rwlock, RW_WRITER);
2058 			memset(db->db.db_data, 0, db->db.db_size);
2059 			rw_exit(&db->db_rwlock);
2060 			arc_buf_freeze(db->db_buf);
2061 		}
2062 
2063 		mutex_exit(&db->db_mtx);
2064 	}
2065 
2066 	mutex_exit(&dn->dn_dbufs_mtx);
2067 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
2068 }
2069 
2070 void
2071 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2072 {
2073 	arc_buf_t *buf, *old_buf;
2074 	dbuf_dirty_record_t *dr;
2075 	int osize = db->db.db_size;
2076 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2077 	dnode_t *dn;
2078 
2079 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2080 
2081 	DB_DNODE_ENTER(db);
2082 	dn = DB_DNODE(db);
2083 
2084 	/*
2085 	 * XXX we should be doing a dbuf_read, checking the return
2086 	 * value and returning that up to our callers
2087 	 */
2088 	dmu_buf_will_dirty(&db->db, tx);
2089 
2090 	/* create the data buffer for the new block */
2091 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2092 
2093 	/* copy old block data to the new block */
2094 	old_buf = db->db_buf;
2095 	memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2096 	/* zero the remainder */
2097 	if (size > osize)
2098 		memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2099 
2100 	mutex_enter(&db->db_mtx);
2101 	dbuf_set_data(db, buf);
2102 	arc_buf_destroy(old_buf, db);
2103 	db->db.db_size = size;
2104 
2105 	dr = list_head(&db->db_dirty_records);
2106 	/* dirty record added by dmu_buf_will_dirty() */
2107 	VERIFY(dr != NULL);
2108 	if (db->db_level == 0)
2109 		dr->dt.dl.dr_data = buf;
2110 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2111 	ASSERT3U(dr->dr_accounted, ==, osize);
2112 	dr->dr_accounted = size;
2113 	mutex_exit(&db->db_mtx);
2114 
2115 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2116 	DB_DNODE_EXIT(db);
2117 }
2118 
2119 void
2120 dbuf_release_bp(dmu_buf_impl_t *db)
2121 {
2122 	objset_t *os __maybe_unused = db->db_objset;
2123 
2124 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2125 	ASSERT(arc_released(os->os_phys_buf) ||
2126 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
2127 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2128 
2129 	(void) arc_release(db->db_buf, db);
2130 }
2131 
2132 /*
2133  * We already have a dirty record for this TXG, and we are being
2134  * dirtied again.
2135  */
2136 static void
2137 dbuf_redirty(dbuf_dirty_record_t *dr)
2138 {
2139 	dmu_buf_impl_t *db = dr->dr_dbuf;
2140 
2141 	ASSERT(MUTEX_HELD(&db->db_mtx));
2142 
2143 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2144 		/*
2145 		 * If this buffer has already been written out,
2146 		 * we now need to reset its state.
2147 		 */
2148 		dbuf_unoverride(dr);
2149 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2150 		    db->db_state != DB_NOFILL) {
2151 			/* Already released on initial dirty, so just thaw. */
2152 			ASSERT(arc_released(db->db_buf));
2153 			arc_buf_thaw(db->db_buf);
2154 		}
2155 	}
2156 }
2157 
2158 dbuf_dirty_record_t *
2159 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2160 {
2161 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2162 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2163 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2164 	ASSERT(dn->dn_maxblkid >= blkid);
2165 
2166 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2167 	list_link_init(&dr->dr_dirty_node);
2168 	list_link_init(&dr->dr_dbuf_node);
2169 	dr->dr_dnode = dn;
2170 	dr->dr_txg = tx->tx_txg;
2171 	dr->dt.dll.dr_blkid = blkid;
2172 	dr->dr_accounted = dn->dn_datablksz;
2173 
2174 	/*
2175 	 * There should not be any dbuf for the block that we're dirtying.
2176 	 * Otherwise the buffer contents could be inconsistent between the
2177 	 * dbuf and the lightweight dirty record.
2178 	 */
2179 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2180 	    NULL));
2181 
2182 	mutex_enter(&dn->dn_mtx);
2183 	int txgoff = tx->tx_txg & TXG_MASK;
2184 	if (dn->dn_free_ranges[txgoff] != NULL) {
2185 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2186 	}
2187 
2188 	if (dn->dn_nlevels == 1) {
2189 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2190 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2191 		mutex_exit(&dn->dn_mtx);
2192 		rw_exit(&dn->dn_struct_rwlock);
2193 		dnode_setdirty(dn, tx);
2194 	} else {
2195 		mutex_exit(&dn->dn_mtx);
2196 
2197 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2198 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2199 		    1, blkid >> epbs, FTAG);
2200 		rw_exit(&dn->dn_struct_rwlock);
2201 		if (parent_db == NULL) {
2202 			kmem_free(dr, sizeof (*dr));
2203 			return (NULL);
2204 		}
2205 		int err = dbuf_read(parent_db, NULL,
2206 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2207 		if (err != 0) {
2208 			dbuf_rele(parent_db, FTAG);
2209 			kmem_free(dr, sizeof (*dr));
2210 			return (NULL);
2211 		}
2212 
2213 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2214 		dbuf_rele(parent_db, FTAG);
2215 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2216 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2217 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2218 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2219 		dr->dr_parent = parent_dr;
2220 	}
2221 
2222 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2223 
2224 	return (dr);
2225 }
2226 
2227 dbuf_dirty_record_t *
2228 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2229 {
2230 	dnode_t *dn;
2231 	objset_t *os;
2232 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2233 	int txgoff = tx->tx_txg & TXG_MASK;
2234 	boolean_t drop_struct_rwlock = B_FALSE;
2235 
2236 	ASSERT(tx->tx_txg != 0);
2237 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2238 	DMU_TX_DIRTY_BUF(tx, db);
2239 
2240 	DB_DNODE_ENTER(db);
2241 	dn = DB_DNODE(db);
2242 	/*
2243 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2244 	 * objects may be dirtied in syncing context, but only if they
2245 	 * were already pre-dirtied in open context.
2246 	 */
2247 #ifdef ZFS_DEBUG
2248 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2249 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2250 		    RW_READER, FTAG);
2251 	}
2252 	ASSERT(!dmu_tx_is_syncing(tx) ||
2253 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2254 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2255 	    dn->dn_objset->os_dsl_dataset == NULL);
2256 	if (dn->dn_objset->os_dsl_dataset != NULL)
2257 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2258 #endif
2259 	/*
2260 	 * We make this assert for private objects as well, but after we
2261 	 * check if we're already dirty.  They are allowed to re-dirty
2262 	 * in syncing context.
2263 	 */
2264 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2265 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2266 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2267 
2268 	mutex_enter(&db->db_mtx);
2269 	/*
2270 	 * XXX make this true for indirects too?  The problem is that
2271 	 * transactions created with dmu_tx_create_assigned() from
2272 	 * syncing context don't bother holding ahead.
2273 	 */
2274 	ASSERT(db->db_level != 0 ||
2275 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2276 	    db->db_state == DB_NOFILL);
2277 
2278 	mutex_enter(&dn->dn_mtx);
2279 	dnode_set_dirtyctx(dn, tx, db);
2280 	if (tx->tx_txg > dn->dn_dirty_txg)
2281 		dn->dn_dirty_txg = tx->tx_txg;
2282 	mutex_exit(&dn->dn_mtx);
2283 
2284 	if (db->db_blkid == DMU_SPILL_BLKID)
2285 		dn->dn_have_spill = B_TRUE;
2286 
2287 	/*
2288 	 * If this buffer is already dirty, we're done.
2289 	 */
2290 	dr_head = list_head(&db->db_dirty_records);
2291 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2292 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2293 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2294 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2295 		DB_DNODE_EXIT(db);
2296 
2297 		dbuf_redirty(dr_next);
2298 		mutex_exit(&db->db_mtx);
2299 		return (dr_next);
2300 	}
2301 
2302 	/*
2303 	 * Only valid if not already dirty.
2304 	 */
2305 	ASSERT(dn->dn_object == 0 ||
2306 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2307 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2308 
2309 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2310 
2311 	/*
2312 	 * We should only be dirtying in syncing context if it's the
2313 	 * mos or we're initializing the os or it's a special object.
2314 	 * However, we are allowed to dirty in syncing context provided
2315 	 * we already dirtied it in open context.  Hence we must make
2316 	 * this assertion only if we're not already dirty.
2317 	 */
2318 	os = dn->dn_objset;
2319 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2320 #ifdef ZFS_DEBUG
2321 	if (dn->dn_objset->os_dsl_dataset != NULL)
2322 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2323 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2324 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2325 	if (dn->dn_objset->os_dsl_dataset != NULL)
2326 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2327 #endif
2328 	ASSERT(db->db.db_size != 0);
2329 
2330 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2331 
2332 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2333 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2334 	}
2335 
2336 	/*
2337 	 * If this buffer is dirty in an old transaction group we need
2338 	 * to make a copy of it so that the changes we make in this
2339 	 * transaction group won't leak out when we sync the older txg.
2340 	 */
2341 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2342 	list_link_init(&dr->dr_dirty_node);
2343 	list_link_init(&dr->dr_dbuf_node);
2344 	dr->dr_dnode = dn;
2345 	if (db->db_level == 0) {
2346 		void *data_old = db->db_buf;
2347 
2348 		if (db->db_state != DB_NOFILL) {
2349 			if (db->db_blkid == DMU_BONUS_BLKID) {
2350 				dbuf_fix_old_data(db, tx->tx_txg);
2351 				data_old = db->db.db_data;
2352 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2353 				/*
2354 				 * Release the data buffer from the cache so
2355 				 * that we can modify it without impacting
2356 				 * possible other users of this cached data
2357 				 * block.  Note that indirect blocks and
2358 				 * private objects are not released until the
2359 				 * syncing state (since they are only modified
2360 				 * then).
2361 				 */
2362 				arc_release(db->db_buf, db);
2363 				dbuf_fix_old_data(db, tx->tx_txg);
2364 				data_old = db->db_buf;
2365 			}
2366 			ASSERT(data_old != NULL);
2367 		}
2368 		dr->dt.dl.dr_data = data_old;
2369 	} else {
2370 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2371 		list_create(&dr->dt.di.dr_children,
2372 		    sizeof (dbuf_dirty_record_t),
2373 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2374 	}
2375 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2376 		dr->dr_accounted = db->db.db_size;
2377 	}
2378 	dr->dr_dbuf = db;
2379 	dr->dr_txg = tx->tx_txg;
2380 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2381 
2382 	/*
2383 	 * We could have been freed_in_flight between the dbuf_noread
2384 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2385 	 * happened after the free.
2386 	 */
2387 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2388 	    db->db_blkid != DMU_SPILL_BLKID) {
2389 		mutex_enter(&dn->dn_mtx);
2390 		if (dn->dn_free_ranges[txgoff] != NULL) {
2391 			range_tree_clear(dn->dn_free_ranges[txgoff],
2392 			    db->db_blkid, 1);
2393 		}
2394 		mutex_exit(&dn->dn_mtx);
2395 		db->db_freed_in_flight = FALSE;
2396 	}
2397 
2398 	/*
2399 	 * This buffer is now part of this txg
2400 	 */
2401 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2402 	db->db_dirtycnt += 1;
2403 	ASSERT3U(db->db_dirtycnt, <=, 3);
2404 
2405 	mutex_exit(&db->db_mtx);
2406 
2407 	if (db->db_blkid == DMU_BONUS_BLKID ||
2408 	    db->db_blkid == DMU_SPILL_BLKID) {
2409 		mutex_enter(&dn->dn_mtx);
2410 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2411 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2412 		mutex_exit(&dn->dn_mtx);
2413 		dnode_setdirty(dn, tx);
2414 		DB_DNODE_EXIT(db);
2415 		return (dr);
2416 	}
2417 
2418 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2419 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2420 		drop_struct_rwlock = B_TRUE;
2421 	}
2422 
2423 	/*
2424 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2425 	 * when we get to syncing context we will need to decrement its
2426 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2427 	 * syncing context won't have to wait for the i/o.
2428 	 */
2429 	if (db->db_blkptr != NULL) {
2430 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2431 		ddt_prefetch(os->os_spa, db->db_blkptr);
2432 		dmu_buf_unlock_parent(db, dblt, FTAG);
2433 	}
2434 
2435 	/*
2436 	 * We need to hold the dn_struct_rwlock to make this assertion,
2437 	 * because it protects dn_phys / dn_next_nlevels from changing.
2438 	 */
2439 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2440 	    dn->dn_phys->dn_nlevels > db->db_level ||
2441 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2442 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2443 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2444 
2445 
2446 	if (db->db_level == 0) {
2447 		ASSERT(!db->db_objset->os_raw_receive ||
2448 		    dn->dn_maxblkid >= db->db_blkid);
2449 		dnode_new_blkid(dn, db->db_blkid, tx,
2450 		    drop_struct_rwlock, B_FALSE);
2451 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2452 	}
2453 
2454 	if (db->db_level+1 < dn->dn_nlevels) {
2455 		dmu_buf_impl_t *parent = db->db_parent;
2456 		dbuf_dirty_record_t *di;
2457 		int parent_held = FALSE;
2458 
2459 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2460 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2461 			parent = dbuf_hold_level(dn, db->db_level + 1,
2462 			    db->db_blkid >> epbs, FTAG);
2463 			ASSERT(parent != NULL);
2464 			parent_held = TRUE;
2465 		}
2466 		if (drop_struct_rwlock)
2467 			rw_exit(&dn->dn_struct_rwlock);
2468 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2469 		di = dbuf_dirty(parent, tx);
2470 		if (parent_held)
2471 			dbuf_rele(parent, FTAG);
2472 
2473 		mutex_enter(&db->db_mtx);
2474 		/*
2475 		 * Since we've dropped the mutex, it's possible that
2476 		 * dbuf_undirty() might have changed this out from under us.
2477 		 */
2478 		if (list_head(&db->db_dirty_records) == dr ||
2479 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2480 			mutex_enter(&di->dt.di.dr_mtx);
2481 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2482 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2483 			list_insert_tail(&di->dt.di.dr_children, dr);
2484 			mutex_exit(&di->dt.di.dr_mtx);
2485 			dr->dr_parent = di;
2486 		}
2487 		mutex_exit(&db->db_mtx);
2488 	} else {
2489 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2490 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2491 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2492 		mutex_enter(&dn->dn_mtx);
2493 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2494 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2495 		mutex_exit(&dn->dn_mtx);
2496 		if (drop_struct_rwlock)
2497 			rw_exit(&dn->dn_struct_rwlock);
2498 	}
2499 
2500 	dnode_setdirty(dn, tx);
2501 	DB_DNODE_EXIT(db);
2502 	return (dr);
2503 }
2504 
2505 static void
2506 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2507 {
2508 	dmu_buf_impl_t *db = dr->dr_dbuf;
2509 
2510 	if (dr->dt.dl.dr_data != db->db.db_data) {
2511 		struct dnode *dn = dr->dr_dnode;
2512 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2513 
2514 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2515 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2516 	}
2517 	db->db_data_pending = NULL;
2518 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2519 	list_remove(&db->db_dirty_records, dr);
2520 	if (dr->dr_dbuf->db_level != 0) {
2521 		mutex_destroy(&dr->dt.di.dr_mtx);
2522 		list_destroy(&dr->dt.di.dr_children);
2523 	}
2524 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2525 	ASSERT3U(db->db_dirtycnt, >, 0);
2526 	db->db_dirtycnt -= 1;
2527 }
2528 
2529 /*
2530  * Undirty a buffer in the transaction group referenced by the given
2531  * transaction.  Return whether this evicted the dbuf.
2532  */
2533 boolean_t
2534 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2535 {
2536 	uint64_t txg = tx->tx_txg;
2537 	boolean_t brtwrite;
2538 
2539 	ASSERT(txg != 0);
2540 
2541 	/*
2542 	 * Due to our use of dn_nlevels below, this can only be called
2543 	 * in open context, unless we are operating on the MOS.
2544 	 * From syncing context, dn_nlevels may be different from the
2545 	 * dn_nlevels used when dbuf was dirtied.
2546 	 */
2547 	ASSERT(db->db_objset ==
2548 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2549 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2550 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2551 	ASSERT0(db->db_level);
2552 	ASSERT(MUTEX_HELD(&db->db_mtx));
2553 
2554 	/*
2555 	 * If this buffer is not dirty, we're done.
2556 	 */
2557 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2558 	if (dr == NULL)
2559 		return (B_FALSE);
2560 	ASSERT(dr->dr_dbuf == db);
2561 
2562 	brtwrite = dr->dt.dl.dr_brtwrite;
2563 	if (brtwrite) {
2564 		/*
2565 		 * We are freeing a block that we cloned in the same
2566 		 * transaction group.
2567 		 */
2568 		brt_pending_remove(dmu_objset_spa(db->db_objset),
2569 		    &dr->dt.dl.dr_overridden_by, tx);
2570 	}
2571 
2572 	dnode_t *dn = dr->dr_dnode;
2573 
2574 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2575 
2576 	ASSERT(db->db.db_size != 0);
2577 
2578 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2579 	    dr->dr_accounted, txg);
2580 
2581 	list_remove(&db->db_dirty_records, dr);
2582 
2583 	/*
2584 	 * Note that there are three places in dbuf_dirty()
2585 	 * where this dirty record may be put on a list.
2586 	 * Make sure to do a list_remove corresponding to
2587 	 * every one of those list_insert calls.
2588 	 */
2589 	if (dr->dr_parent) {
2590 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2591 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2592 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2593 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2594 	    db->db_level + 1 == dn->dn_nlevels) {
2595 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2596 		mutex_enter(&dn->dn_mtx);
2597 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2598 		mutex_exit(&dn->dn_mtx);
2599 	}
2600 
2601 	if (db->db_state != DB_NOFILL && !brtwrite) {
2602 		dbuf_unoverride(dr);
2603 
2604 		ASSERT(db->db_buf != NULL);
2605 		ASSERT(dr->dt.dl.dr_data != NULL);
2606 		if (dr->dt.dl.dr_data != db->db_buf)
2607 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2608 	}
2609 
2610 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2611 
2612 	ASSERT(db->db_dirtycnt > 0);
2613 	db->db_dirtycnt -= 1;
2614 
2615 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2616 		ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2617 		    arc_released(db->db_buf));
2618 		dbuf_destroy(db);
2619 		return (B_TRUE);
2620 	}
2621 
2622 	return (B_FALSE);
2623 }
2624 
2625 static void
2626 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2627 {
2628 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2629 	boolean_t undirty = B_FALSE;
2630 
2631 	ASSERT(tx->tx_txg != 0);
2632 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2633 
2634 	/*
2635 	 * Quick check for dirtiness.  For already dirty blocks, this
2636 	 * reduces runtime of this function by >90%, and overall performance
2637 	 * by 50% for some workloads (e.g. file deletion with indirect blocks
2638 	 * cached).
2639 	 */
2640 	mutex_enter(&db->db_mtx);
2641 
2642 	if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2643 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2644 		/*
2645 		 * It's possible that it is already dirty but not cached,
2646 		 * because there are some calls to dbuf_dirty() that don't
2647 		 * go through dmu_buf_will_dirty().
2648 		 */
2649 		if (dr != NULL) {
2650 			if (dr->dt.dl.dr_brtwrite) {
2651 				/*
2652 				 * Block cloning: If we are dirtying a cloned
2653 				 * block, we cannot simply redirty it, because
2654 				 * this dr has no data associated with it.
2655 				 * We will go through a full undirtying below,
2656 				 * before dirtying it again.
2657 				 */
2658 				undirty = B_TRUE;
2659 			} else {
2660 				/* This dbuf is already dirty and cached. */
2661 				dbuf_redirty(dr);
2662 				mutex_exit(&db->db_mtx);
2663 				return;
2664 			}
2665 		}
2666 	}
2667 	mutex_exit(&db->db_mtx);
2668 
2669 	DB_DNODE_ENTER(db);
2670 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2671 		flags |= DB_RF_HAVESTRUCT;
2672 	DB_DNODE_EXIT(db);
2673 
2674 	/*
2675 	 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2676 	 * want to make sure dbuf_read() will read the pending cloned block and
2677 	 * not the uderlying block that is being replaced. dbuf_undirty() will
2678 	 * do dbuf_unoverride(), so we will end up with cloned block content,
2679 	 * without overridden BP.
2680 	 */
2681 	(void) dbuf_read(db, NULL, flags);
2682 	if (undirty) {
2683 		mutex_enter(&db->db_mtx);
2684 		VERIFY(!dbuf_undirty(db, tx));
2685 		mutex_exit(&db->db_mtx);
2686 	}
2687 	(void) dbuf_dirty(db, tx);
2688 }
2689 
2690 void
2691 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2692 {
2693 	dmu_buf_will_dirty_impl(db_fake,
2694 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2695 }
2696 
2697 boolean_t
2698 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2699 {
2700 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2701 	dbuf_dirty_record_t *dr;
2702 
2703 	mutex_enter(&db->db_mtx);
2704 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2705 	mutex_exit(&db->db_mtx);
2706 	return (dr != NULL);
2707 }
2708 
2709 void
2710 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
2711 {
2712 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2713 
2714 	/*
2715 	 * Block cloning: We are going to clone into this block, so undirty
2716 	 * modifications done to this block so far in this txg. This includes
2717 	 * writes and clones into this block.
2718 	 */
2719 	mutex_enter(&db->db_mtx);
2720 	DBUF_VERIFY(db);
2721 	VERIFY(!dbuf_undirty(db, tx));
2722 	ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2723 	if (db->db_buf != NULL) {
2724 		arc_buf_destroy(db->db_buf, db);
2725 		db->db_buf = NULL;
2726 		dbuf_clear_data(db);
2727 	}
2728 
2729 	db->db_state = DB_NOFILL;
2730 	DTRACE_SET_STATE(db, "allocating NOFILL buffer for clone");
2731 
2732 	DBUF_VERIFY(db);
2733 	mutex_exit(&db->db_mtx);
2734 
2735 	dbuf_noread(db);
2736 	(void) dbuf_dirty(db, tx);
2737 }
2738 
2739 void
2740 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2741 {
2742 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2743 
2744 	mutex_enter(&db->db_mtx);
2745 	db->db_state = DB_NOFILL;
2746 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2747 	mutex_exit(&db->db_mtx);
2748 
2749 	dbuf_noread(db);
2750 	(void) dbuf_dirty(db, tx);
2751 }
2752 
2753 void
2754 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2755 {
2756 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2757 
2758 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2759 	ASSERT(tx->tx_txg != 0);
2760 	ASSERT(db->db_level == 0);
2761 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2762 
2763 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2764 	    dmu_tx_private_ok(tx));
2765 
2766 	mutex_enter(&db->db_mtx);
2767 	if (db->db_state == DB_NOFILL) {
2768 		/*
2769 		 * Block cloning: We will be completely overwriting a block
2770 		 * cloned in this transaction group, so let's undirty the
2771 		 * pending clone and mark the block as uncached. This will be
2772 		 * as if the clone was never done.
2773 		 */
2774 		VERIFY(!dbuf_undirty(db, tx));
2775 		db->db_state = DB_UNCACHED;
2776 	}
2777 	mutex_exit(&db->db_mtx);
2778 
2779 	dbuf_noread(db);
2780 	(void) dbuf_dirty(db, tx);
2781 }
2782 
2783 /*
2784  * This function is effectively the same as dmu_buf_will_dirty(), but
2785  * indicates the caller expects raw encrypted data in the db, and provides
2786  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2787  * blkptr_t when this dbuf is written.  This is only used for blocks of
2788  * dnodes, during raw receive.
2789  */
2790 void
2791 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2792     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2793 {
2794 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2795 	dbuf_dirty_record_t *dr;
2796 
2797 	/*
2798 	 * dr_has_raw_params is only processed for blocks of dnodes
2799 	 * (see dbuf_sync_dnode_leaf_crypt()).
2800 	 */
2801 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2802 	ASSERT3U(db->db_level, ==, 0);
2803 	ASSERT(db->db_objset->os_raw_receive);
2804 
2805 	dmu_buf_will_dirty_impl(db_fake,
2806 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2807 
2808 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2809 
2810 	ASSERT3P(dr, !=, NULL);
2811 
2812 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2813 	dr->dt.dl.dr_byteorder = byteorder;
2814 	memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2815 	memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2816 	memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2817 }
2818 
2819 static void
2820 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2821 {
2822 	struct dirty_leaf *dl;
2823 	dbuf_dirty_record_t *dr;
2824 
2825 	dr = list_head(&db->db_dirty_records);
2826 	ASSERT3P(dr, !=, NULL);
2827 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2828 	dl = &dr->dt.dl;
2829 	dl->dr_overridden_by = *bp;
2830 	dl->dr_override_state = DR_OVERRIDDEN;
2831 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2832 }
2833 
2834 void
2835 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2836 {
2837 	(void) tx;
2838 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2839 	dbuf_states_t old_state;
2840 	mutex_enter(&db->db_mtx);
2841 	DBUF_VERIFY(db);
2842 
2843 	old_state = db->db_state;
2844 	db->db_state = DB_CACHED;
2845 	if (old_state == DB_FILL) {
2846 		if (db->db_level == 0 && db->db_freed_in_flight) {
2847 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2848 			/* we were freed while filling */
2849 			/* XXX dbuf_undirty? */
2850 			memset(db->db.db_data, 0, db->db.db_size);
2851 			db->db_freed_in_flight = FALSE;
2852 			DTRACE_SET_STATE(db,
2853 			    "fill done handling freed in flight");
2854 		} else {
2855 			DTRACE_SET_STATE(db, "fill done");
2856 		}
2857 		cv_broadcast(&db->db_changed);
2858 	}
2859 	mutex_exit(&db->db_mtx);
2860 }
2861 
2862 void
2863 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2864     bp_embedded_type_t etype, enum zio_compress comp,
2865     int uncompressed_size, int compressed_size, int byteorder,
2866     dmu_tx_t *tx)
2867 {
2868 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2869 	struct dirty_leaf *dl;
2870 	dmu_object_type_t type;
2871 	dbuf_dirty_record_t *dr;
2872 
2873 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2874 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2875 		    SPA_FEATURE_EMBEDDED_DATA));
2876 	}
2877 
2878 	DB_DNODE_ENTER(db);
2879 	type = DB_DNODE(db)->dn_type;
2880 	DB_DNODE_EXIT(db);
2881 
2882 	ASSERT0(db->db_level);
2883 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2884 
2885 	dmu_buf_will_not_fill(dbuf, tx);
2886 
2887 	dr = list_head(&db->db_dirty_records);
2888 	ASSERT3P(dr, !=, NULL);
2889 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2890 	dl = &dr->dt.dl;
2891 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2892 	    data, comp, uncompressed_size, compressed_size);
2893 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2894 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2895 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2896 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2897 
2898 	dl->dr_override_state = DR_OVERRIDDEN;
2899 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2900 }
2901 
2902 void
2903 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2904 {
2905 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2906 	dmu_object_type_t type;
2907 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2908 	    SPA_FEATURE_REDACTED_DATASETS));
2909 
2910 	DB_DNODE_ENTER(db);
2911 	type = DB_DNODE(db)->dn_type;
2912 	DB_DNODE_EXIT(db);
2913 
2914 	ASSERT0(db->db_level);
2915 	dmu_buf_will_not_fill(dbuf, tx);
2916 
2917 	blkptr_t bp = { { { {0} } } };
2918 	BP_SET_TYPE(&bp, type);
2919 	BP_SET_LEVEL(&bp, 0);
2920 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2921 	BP_SET_REDACTED(&bp);
2922 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2923 
2924 	dbuf_override_impl(db, &bp, tx);
2925 }
2926 
2927 /*
2928  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2929  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2930  */
2931 void
2932 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2933 {
2934 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2935 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2936 	ASSERT(db->db_level == 0);
2937 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2938 	ASSERT(buf != NULL);
2939 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2940 	ASSERT(tx->tx_txg != 0);
2941 
2942 	arc_return_buf(buf, db);
2943 	ASSERT(arc_released(buf));
2944 
2945 	mutex_enter(&db->db_mtx);
2946 
2947 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2948 		cv_wait(&db->db_changed, &db->db_mtx);
2949 
2950 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
2951 	    db->db_state == DB_NOFILL);
2952 
2953 	if (db->db_state == DB_CACHED &&
2954 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2955 		/*
2956 		 * In practice, we will never have a case where we have an
2957 		 * encrypted arc buffer while additional holds exist on the
2958 		 * dbuf. We don't handle this here so we simply assert that
2959 		 * fact instead.
2960 		 */
2961 		ASSERT(!arc_is_encrypted(buf));
2962 		mutex_exit(&db->db_mtx);
2963 		(void) dbuf_dirty(db, tx);
2964 		memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2965 		arc_buf_destroy(buf, db);
2966 		return;
2967 	}
2968 
2969 	if (db->db_state == DB_CACHED) {
2970 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2971 
2972 		ASSERT(db->db_buf != NULL);
2973 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2974 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2975 
2976 			if (!arc_released(db->db_buf)) {
2977 				ASSERT(dr->dt.dl.dr_override_state ==
2978 				    DR_OVERRIDDEN);
2979 				arc_release(db->db_buf, db);
2980 			}
2981 			dr->dt.dl.dr_data = buf;
2982 			arc_buf_destroy(db->db_buf, db);
2983 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2984 			arc_release(db->db_buf, db);
2985 			arc_buf_destroy(db->db_buf, db);
2986 		}
2987 		db->db_buf = NULL;
2988 	} else if (db->db_state == DB_NOFILL) {
2989 		/*
2990 		 * We will be completely replacing the cloned block.  In case
2991 		 * it was cloned in this transaction group, let's undirty the
2992 		 * pending clone and mark the block as uncached. This will be
2993 		 * as if the clone was never done.
2994 		 */
2995 		VERIFY(!dbuf_undirty(db, tx));
2996 		db->db_state = DB_UNCACHED;
2997 	}
2998 	ASSERT(db->db_buf == NULL);
2999 	dbuf_set_data(db, buf);
3000 	db->db_state = DB_FILL;
3001 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
3002 	mutex_exit(&db->db_mtx);
3003 	(void) dbuf_dirty(db, tx);
3004 	dmu_buf_fill_done(&db->db, tx);
3005 }
3006 
3007 void
3008 dbuf_destroy(dmu_buf_impl_t *db)
3009 {
3010 	dnode_t *dn;
3011 	dmu_buf_impl_t *parent = db->db_parent;
3012 	dmu_buf_impl_t *dndb;
3013 
3014 	ASSERT(MUTEX_HELD(&db->db_mtx));
3015 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3016 
3017 	if (db->db_buf != NULL) {
3018 		arc_buf_destroy(db->db_buf, db);
3019 		db->db_buf = NULL;
3020 	}
3021 
3022 	if (db->db_blkid == DMU_BONUS_BLKID) {
3023 		int slots = DB_DNODE(db)->dn_num_slots;
3024 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
3025 		if (db->db.db_data != NULL) {
3026 			kmem_free(db->db.db_data, bonuslen);
3027 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
3028 			db->db_state = DB_UNCACHED;
3029 			DTRACE_SET_STATE(db, "buffer cleared");
3030 		}
3031 	}
3032 
3033 	dbuf_clear_data(db);
3034 
3035 	if (multilist_link_active(&db->db_cache_link)) {
3036 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3037 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3038 
3039 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3040 
3041 		ASSERT0(dmu_buf_user_size(&db->db));
3042 		(void) zfs_refcount_remove_many(
3043 		    &dbuf_caches[db->db_caching_status].size,
3044 		    db->db.db_size, db);
3045 
3046 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3047 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3048 		} else {
3049 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3050 			DBUF_STAT_BUMPDOWN(cache_count);
3051 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3052 			    db->db.db_size);
3053 		}
3054 		db->db_caching_status = DB_NO_CACHE;
3055 	}
3056 
3057 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3058 	ASSERT(db->db_data_pending == NULL);
3059 	ASSERT(list_is_empty(&db->db_dirty_records));
3060 
3061 	db->db_state = DB_EVICTING;
3062 	DTRACE_SET_STATE(db, "buffer eviction started");
3063 	db->db_blkptr = NULL;
3064 
3065 	/*
3066 	 * Now that db_state is DB_EVICTING, nobody else can find this via
3067 	 * the hash table.  We can now drop db_mtx, which allows us to
3068 	 * acquire the dn_dbufs_mtx.
3069 	 */
3070 	mutex_exit(&db->db_mtx);
3071 
3072 	DB_DNODE_ENTER(db);
3073 	dn = DB_DNODE(db);
3074 	dndb = dn->dn_dbuf;
3075 	if (db->db_blkid != DMU_BONUS_BLKID) {
3076 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3077 		if (needlock)
3078 			mutex_enter_nested(&dn->dn_dbufs_mtx,
3079 			    NESTED_SINGLE);
3080 		avl_remove(&dn->dn_dbufs, db);
3081 		membar_producer();
3082 		DB_DNODE_EXIT(db);
3083 		if (needlock)
3084 			mutex_exit(&dn->dn_dbufs_mtx);
3085 		/*
3086 		 * Decrementing the dbuf count means that the hold corresponding
3087 		 * to the removed dbuf is no longer discounted in dnode_move(),
3088 		 * so the dnode cannot be moved until after we release the hold.
3089 		 * The membar_producer() ensures visibility of the decremented
3090 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3091 		 * release any lock.
3092 		 */
3093 		mutex_enter(&dn->dn_mtx);
3094 		dnode_rele_and_unlock(dn, db, B_TRUE);
3095 		db->db_dnode_handle = NULL;
3096 
3097 		dbuf_hash_remove(db);
3098 	} else {
3099 		DB_DNODE_EXIT(db);
3100 	}
3101 
3102 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3103 
3104 	db->db_parent = NULL;
3105 
3106 	ASSERT(db->db_buf == NULL);
3107 	ASSERT(db->db.db_data == NULL);
3108 	ASSERT(db->db_hash_next == NULL);
3109 	ASSERT(db->db_blkptr == NULL);
3110 	ASSERT(db->db_data_pending == NULL);
3111 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3112 	ASSERT(!multilist_link_active(&db->db_cache_link));
3113 
3114 	/*
3115 	 * If this dbuf is referenced from an indirect dbuf,
3116 	 * decrement the ref count on the indirect dbuf.
3117 	 */
3118 	if (parent && parent != dndb) {
3119 		mutex_enter(&parent->db_mtx);
3120 		dbuf_rele_and_unlock(parent, db, B_TRUE);
3121 	}
3122 
3123 	kmem_cache_free(dbuf_kmem_cache, db);
3124 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3125 }
3126 
3127 /*
3128  * Note: While bpp will always be updated if the function returns success,
3129  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3130  * this happens when the dnode is the meta-dnode, or {user|group|project}used
3131  * object.
3132  */
3133 __attribute__((always_inline))
3134 static inline int
3135 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3136     dmu_buf_impl_t **parentp, blkptr_t **bpp)
3137 {
3138 	*parentp = NULL;
3139 	*bpp = NULL;
3140 
3141 	ASSERT(blkid != DMU_BONUS_BLKID);
3142 
3143 	if (blkid == DMU_SPILL_BLKID) {
3144 		mutex_enter(&dn->dn_mtx);
3145 		if (dn->dn_have_spill &&
3146 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3147 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3148 		else
3149 			*bpp = NULL;
3150 		dbuf_add_ref(dn->dn_dbuf, NULL);
3151 		*parentp = dn->dn_dbuf;
3152 		mutex_exit(&dn->dn_mtx);
3153 		return (0);
3154 	}
3155 
3156 	int nlevels =
3157 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3158 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3159 
3160 	ASSERT3U(level * epbs, <, 64);
3161 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3162 	/*
3163 	 * This assertion shouldn't trip as long as the max indirect block size
3164 	 * is less than 1M.  The reason for this is that up to that point,
3165 	 * the number of levels required to address an entire object with blocks
3166 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
3167 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3168 	 * (i.e. we can address the entire object), objects will all use at most
3169 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
3170 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
3171 	 * enough to address an entire object, so objects will have 5 levels,
3172 	 * but then this assertion will overflow.
3173 	 *
3174 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3175 	 * need to redo this logic to handle overflows.
3176 	 */
3177 	ASSERT(level >= nlevels ||
3178 	    ((nlevels - level - 1) * epbs) +
3179 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3180 	if (level >= nlevels ||
3181 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3182 	    ((nlevels - level - 1) * epbs)) ||
3183 	    (fail_sparse &&
3184 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3185 		/* the buffer has no parent yet */
3186 		return (SET_ERROR(ENOENT));
3187 	} else if (level < nlevels-1) {
3188 		/* this block is referenced from an indirect block */
3189 		int err;
3190 
3191 		err = dbuf_hold_impl(dn, level + 1,
3192 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3193 
3194 		if (err)
3195 			return (err);
3196 		err = dbuf_read(*parentp, NULL,
3197 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3198 		if (err) {
3199 			dbuf_rele(*parentp, NULL);
3200 			*parentp = NULL;
3201 			return (err);
3202 		}
3203 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
3204 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3205 		    (blkid & ((1ULL << epbs) - 1));
3206 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3207 			ASSERT(BP_IS_HOLE(*bpp));
3208 		rw_exit(&(*parentp)->db_rwlock);
3209 		return (0);
3210 	} else {
3211 		/* the block is referenced from the dnode */
3212 		ASSERT3U(level, ==, nlevels-1);
3213 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3214 		    blkid < dn->dn_phys->dn_nblkptr);
3215 		if (dn->dn_dbuf) {
3216 			dbuf_add_ref(dn->dn_dbuf, NULL);
3217 			*parentp = dn->dn_dbuf;
3218 		}
3219 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
3220 		return (0);
3221 	}
3222 }
3223 
3224 static dmu_buf_impl_t *
3225 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3226     dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3227 {
3228 	objset_t *os = dn->dn_objset;
3229 	dmu_buf_impl_t *db, *odb;
3230 
3231 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3232 	ASSERT(dn->dn_type != DMU_OT_NONE);
3233 
3234 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3235 
3236 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3237 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3238 
3239 	db->db_objset = os;
3240 	db->db.db_object = dn->dn_object;
3241 	db->db_level = level;
3242 	db->db_blkid = blkid;
3243 	db->db_dirtycnt = 0;
3244 	db->db_dnode_handle = dn->dn_handle;
3245 	db->db_parent = parent;
3246 	db->db_blkptr = blkptr;
3247 	db->db_hash = hash;
3248 
3249 	db->db_user = NULL;
3250 	db->db_user_immediate_evict = FALSE;
3251 	db->db_freed_in_flight = FALSE;
3252 	db->db_pending_evict = FALSE;
3253 
3254 	if (blkid == DMU_BONUS_BLKID) {
3255 		ASSERT3P(parent, ==, dn->dn_dbuf);
3256 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3257 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3258 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3259 		db->db.db_offset = DMU_BONUS_BLKID;
3260 		db->db_state = DB_UNCACHED;
3261 		DTRACE_SET_STATE(db, "bonus buffer created");
3262 		db->db_caching_status = DB_NO_CACHE;
3263 		/* the bonus dbuf is not placed in the hash table */
3264 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3265 		return (db);
3266 	} else if (blkid == DMU_SPILL_BLKID) {
3267 		db->db.db_size = (blkptr != NULL) ?
3268 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3269 		db->db.db_offset = 0;
3270 	} else {
3271 		int blocksize =
3272 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3273 		db->db.db_size = blocksize;
3274 		db->db.db_offset = db->db_blkid * blocksize;
3275 	}
3276 
3277 	/*
3278 	 * Hold the dn_dbufs_mtx while we get the new dbuf
3279 	 * in the hash table *and* added to the dbufs list.
3280 	 * This prevents a possible deadlock with someone
3281 	 * trying to look up this dbuf before it's added to the
3282 	 * dn_dbufs list.
3283 	 */
3284 	mutex_enter(&dn->dn_dbufs_mtx);
3285 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3286 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3287 		/* someone else inserted it first */
3288 		mutex_exit(&dn->dn_dbufs_mtx);
3289 		kmem_cache_free(dbuf_kmem_cache, db);
3290 		DBUF_STAT_BUMP(hash_insert_race);
3291 		return (odb);
3292 	}
3293 	avl_add(&dn->dn_dbufs, db);
3294 
3295 	db->db_state = DB_UNCACHED;
3296 	DTRACE_SET_STATE(db, "regular buffer created");
3297 	db->db_caching_status = DB_NO_CACHE;
3298 	mutex_exit(&dn->dn_dbufs_mtx);
3299 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3300 
3301 	if (parent && parent != dn->dn_dbuf)
3302 		dbuf_add_ref(parent, db);
3303 
3304 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3305 	    zfs_refcount_count(&dn->dn_holds) > 0);
3306 	(void) zfs_refcount_add(&dn->dn_holds, db);
3307 
3308 	dprintf_dbuf(db, "db=%p\n", db);
3309 
3310 	return (db);
3311 }
3312 
3313 /*
3314  * This function returns a block pointer and information about the object,
3315  * given a dnode and a block.  This is a publicly accessible version of
3316  * dbuf_findbp that only returns some information, rather than the
3317  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3318  * should be locked as (at least) a reader.
3319  */
3320 int
3321 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3322     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3323 {
3324 	dmu_buf_impl_t *dbp = NULL;
3325 	blkptr_t *bp2;
3326 	int err = 0;
3327 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3328 
3329 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3330 	if (err == 0) {
3331 		ASSERT3P(bp2, !=, NULL);
3332 		*bp = *bp2;
3333 		if (dbp != NULL)
3334 			dbuf_rele(dbp, NULL);
3335 		if (datablkszsec != NULL)
3336 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3337 		if (indblkshift != NULL)
3338 			*indblkshift = dn->dn_phys->dn_indblkshift;
3339 	}
3340 
3341 	return (err);
3342 }
3343 
3344 typedef struct dbuf_prefetch_arg {
3345 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3346 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3347 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3348 	int dpa_curlevel; /* The current level that we're reading */
3349 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3350 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3351 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3352 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3353 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3354 	void *dpa_arg; /* prefetch completion arg */
3355 } dbuf_prefetch_arg_t;
3356 
3357 static void
3358 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3359 {
3360 	if (dpa->dpa_cb != NULL) {
3361 		dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3362 		    dpa->dpa_zb.zb_blkid, io_done);
3363 	}
3364 	kmem_free(dpa, sizeof (*dpa));
3365 }
3366 
3367 static void
3368 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3369     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3370 {
3371 	(void) zio, (void) zb, (void) iobp;
3372 	dbuf_prefetch_arg_t *dpa = private;
3373 
3374 	if (abuf != NULL)
3375 		arc_buf_destroy(abuf, private);
3376 
3377 	dbuf_prefetch_fini(dpa, B_TRUE);
3378 }
3379 
3380 /*
3381  * Actually issue the prefetch read for the block given.
3382  */
3383 static void
3384 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3385 {
3386 	ASSERT(!BP_IS_REDACTED(bp) ||
3387 	    dsl_dataset_feature_is_active(
3388 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3389 	    SPA_FEATURE_REDACTED_DATASETS));
3390 
3391 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3392 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3393 
3394 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3395 	arc_flags_t aflags =
3396 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3397 	    ARC_FLAG_NO_BUF;
3398 
3399 	/* dnodes are always read as raw and then converted later */
3400 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3401 	    dpa->dpa_curlevel == 0)
3402 		zio_flags |= ZIO_FLAG_RAW;
3403 
3404 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3405 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3406 	ASSERT(dpa->dpa_zio != NULL);
3407 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3408 	    dbuf_issue_final_prefetch_done, dpa,
3409 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3410 }
3411 
3412 /*
3413  * Called when an indirect block above our prefetch target is read in.  This
3414  * will either read in the next indirect block down the tree or issue the actual
3415  * prefetch if the next block down is our target.
3416  */
3417 static void
3418 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3419     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3420 {
3421 	(void) zb, (void) iobp;
3422 	dbuf_prefetch_arg_t *dpa = private;
3423 
3424 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3425 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3426 
3427 	if (abuf == NULL) {
3428 		ASSERT(zio == NULL || zio->io_error != 0);
3429 		dbuf_prefetch_fini(dpa, B_TRUE);
3430 		return;
3431 	}
3432 	ASSERT(zio == NULL || zio->io_error == 0);
3433 
3434 	/*
3435 	 * The dpa_dnode is only valid if we are called with a NULL
3436 	 * zio. This indicates that the arc_read() returned without
3437 	 * first calling zio_read() to issue a physical read. Once
3438 	 * a physical read is made the dpa_dnode must be invalidated
3439 	 * as the locks guarding it may have been dropped. If the
3440 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3441 	 * cache. To do so, we must hold the dbuf associated with the block
3442 	 * we just prefetched, read its contents so that we associate it
3443 	 * with an arc_buf_t, and then release it.
3444 	 */
3445 	if (zio != NULL) {
3446 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3447 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3448 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3449 		} else {
3450 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3451 		}
3452 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3453 
3454 		dpa->dpa_dnode = NULL;
3455 	} else if (dpa->dpa_dnode != NULL) {
3456 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3457 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3458 		    dpa->dpa_zb.zb_level));
3459 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3460 		    dpa->dpa_curlevel, curblkid, FTAG);
3461 		if (db == NULL) {
3462 			arc_buf_destroy(abuf, private);
3463 			dbuf_prefetch_fini(dpa, B_TRUE);
3464 			return;
3465 		}
3466 		(void) dbuf_read(db, NULL,
3467 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3468 		dbuf_rele(db, FTAG);
3469 	}
3470 
3471 	dpa->dpa_curlevel--;
3472 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3473 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3474 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3475 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3476 
3477 	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3478 	    dsl_dataset_feature_is_active(
3479 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3480 	    SPA_FEATURE_REDACTED_DATASETS)));
3481 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3482 		arc_buf_destroy(abuf, private);
3483 		dbuf_prefetch_fini(dpa, B_TRUE);
3484 		return;
3485 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3486 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3487 		dbuf_issue_final_prefetch(dpa, bp);
3488 	} else {
3489 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3490 		zbookmark_phys_t zb;
3491 
3492 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3493 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3494 			iter_aflags |= ARC_FLAG_L2CACHE;
3495 
3496 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3497 
3498 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3499 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3500 
3501 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3502 		    bp, dbuf_prefetch_indirect_done, dpa,
3503 		    ZIO_PRIORITY_SYNC_READ,
3504 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3505 		    &iter_aflags, &zb);
3506 	}
3507 
3508 	arc_buf_destroy(abuf, private);
3509 }
3510 
3511 /*
3512  * Issue prefetch reads for the given block on the given level.  If the indirect
3513  * blocks above that block are not in memory, we will read them in
3514  * asynchronously.  As a result, this call never blocks waiting for a read to
3515  * complete. Note that the prefetch might fail if the dataset is encrypted and
3516  * the encryption key is unmapped before the IO completes.
3517  */
3518 int
3519 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3520     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3521     void *arg)
3522 {
3523 	blkptr_t bp;
3524 	int epbs, nlevels, curlevel;
3525 	uint64_t curblkid;
3526 
3527 	ASSERT(blkid != DMU_BONUS_BLKID);
3528 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3529 
3530 	if (blkid > dn->dn_maxblkid)
3531 		goto no_issue;
3532 
3533 	if (level == 0 && dnode_block_freed(dn, blkid))
3534 		goto no_issue;
3535 
3536 	/*
3537 	 * This dnode hasn't been written to disk yet, so there's nothing to
3538 	 * prefetch.
3539 	 */
3540 	nlevels = dn->dn_phys->dn_nlevels;
3541 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3542 		goto no_issue;
3543 
3544 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3545 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3546 		goto no_issue;
3547 
3548 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3549 	    level, blkid, NULL);
3550 	if (db != NULL) {
3551 		mutex_exit(&db->db_mtx);
3552 		/*
3553 		 * This dbuf already exists.  It is either CACHED, or
3554 		 * (we assume) about to be read or filled.
3555 		 */
3556 		goto no_issue;
3557 	}
3558 
3559 	/*
3560 	 * Find the closest ancestor (indirect block) of the target block
3561 	 * that is present in the cache.  In this indirect block, we will
3562 	 * find the bp that is at curlevel, curblkid.
3563 	 */
3564 	curlevel = level;
3565 	curblkid = blkid;
3566 	while (curlevel < nlevels - 1) {
3567 		int parent_level = curlevel + 1;
3568 		uint64_t parent_blkid = curblkid >> epbs;
3569 		dmu_buf_impl_t *db;
3570 
3571 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3572 		    FALSE, TRUE, FTAG, &db) == 0) {
3573 			blkptr_t *bpp = db->db_buf->b_data;
3574 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3575 			dbuf_rele(db, FTAG);
3576 			break;
3577 		}
3578 
3579 		curlevel = parent_level;
3580 		curblkid = parent_blkid;
3581 	}
3582 
3583 	if (curlevel == nlevels - 1) {
3584 		/* No cached indirect blocks found. */
3585 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3586 		bp = dn->dn_phys->dn_blkptr[curblkid];
3587 	}
3588 	ASSERT(!BP_IS_REDACTED(&bp) ||
3589 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3590 	    SPA_FEATURE_REDACTED_DATASETS));
3591 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3592 		goto no_issue;
3593 
3594 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3595 
3596 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3597 	    ZIO_FLAG_CANFAIL);
3598 
3599 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3600 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3601 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3602 	    dn->dn_object, level, blkid);
3603 	dpa->dpa_curlevel = curlevel;
3604 	dpa->dpa_prio = prio;
3605 	dpa->dpa_aflags = aflags;
3606 	dpa->dpa_spa = dn->dn_objset->os_spa;
3607 	dpa->dpa_dnode = dn;
3608 	dpa->dpa_epbs = epbs;
3609 	dpa->dpa_zio = pio;
3610 	dpa->dpa_cb = cb;
3611 	dpa->dpa_arg = arg;
3612 
3613 	if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3614 		dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3615 	else if (dnode_level_is_l2cacheable(&bp, dn, level))
3616 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3617 
3618 	/*
3619 	 * If we have the indirect just above us, no need to do the asynchronous
3620 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3621 	 * a higher level, though, we want to issue the prefetches for all the
3622 	 * indirect blocks asynchronously, so we can go on with whatever we were
3623 	 * doing.
3624 	 */
3625 	if (curlevel == level) {
3626 		ASSERT3U(curblkid, ==, blkid);
3627 		dbuf_issue_final_prefetch(dpa, &bp);
3628 	} else {
3629 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3630 		zbookmark_phys_t zb;
3631 
3632 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3633 		if (dnode_level_is_l2cacheable(&bp, dn, level))
3634 			iter_aflags |= ARC_FLAG_L2CACHE;
3635 
3636 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3637 		    dn->dn_object, curlevel, curblkid);
3638 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3639 		    &bp, dbuf_prefetch_indirect_done, dpa,
3640 		    ZIO_PRIORITY_SYNC_READ,
3641 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3642 		    &iter_aflags, &zb);
3643 	}
3644 	/*
3645 	 * We use pio here instead of dpa_zio since it's possible that
3646 	 * dpa may have already been freed.
3647 	 */
3648 	zio_nowait(pio);
3649 	return (1);
3650 no_issue:
3651 	if (cb != NULL)
3652 		cb(arg, level, blkid, B_FALSE);
3653 	return (0);
3654 }
3655 
3656 int
3657 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3658     arc_flags_t aflags)
3659 {
3660 
3661 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3662 }
3663 
3664 /*
3665  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3666  * the case of encrypted, compressed and uncompressed buffers by
3667  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3668  * arc_alloc_compressed_buf() or arc_alloc_buf().*
3669  *
3670  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3671  */
3672 noinline static void
3673 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3674 {
3675 	dbuf_dirty_record_t *dr = db->db_data_pending;
3676 	arc_buf_t *data = dr->dt.dl.dr_data;
3677 	enum zio_compress compress_type = arc_get_compression(data);
3678 	uint8_t complevel = arc_get_complevel(data);
3679 
3680 	if (arc_is_encrypted(data)) {
3681 		boolean_t byteorder;
3682 		uint8_t salt[ZIO_DATA_SALT_LEN];
3683 		uint8_t iv[ZIO_DATA_IV_LEN];
3684 		uint8_t mac[ZIO_DATA_MAC_LEN];
3685 
3686 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3687 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3688 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3689 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3690 		    compress_type, complevel));
3691 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3692 		dbuf_set_data(db, arc_alloc_compressed_buf(
3693 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3694 		    arc_buf_lsize(data), compress_type, complevel));
3695 	} else {
3696 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3697 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3698 	}
3699 
3700 	rw_enter(&db->db_rwlock, RW_WRITER);
3701 	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3702 	rw_exit(&db->db_rwlock);
3703 }
3704 
3705 /*
3706  * Returns with db_holds incremented, and db_mtx not held.
3707  * Note: dn_struct_rwlock must be held.
3708  */
3709 int
3710 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3711     boolean_t fail_sparse, boolean_t fail_uncached,
3712     const void *tag, dmu_buf_impl_t **dbp)
3713 {
3714 	dmu_buf_impl_t *db, *parent = NULL;
3715 	uint64_t hv;
3716 
3717 	/* If the pool has been created, verify the tx_sync_lock is not held */
3718 	spa_t *spa = dn->dn_objset->os_spa;
3719 	dsl_pool_t *dp = spa->spa_dsl_pool;
3720 	if (dp != NULL) {
3721 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3722 	}
3723 
3724 	ASSERT(blkid != DMU_BONUS_BLKID);
3725 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3726 	ASSERT3U(dn->dn_nlevels, >, level);
3727 
3728 	*dbp = NULL;
3729 
3730 	/* dbuf_find() returns with db_mtx held */
3731 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3732 
3733 	if (db == NULL) {
3734 		blkptr_t *bp = NULL;
3735 		int err;
3736 
3737 		if (fail_uncached)
3738 			return (SET_ERROR(ENOENT));
3739 
3740 		ASSERT3P(parent, ==, NULL);
3741 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3742 		if (fail_sparse) {
3743 			if (err == 0 && bp && BP_IS_HOLE(bp))
3744 				err = SET_ERROR(ENOENT);
3745 			if (err) {
3746 				if (parent)
3747 					dbuf_rele(parent, NULL);
3748 				return (err);
3749 			}
3750 		}
3751 		if (err && err != ENOENT)
3752 			return (err);
3753 		db = dbuf_create(dn, level, blkid, parent, bp, hv);
3754 	}
3755 
3756 	if (fail_uncached && db->db_state != DB_CACHED) {
3757 		mutex_exit(&db->db_mtx);
3758 		return (SET_ERROR(ENOENT));
3759 	}
3760 
3761 	if (db->db_buf != NULL) {
3762 		arc_buf_access(db->db_buf);
3763 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3764 	}
3765 
3766 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3767 
3768 	/*
3769 	 * If this buffer is currently syncing out, and we are
3770 	 * still referencing it from db_data, we need to make a copy
3771 	 * of it in case we decide we want to dirty it again in this txg.
3772 	 */
3773 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3774 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3775 	    db->db_state == DB_CACHED && db->db_data_pending) {
3776 		dbuf_dirty_record_t *dr = db->db_data_pending;
3777 		if (dr->dt.dl.dr_data == db->db_buf) {
3778 			ASSERT3P(db->db_buf, !=, NULL);
3779 			dbuf_hold_copy(dn, db);
3780 		}
3781 	}
3782 
3783 	if (multilist_link_active(&db->db_cache_link)) {
3784 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3785 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3786 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3787 
3788 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3789 
3790 		uint64_t size = db->db.db_size + dmu_buf_user_size(&db->db);
3791 		(void) zfs_refcount_remove_many(
3792 		    &dbuf_caches[db->db_caching_status].size, size, db);
3793 
3794 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3795 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3796 		} else {
3797 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3798 			DBUF_STAT_BUMPDOWN(cache_count);
3799 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
3800 		}
3801 		db->db_caching_status = DB_NO_CACHE;
3802 	}
3803 	(void) zfs_refcount_add(&db->db_holds, tag);
3804 	DBUF_VERIFY(db);
3805 	mutex_exit(&db->db_mtx);
3806 
3807 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3808 	if (parent)
3809 		dbuf_rele(parent, NULL);
3810 
3811 	ASSERT3P(DB_DNODE(db), ==, dn);
3812 	ASSERT3U(db->db_blkid, ==, blkid);
3813 	ASSERT3U(db->db_level, ==, level);
3814 	*dbp = db;
3815 
3816 	return (0);
3817 }
3818 
3819 dmu_buf_impl_t *
3820 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3821 {
3822 	return (dbuf_hold_level(dn, 0, blkid, tag));
3823 }
3824 
3825 dmu_buf_impl_t *
3826 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3827 {
3828 	dmu_buf_impl_t *db;
3829 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3830 	return (err ? NULL : db);
3831 }
3832 
3833 void
3834 dbuf_create_bonus(dnode_t *dn)
3835 {
3836 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3837 
3838 	ASSERT(dn->dn_bonus == NULL);
3839 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3840 	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3841 }
3842 
3843 int
3844 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3845 {
3846 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3847 
3848 	if (db->db_blkid != DMU_SPILL_BLKID)
3849 		return (SET_ERROR(ENOTSUP));
3850 	if (blksz == 0)
3851 		blksz = SPA_MINBLOCKSIZE;
3852 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3853 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3854 
3855 	dbuf_new_size(db, blksz, tx);
3856 
3857 	return (0);
3858 }
3859 
3860 void
3861 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3862 {
3863 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3864 }
3865 
3866 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3867 void
3868 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3869 {
3870 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3871 	VERIFY3S(holds, >, 1);
3872 }
3873 
3874 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3875 boolean_t
3876 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3877     const void *tag)
3878 {
3879 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3880 	dmu_buf_impl_t *found_db;
3881 	boolean_t result = B_FALSE;
3882 
3883 	if (blkid == DMU_BONUS_BLKID)
3884 		found_db = dbuf_find_bonus(os, obj);
3885 	else
3886 		found_db = dbuf_find(os, obj, 0, blkid, NULL);
3887 
3888 	if (found_db != NULL) {
3889 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3890 			(void) zfs_refcount_add(&db->db_holds, tag);
3891 			result = B_TRUE;
3892 		}
3893 		mutex_exit(&found_db->db_mtx);
3894 	}
3895 	return (result);
3896 }
3897 
3898 /*
3899  * If you call dbuf_rele() you had better not be referencing the dnode handle
3900  * unless you have some other direct or indirect hold on the dnode. (An indirect
3901  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3902  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3903  * dnode's parent dbuf evicting its dnode handles.
3904  */
3905 void
3906 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3907 {
3908 	mutex_enter(&db->db_mtx);
3909 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3910 }
3911 
3912 void
3913 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3914 {
3915 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3916 }
3917 
3918 /*
3919  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3920  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3921  * argument should be set if we are already in the dbuf-evicting code
3922  * path, in which case we don't want to recursively evict.  This allows us to
3923  * avoid deeply nested stacks that would have a call flow similar to this:
3924  *
3925  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3926  *	^						|
3927  *	|						|
3928  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3929  *
3930  */
3931 void
3932 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3933 {
3934 	int64_t holds;
3935 	uint64_t size;
3936 
3937 	ASSERT(MUTEX_HELD(&db->db_mtx));
3938 	DBUF_VERIFY(db);
3939 
3940 	/*
3941 	 * Remove the reference to the dbuf before removing its hold on the
3942 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3943 	 * buffer has a corresponding dnode hold.
3944 	 */
3945 	holds = zfs_refcount_remove(&db->db_holds, tag);
3946 	ASSERT(holds >= 0);
3947 
3948 	/*
3949 	 * We can't freeze indirects if there is a possibility that they
3950 	 * may be modified in the current syncing context.
3951 	 */
3952 	if (db->db_buf != NULL &&
3953 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3954 		arc_buf_freeze(db->db_buf);
3955 	}
3956 
3957 	if (holds == db->db_dirtycnt &&
3958 	    db->db_level == 0 && db->db_user_immediate_evict)
3959 		dbuf_evict_user(db);
3960 
3961 	if (holds == 0) {
3962 		if (db->db_blkid == DMU_BONUS_BLKID) {
3963 			dnode_t *dn;
3964 			boolean_t evict_dbuf = db->db_pending_evict;
3965 
3966 			/*
3967 			 * If the dnode moves here, we cannot cross this
3968 			 * barrier until the move completes.
3969 			 */
3970 			DB_DNODE_ENTER(db);
3971 
3972 			dn = DB_DNODE(db);
3973 			atomic_dec_32(&dn->dn_dbufs_count);
3974 
3975 			/*
3976 			 * Decrementing the dbuf count means that the bonus
3977 			 * buffer's dnode hold is no longer discounted in
3978 			 * dnode_move(). The dnode cannot move until after
3979 			 * the dnode_rele() below.
3980 			 */
3981 			DB_DNODE_EXIT(db);
3982 
3983 			/*
3984 			 * Do not reference db after its lock is dropped.
3985 			 * Another thread may evict it.
3986 			 */
3987 			mutex_exit(&db->db_mtx);
3988 
3989 			if (evict_dbuf)
3990 				dnode_evict_bonus(dn);
3991 
3992 			dnode_rele(dn, db);
3993 		} else if (db->db_buf == NULL) {
3994 			/*
3995 			 * This is a special case: we never associated this
3996 			 * dbuf with any data allocated from the ARC.
3997 			 */
3998 			ASSERT(db->db_state == DB_UNCACHED ||
3999 			    db->db_state == DB_NOFILL);
4000 			dbuf_destroy(db);
4001 		} else if (arc_released(db->db_buf)) {
4002 			/*
4003 			 * This dbuf has anonymous data associated with it.
4004 			 */
4005 			dbuf_destroy(db);
4006 		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
4007 		    db->db_pending_evict) {
4008 			dbuf_destroy(db);
4009 		} else if (!multilist_link_active(&db->db_cache_link)) {
4010 			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4011 
4012 			dbuf_cached_state_t dcs =
4013 			    dbuf_include_in_metadata_cache(db) ?
4014 			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
4015 			db->db_caching_status = dcs;
4016 
4017 			multilist_insert(&dbuf_caches[dcs].cache, db);
4018 			uint64_t db_size = db->db.db_size +
4019 			    dmu_buf_user_size(&db->db);
4020 			size = zfs_refcount_add_many(
4021 			    &dbuf_caches[dcs].size, db_size, db);
4022 			uint8_t db_level = db->db_level;
4023 			mutex_exit(&db->db_mtx);
4024 
4025 			if (dcs == DB_DBUF_METADATA_CACHE) {
4026 				DBUF_STAT_BUMP(metadata_cache_count);
4027 				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
4028 				    size);
4029 			} else {
4030 				DBUF_STAT_BUMP(cache_count);
4031 				DBUF_STAT_MAX(cache_size_bytes_max, size);
4032 				DBUF_STAT_BUMP(cache_levels[db_level]);
4033 				DBUF_STAT_INCR(cache_levels_bytes[db_level],
4034 				    db_size);
4035 			}
4036 
4037 			if (dcs == DB_DBUF_CACHE && !evicting)
4038 				dbuf_evict_notify(size);
4039 		}
4040 	} else {
4041 		mutex_exit(&db->db_mtx);
4042 	}
4043 
4044 }
4045 
4046 #pragma weak dmu_buf_refcount = dbuf_refcount
4047 uint64_t
4048 dbuf_refcount(dmu_buf_impl_t *db)
4049 {
4050 	return (zfs_refcount_count(&db->db_holds));
4051 }
4052 
4053 uint64_t
4054 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4055 {
4056 	uint64_t holds;
4057 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4058 
4059 	mutex_enter(&db->db_mtx);
4060 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4061 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4062 	mutex_exit(&db->db_mtx);
4063 
4064 	return (holds);
4065 }
4066 
4067 void *
4068 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4069     dmu_buf_user_t *new_user)
4070 {
4071 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4072 
4073 	mutex_enter(&db->db_mtx);
4074 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4075 	if (db->db_user == old_user)
4076 		db->db_user = new_user;
4077 	else
4078 		old_user = db->db_user;
4079 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4080 	mutex_exit(&db->db_mtx);
4081 
4082 	return (old_user);
4083 }
4084 
4085 void *
4086 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4087 {
4088 	return (dmu_buf_replace_user(db_fake, NULL, user));
4089 }
4090 
4091 void *
4092 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4093 {
4094 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4095 
4096 	db->db_user_immediate_evict = TRUE;
4097 	return (dmu_buf_set_user(db_fake, user));
4098 }
4099 
4100 void *
4101 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4102 {
4103 	return (dmu_buf_replace_user(db_fake, user, NULL));
4104 }
4105 
4106 void *
4107 dmu_buf_get_user(dmu_buf_t *db_fake)
4108 {
4109 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4110 
4111 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4112 	return (db->db_user);
4113 }
4114 
4115 uint64_t
4116 dmu_buf_user_size(dmu_buf_t *db_fake)
4117 {
4118 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4119 	if (db->db_user == NULL)
4120 		return (0);
4121 	return (atomic_load_64(&db->db_user->dbu_size));
4122 }
4123 
4124 void
4125 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
4126 {
4127 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4128 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4129 	ASSERT3P(db->db_user, !=, NULL);
4130 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
4131 	atomic_add_64(&db->db_user->dbu_size, nadd);
4132 }
4133 
4134 void
4135 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
4136 {
4137 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4138 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4139 	ASSERT3P(db->db_user, !=, NULL);
4140 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
4141 	atomic_sub_64(&db->db_user->dbu_size, nsub);
4142 }
4143 
4144 void
4145 dmu_buf_user_evict_wait(void)
4146 {
4147 	taskq_wait(dbu_evict_taskq);
4148 }
4149 
4150 blkptr_t *
4151 dmu_buf_get_blkptr(dmu_buf_t *db)
4152 {
4153 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4154 	return (dbi->db_blkptr);
4155 }
4156 
4157 objset_t *
4158 dmu_buf_get_objset(dmu_buf_t *db)
4159 {
4160 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4161 	return (dbi->db_objset);
4162 }
4163 
4164 dnode_t *
4165 dmu_buf_dnode_enter(dmu_buf_t *db)
4166 {
4167 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4168 	DB_DNODE_ENTER(dbi);
4169 	return (DB_DNODE(dbi));
4170 }
4171 
4172 void
4173 dmu_buf_dnode_exit(dmu_buf_t *db)
4174 {
4175 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4176 	DB_DNODE_EXIT(dbi);
4177 }
4178 
4179 static void
4180 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4181 {
4182 	/* ASSERT(dmu_tx_is_syncing(tx) */
4183 	ASSERT(MUTEX_HELD(&db->db_mtx));
4184 
4185 	if (db->db_blkptr != NULL)
4186 		return;
4187 
4188 	if (db->db_blkid == DMU_SPILL_BLKID) {
4189 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4190 		BP_ZERO(db->db_blkptr);
4191 		return;
4192 	}
4193 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4194 		/*
4195 		 * This buffer was allocated at a time when there was
4196 		 * no available blkptrs from the dnode, or it was
4197 		 * inappropriate to hook it in (i.e., nlevels mismatch).
4198 		 */
4199 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4200 		ASSERT(db->db_parent == NULL);
4201 		db->db_parent = dn->dn_dbuf;
4202 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4203 		DBUF_VERIFY(db);
4204 	} else {
4205 		dmu_buf_impl_t *parent = db->db_parent;
4206 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4207 
4208 		ASSERT(dn->dn_phys->dn_nlevels > 1);
4209 		if (parent == NULL) {
4210 			mutex_exit(&db->db_mtx);
4211 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
4212 			parent = dbuf_hold_level(dn, db->db_level + 1,
4213 			    db->db_blkid >> epbs, db);
4214 			rw_exit(&dn->dn_struct_rwlock);
4215 			mutex_enter(&db->db_mtx);
4216 			db->db_parent = parent;
4217 		}
4218 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
4219 		    (db->db_blkid & ((1ULL << epbs) - 1));
4220 		DBUF_VERIFY(db);
4221 	}
4222 }
4223 
4224 static void
4225 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4226 {
4227 	dmu_buf_impl_t *db = dr->dr_dbuf;
4228 	void *data = dr->dt.dl.dr_data;
4229 
4230 	ASSERT0(db->db_level);
4231 	ASSERT(MUTEX_HELD(&db->db_mtx));
4232 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4233 	ASSERT(data != NULL);
4234 
4235 	dnode_t *dn = dr->dr_dnode;
4236 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4237 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4238 	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4239 
4240 	dbuf_sync_leaf_verify_bonus_dnode(dr);
4241 
4242 	dbuf_undirty_bonus(dr);
4243 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4244 }
4245 
4246 /*
4247  * When syncing out a blocks of dnodes, adjust the block to deal with
4248  * encryption.  Normally, we make sure the block is decrypted before writing
4249  * it.  If we have crypt params, then we are writing a raw (encrypted) block,
4250  * from a raw receive.  In this case, set the ARC buf's crypt params so
4251  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4252  */
4253 static void
4254 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4255 {
4256 	int err;
4257 	dmu_buf_impl_t *db = dr->dr_dbuf;
4258 
4259 	ASSERT(MUTEX_HELD(&db->db_mtx));
4260 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4261 	ASSERT3U(db->db_level, ==, 0);
4262 
4263 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4264 		zbookmark_phys_t zb;
4265 
4266 		/*
4267 		 * Unfortunately, there is currently no mechanism for
4268 		 * syncing context to handle decryption errors. An error
4269 		 * here is only possible if an attacker maliciously
4270 		 * changed a dnode block and updated the associated
4271 		 * checksums going up the block tree.
4272 		 */
4273 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4274 		    db->db.db_object, db->db_level, db->db_blkid);
4275 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4276 		    &zb, B_TRUE);
4277 		if (err)
4278 			panic("Invalid dnode block MAC");
4279 	} else if (dr->dt.dl.dr_has_raw_params) {
4280 		(void) arc_release(dr->dt.dl.dr_data, db);
4281 		arc_convert_to_raw(dr->dt.dl.dr_data,
4282 		    dmu_objset_id(db->db_objset),
4283 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4284 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4285 	}
4286 }
4287 
4288 /*
4289  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4290  * is critical the we not allow the compiler to inline this function in to
4291  * dbuf_sync_list() thereby drastically bloating the stack usage.
4292  */
4293 noinline static void
4294 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4295 {
4296 	dmu_buf_impl_t *db = dr->dr_dbuf;
4297 	dnode_t *dn = dr->dr_dnode;
4298 
4299 	ASSERT(dmu_tx_is_syncing(tx));
4300 
4301 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4302 
4303 	mutex_enter(&db->db_mtx);
4304 
4305 	ASSERT(db->db_level > 0);
4306 	DBUF_VERIFY(db);
4307 
4308 	/* Read the block if it hasn't been read yet. */
4309 	if (db->db_buf == NULL) {
4310 		mutex_exit(&db->db_mtx);
4311 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4312 		mutex_enter(&db->db_mtx);
4313 	}
4314 	ASSERT3U(db->db_state, ==, DB_CACHED);
4315 	ASSERT(db->db_buf != NULL);
4316 
4317 	/* Indirect block size must match what the dnode thinks it is. */
4318 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4319 	dbuf_check_blkptr(dn, db);
4320 
4321 	/* Provide the pending dirty record to child dbufs */
4322 	db->db_data_pending = dr;
4323 
4324 	mutex_exit(&db->db_mtx);
4325 
4326 	dbuf_write(dr, db->db_buf, tx);
4327 
4328 	zio_t *zio = dr->dr_zio;
4329 	mutex_enter(&dr->dt.di.dr_mtx);
4330 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4331 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4332 	mutex_exit(&dr->dt.di.dr_mtx);
4333 	zio_nowait(zio);
4334 }
4335 
4336 /*
4337  * Verify that the size of the data in our bonus buffer does not exceed
4338  * its recorded size.
4339  *
4340  * The purpose of this verification is to catch any cases in development
4341  * where the size of a phys structure (i.e space_map_phys_t) grows and,
4342  * due to incorrect feature management, older pools expect to read more
4343  * data even though they didn't actually write it to begin with.
4344  *
4345  * For a example, this would catch an error in the feature logic where we
4346  * open an older pool and we expect to write the space map histogram of
4347  * a space map with size SPACE_MAP_SIZE_V0.
4348  */
4349 static void
4350 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4351 {
4352 #ifdef ZFS_DEBUG
4353 	dnode_t *dn = dr->dr_dnode;
4354 
4355 	/*
4356 	 * Encrypted bonus buffers can have data past their bonuslen.
4357 	 * Skip the verification of these blocks.
4358 	 */
4359 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4360 		return;
4361 
4362 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4363 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4364 	ASSERT3U(bonuslen, <=, maxbonuslen);
4365 
4366 	arc_buf_t *datap = dr->dt.dl.dr_data;
4367 	char *datap_end = ((char *)datap) + bonuslen;
4368 	char *datap_max = ((char *)datap) + maxbonuslen;
4369 
4370 	/* ensure that everything is zero after our data */
4371 	for (; datap_end < datap_max; datap_end++)
4372 		ASSERT(*datap_end == 0);
4373 #endif
4374 }
4375 
4376 static blkptr_t *
4377 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4378 {
4379 	/* This must be a lightweight dirty record. */
4380 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4381 	dnode_t *dn = dr->dr_dnode;
4382 
4383 	if (dn->dn_phys->dn_nlevels == 1) {
4384 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4385 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4386 	} else {
4387 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4388 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4389 		VERIFY3U(parent_db->db_level, ==, 1);
4390 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4391 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4392 		blkptr_t *bp = parent_db->db.db_data;
4393 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4394 	}
4395 }
4396 
4397 static void
4398 dbuf_lightweight_ready(zio_t *zio)
4399 {
4400 	dbuf_dirty_record_t *dr = zio->io_private;
4401 	blkptr_t *bp = zio->io_bp;
4402 
4403 	if (zio->io_error != 0)
4404 		return;
4405 
4406 	dnode_t *dn = dr->dr_dnode;
4407 
4408 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4409 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4410 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4411 	    bp_get_dsize_sync(spa, bp_orig);
4412 	dnode_diduse_space(dn, delta);
4413 
4414 	uint64_t blkid = dr->dt.dll.dr_blkid;
4415 	mutex_enter(&dn->dn_mtx);
4416 	if (blkid > dn->dn_phys->dn_maxblkid) {
4417 		ASSERT0(dn->dn_objset->os_raw_receive);
4418 		dn->dn_phys->dn_maxblkid = blkid;
4419 	}
4420 	mutex_exit(&dn->dn_mtx);
4421 
4422 	if (!BP_IS_EMBEDDED(bp)) {
4423 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4424 		BP_SET_FILL(bp, fill);
4425 	}
4426 
4427 	dmu_buf_impl_t *parent_db;
4428 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4429 	if (dr->dr_parent == NULL) {
4430 		parent_db = dn->dn_dbuf;
4431 	} else {
4432 		parent_db = dr->dr_parent->dr_dbuf;
4433 	}
4434 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4435 	*bp_orig = *bp;
4436 	rw_exit(&parent_db->db_rwlock);
4437 }
4438 
4439 static void
4440 dbuf_lightweight_done(zio_t *zio)
4441 {
4442 	dbuf_dirty_record_t *dr = zio->io_private;
4443 
4444 	VERIFY0(zio->io_error);
4445 
4446 	objset_t *os = dr->dr_dnode->dn_objset;
4447 	dmu_tx_t *tx = os->os_synctx;
4448 
4449 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4450 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4451 	} else {
4452 		dsl_dataset_t *ds = os->os_dsl_dataset;
4453 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4454 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4455 	}
4456 
4457 	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4458 	    zio->io_txg);
4459 
4460 	abd_free(dr->dt.dll.dr_abd);
4461 	kmem_free(dr, sizeof (*dr));
4462 }
4463 
4464 noinline static void
4465 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4466 {
4467 	dnode_t *dn = dr->dr_dnode;
4468 	zio_t *pio;
4469 	if (dn->dn_phys->dn_nlevels == 1) {
4470 		pio = dn->dn_zio;
4471 	} else {
4472 		pio = dr->dr_parent->dr_zio;
4473 	}
4474 
4475 	zbookmark_phys_t zb = {
4476 		.zb_objset = dmu_objset_id(dn->dn_objset),
4477 		.zb_object = dn->dn_object,
4478 		.zb_level = 0,
4479 		.zb_blkid = dr->dt.dll.dr_blkid,
4480 	};
4481 
4482 	/*
4483 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4484 	 * will have the old BP in dbuf_lightweight_done().
4485 	 */
4486 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4487 
4488 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4489 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4490 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4491 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4492 	    dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4493 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4494 
4495 	zio_nowait(dr->dr_zio);
4496 }
4497 
4498 /*
4499  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4500  * critical the we not allow the compiler to inline this function in to
4501  * dbuf_sync_list() thereby drastically bloating the stack usage.
4502  */
4503 noinline static void
4504 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4505 {
4506 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4507 	dmu_buf_impl_t *db = dr->dr_dbuf;
4508 	dnode_t *dn = dr->dr_dnode;
4509 	objset_t *os;
4510 	uint64_t txg = tx->tx_txg;
4511 
4512 	ASSERT(dmu_tx_is_syncing(tx));
4513 
4514 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4515 
4516 	mutex_enter(&db->db_mtx);
4517 	/*
4518 	 * To be synced, we must be dirtied.  But we
4519 	 * might have been freed after the dirty.
4520 	 */
4521 	if (db->db_state == DB_UNCACHED) {
4522 		/* This buffer has been freed since it was dirtied */
4523 		ASSERT(db->db.db_data == NULL);
4524 	} else if (db->db_state == DB_FILL) {
4525 		/* This buffer was freed and is now being re-filled */
4526 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4527 	} else if (db->db_state == DB_READ) {
4528 		/*
4529 		 * This buffer has a clone we need to write, and an in-flight
4530 		 * read on the BP we're about to clone. Its safe to issue the
4531 		 * write here because the read has already been issued and the
4532 		 * contents won't change.
4533 		 */
4534 		ASSERT(dr->dt.dl.dr_brtwrite &&
4535 		    dr->dt.dl.dr_override_state == DR_OVERRIDDEN);
4536 	} else {
4537 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4538 	}
4539 	DBUF_VERIFY(db);
4540 
4541 	if (db->db_blkid == DMU_SPILL_BLKID) {
4542 		mutex_enter(&dn->dn_mtx);
4543 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4544 			/*
4545 			 * In the previous transaction group, the bonus buffer
4546 			 * was entirely used to store the attributes for the
4547 			 * dnode which overrode the dn_spill field.  However,
4548 			 * when adding more attributes to the file a spill
4549 			 * block was required to hold the extra attributes.
4550 			 *
4551 			 * Make sure to clear the garbage left in the dn_spill
4552 			 * field from the previous attributes in the bonus
4553 			 * buffer.  Otherwise, after writing out the spill
4554 			 * block to the new allocated dva, it will free
4555 			 * the old block pointed to by the invalid dn_spill.
4556 			 */
4557 			db->db_blkptr = NULL;
4558 		}
4559 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4560 		mutex_exit(&dn->dn_mtx);
4561 	}
4562 
4563 	/*
4564 	 * If this is a bonus buffer, simply copy the bonus data into the
4565 	 * dnode.  It will be written out when the dnode is synced (and it
4566 	 * will be synced, since it must have been dirty for dbuf_sync to
4567 	 * be called).
4568 	 */
4569 	if (db->db_blkid == DMU_BONUS_BLKID) {
4570 		ASSERT(dr->dr_dbuf == db);
4571 		dbuf_sync_bonus(dr, tx);
4572 		return;
4573 	}
4574 
4575 	os = dn->dn_objset;
4576 
4577 	/*
4578 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4579 	 * operation to sneak in. As a result, we need to ensure that we
4580 	 * don't check the dr_override_state until we have returned from
4581 	 * dbuf_check_blkptr.
4582 	 */
4583 	dbuf_check_blkptr(dn, db);
4584 
4585 	/*
4586 	 * If this buffer is in the middle of an immediate write,
4587 	 * wait for the synchronous IO to complete.
4588 	 */
4589 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4590 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4591 		cv_wait(&db->db_changed, &db->db_mtx);
4592 	}
4593 
4594 	/*
4595 	 * If this is a dnode block, ensure it is appropriately encrypted
4596 	 * or decrypted, depending on what we are writing to it this txg.
4597 	 */
4598 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4599 		dbuf_prepare_encrypted_dnode_leaf(dr);
4600 
4601 	if (db->db_state != DB_NOFILL &&
4602 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4603 	    zfs_refcount_count(&db->db_holds) > 1 &&
4604 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4605 	    *datap == db->db_buf) {
4606 		/*
4607 		 * If this buffer is currently "in use" (i.e., there
4608 		 * are active holds and db_data still references it),
4609 		 * then make a copy before we start the write so that
4610 		 * any modifications from the open txg will not leak
4611 		 * into this write.
4612 		 *
4613 		 * NOTE: this copy does not need to be made for
4614 		 * objects only modified in the syncing context (e.g.
4615 		 * DNONE_DNODE blocks).
4616 		 */
4617 		int psize = arc_buf_size(*datap);
4618 		int lsize = arc_buf_lsize(*datap);
4619 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4620 		enum zio_compress compress_type = arc_get_compression(*datap);
4621 		uint8_t complevel = arc_get_complevel(*datap);
4622 
4623 		if (arc_is_encrypted(*datap)) {
4624 			boolean_t byteorder;
4625 			uint8_t salt[ZIO_DATA_SALT_LEN];
4626 			uint8_t iv[ZIO_DATA_IV_LEN];
4627 			uint8_t mac[ZIO_DATA_MAC_LEN];
4628 
4629 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4630 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4631 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4632 			    dn->dn_type, psize, lsize, compress_type,
4633 			    complevel);
4634 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4635 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4636 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4637 			    psize, lsize, compress_type, complevel);
4638 		} else {
4639 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4640 		}
4641 		memcpy((*datap)->b_data, db->db.db_data, psize);
4642 	}
4643 	db->db_data_pending = dr;
4644 
4645 	mutex_exit(&db->db_mtx);
4646 
4647 	dbuf_write(dr, *datap, tx);
4648 
4649 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4650 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4651 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4652 	} else {
4653 		zio_nowait(dr->dr_zio);
4654 	}
4655 }
4656 
4657 /*
4658  * Syncs out a range of dirty records for indirect or leaf dbufs.  May be
4659  * called recursively from dbuf_sync_indirect().
4660  */
4661 void
4662 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4663 {
4664 	dbuf_dirty_record_t *dr;
4665 
4666 	while ((dr = list_head(list))) {
4667 		if (dr->dr_zio != NULL) {
4668 			/*
4669 			 * If we find an already initialized zio then we
4670 			 * are processing the meta-dnode, and we have finished.
4671 			 * The dbufs for all dnodes are put back on the list
4672 			 * during processing, so that we can zio_wait()
4673 			 * these IOs after initiating all child IOs.
4674 			 */
4675 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4676 			    DMU_META_DNODE_OBJECT);
4677 			break;
4678 		}
4679 		list_remove(list, dr);
4680 		if (dr->dr_dbuf == NULL) {
4681 			dbuf_sync_lightweight(dr, tx);
4682 		} else {
4683 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4684 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4685 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4686 			}
4687 			if (dr->dr_dbuf->db_level > 0)
4688 				dbuf_sync_indirect(dr, tx);
4689 			else
4690 				dbuf_sync_leaf(dr, tx);
4691 		}
4692 	}
4693 }
4694 
4695 static void
4696 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4697 {
4698 	(void) buf;
4699 	dmu_buf_impl_t *db = vdb;
4700 	dnode_t *dn;
4701 	blkptr_t *bp = zio->io_bp;
4702 	blkptr_t *bp_orig = &zio->io_bp_orig;
4703 	spa_t *spa = zio->io_spa;
4704 	int64_t delta;
4705 	uint64_t fill = 0;
4706 	int i;
4707 
4708 	ASSERT3P(db->db_blkptr, !=, NULL);
4709 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4710 
4711 	DB_DNODE_ENTER(db);
4712 	dn = DB_DNODE(db);
4713 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4714 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4715 	zio->io_prev_space_delta = delta;
4716 
4717 	if (bp->blk_birth != 0) {
4718 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4719 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4720 		    (db->db_blkid == DMU_SPILL_BLKID &&
4721 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4722 		    BP_IS_EMBEDDED(bp));
4723 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4724 	}
4725 
4726 	mutex_enter(&db->db_mtx);
4727 
4728 #ifdef ZFS_DEBUG
4729 	if (db->db_blkid == DMU_SPILL_BLKID) {
4730 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4731 		ASSERT(!(BP_IS_HOLE(bp)) &&
4732 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4733 	}
4734 #endif
4735 
4736 	if (db->db_level == 0) {
4737 		mutex_enter(&dn->dn_mtx);
4738 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4739 		    db->db_blkid != DMU_SPILL_BLKID) {
4740 			ASSERT0(db->db_objset->os_raw_receive);
4741 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4742 		}
4743 		mutex_exit(&dn->dn_mtx);
4744 
4745 		if (dn->dn_type == DMU_OT_DNODE) {
4746 			i = 0;
4747 			while (i < db->db.db_size) {
4748 				dnode_phys_t *dnp =
4749 				    (void *)(((char *)db->db.db_data) + i);
4750 
4751 				i += DNODE_MIN_SIZE;
4752 				if (dnp->dn_type != DMU_OT_NONE) {
4753 					fill++;
4754 					for (int j = 0; j < dnp->dn_nblkptr;
4755 					    j++) {
4756 						(void) zfs_blkptr_verify(spa,
4757 						    &dnp->dn_blkptr[j],
4758 						    BLK_CONFIG_SKIP,
4759 						    BLK_VERIFY_HALT);
4760 					}
4761 					if (dnp->dn_flags &
4762 					    DNODE_FLAG_SPILL_BLKPTR) {
4763 						(void) zfs_blkptr_verify(spa,
4764 						    DN_SPILL_BLKPTR(dnp),
4765 						    BLK_CONFIG_SKIP,
4766 						    BLK_VERIFY_HALT);
4767 					}
4768 					i += dnp->dn_extra_slots *
4769 					    DNODE_MIN_SIZE;
4770 				}
4771 			}
4772 		} else {
4773 			if (BP_IS_HOLE(bp)) {
4774 				fill = 0;
4775 			} else {
4776 				fill = 1;
4777 			}
4778 		}
4779 	} else {
4780 		blkptr_t *ibp = db->db.db_data;
4781 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4782 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4783 			if (BP_IS_HOLE(ibp))
4784 				continue;
4785 			(void) zfs_blkptr_verify(spa, ibp,
4786 			    BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4787 			fill += BP_GET_FILL(ibp);
4788 		}
4789 	}
4790 	DB_DNODE_EXIT(db);
4791 
4792 	if (!BP_IS_EMBEDDED(bp))
4793 		BP_SET_FILL(bp, fill);
4794 
4795 	mutex_exit(&db->db_mtx);
4796 
4797 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4798 	*db->db_blkptr = *bp;
4799 	dmu_buf_unlock_parent(db, dblt, FTAG);
4800 }
4801 
4802 /*
4803  * This function gets called just prior to running through the compression
4804  * stage of the zio pipeline. If we're an indirect block comprised of only
4805  * holes, then we want this indirect to be compressed away to a hole. In
4806  * order to do that we must zero out any information about the holes that
4807  * this indirect points to prior to before we try to compress it.
4808  */
4809 static void
4810 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4811 {
4812 	(void) zio, (void) buf;
4813 	dmu_buf_impl_t *db = vdb;
4814 	dnode_t *dn;
4815 	blkptr_t *bp;
4816 	unsigned int epbs, i;
4817 
4818 	ASSERT3U(db->db_level, >, 0);
4819 	DB_DNODE_ENTER(db);
4820 	dn = DB_DNODE(db);
4821 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4822 	ASSERT3U(epbs, <, 31);
4823 
4824 	/* Determine if all our children are holes */
4825 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4826 		if (!BP_IS_HOLE(bp))
4827 			break;
4828 	}
4829 
4830 	/*
4831 	 * If all the children are holes, then zero them all out so that
4832 	 * we may get compressed away.
4833 	 */
4834 	if (i == 1ULL << epbs) {
4835 		/*
4836 		 * We only found holes. Grab the rwlock to prevent
4837 		 * anybody from reading the blocks we're about to
4838 		 * zero out.
4839 		 */
4840 		rw_enter(&db->db_rwlock, RW_WRITER);
4841 		memset(db->db.db_data, 0, db->db.db_size);
4842 		rw_exit(&db->db_rwlock);
4843 	}
4844 	DB_DNODE_EXIT(db);
4845 }
4846 
4847 static void
4848 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4849 {
4850 	(void) buf;
4851 	dmu_buf_impl_t *db = vdb;
4852 	blkptr_t *bp_orig = &zio->io_bp_orig;
4853 	blkptr_t *bp = db->db_blkptr;
4854 	objset_t *os = db->db_objset;
4855 	dmu_tx_t *tx = os->os_synctx;
4856 
4857 	ASSERT0(zio->io_error);
4858 	ASSERT(db->db_blkptr == bp);
4859 
4860 	/*
4861 	 * For nopwrites and rewrites we ensure that the bp matches our
4862 	 * original and bypass all the accounting.
4863 	 */
4864 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4865 		ASSERT(BP_EQUAL(bp, bp_orig));
4866 	} else {
4867 		dsl_dataset_t *ds = os->os_dsl_dataset;
4868 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4869 		dsl_dataset_block_born(ds, bp, tx);
4870 	}
4871 
4872 	mutex_enter(&db->db_mtx);
4873 
4874 	DBUF_VERIFY(db);
4875 
4876 	dbuf_dirty_record_t *dr = db->db_data_pending;
4877 	dnode_t *dn = dr->dr_dnode;
4878 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4879 	ASSERT(dr->dr_dbuf == db);
4880 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4881 	list_remove(&db->db_dirty_records, dr);
4882 
4883 #ifdef ZFS_DEBUG
4884 	if (db->db_blkid == DMU_SPILL_BLKID) {
4885 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4886 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4887 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4888 	}
4889 #endif
4890 
4891 	if (db->db_level == 0) {
4892 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4893 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4894 		if (db->db_state != DB_NOFILL) {
4895 			if (dr->dt.dl.dr_data != NULL &&
4896 			    dr->dt.dl.dr_data != db->db_buf) {
4897 				arc_buf_destroy(dr->dt.dl.dr_data, db);
4898 			}
4899 		}
4900 	} else {
4901 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4902 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4903 		if (!BP_IS_HOLE(db->db_blkptr)) {
4904 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4905 			    SPA_BLKPTRSHIFT;
4906 			ASSERT3U(db->db_blkid, <=,
4907 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4908 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4909 			    db->db.db_size);
4910 		}
4911 		mutex_destroy(&dr->dt.di.dr_mtx);
4912 		list_destroy(&dr->dt.di.dr_children);
4913 	}
4914 
4915 	cv_broadcast(&db->db_changed);
4916 	ASSERT(db->db_dirtycnt > 0);
4917 	db->db_dirtycnt -= 1;
4918 	db->db_data_pending = NULL;
4919 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4920 
4921 	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4922 	    zio->io_txg);
4923 
4924 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
4925 }
4926 
4927 static void
4928 dbuf_write_nofill_ready(zio_t *zio)
4929 {
4930 	dbuf_write_ready(zio, NULL, zio->io_private);
4931 }
4932 
4933 static void
4934 dbuf_write_nofill_done(zio_t *zio)
4935 {
4936 	dbuf_write_done(zio, NULL, zio->io_private);
4937 }
4938 
4939 static void
4940 dbuf_write_override_ready(zio_t *zio)
4941 {
4942 	dbuf_dirty_record_t *dr = zio->io_private;
4943 	dmu_buf_impl_t *db = dr->dr_dbuf;
4944 
4945 	dbuf_write_ready(zio, NULL, db);
4946 }
4947 
4948 static void
4949 dbuf_write_override_done(zio_t *zio)
4950 {
4951 	dbuf_dirty_record_t *dr = zio->io_private;
4952 	dmu_buf_impl_t *db = dr->dr_dbuf;
4953 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4954 
4955 	mutex_enter(&db->db_mtx);
4956 	if (!BP_EQUAL(zio->io_bp, obp)) {
4957 		if (!BP_IS_HOLE(obp))
4958 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4959 		arc_release(dr->dt.dl.dr_data, db);
4960 	}
4961 	mutex_exit(&db->db_mtx);
4962 
4963 	dbuf_write_done(zio, NULL, db);
4964 
4965 	if (zio->io_abd != NULL)
4966 		abd_free(zio->io_abd);
4967 }
4968 
4969 typedef struct dbuf_remap_impl_callback_arg {
4970 	objset_t	*drica_os;
4971 	uint64_t	drica_blk_birth;
4972 	dmu_tx_t	*drica_tx;
4973 } dbuf_remap_impl_callback_arg_t;
4974 
4975 static void
4976 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4977     void *arg)
4978 {
4979 	dbuf_remap_impl_callback_arg_t *drica = arg;
4980 	objset_t *os = drica->drica_os;
4981 	spa_t *spa = dmu_objset_spa(os);
4982 	dmu_tx_t *tx = drica->drica_tx;
4983 
4984 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4985 
4986 	if (os == spa_meta_objset(spa)) {
4987 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4988 	} else {
4989 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4990 		    size, drica->drica_blk_birth, tx);
4991 	}
4992 }
4993 
4994 static void
4995 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4996 {
4997 	blkptr_t bp_copy = *bp;
4998 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4999 	dbuf_remap_impl_callback_arg_t drica;
5000 
5001 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5002 
5003 	drica.drica_os = dn->dn_objset;
5004 	drica.drica_blk_birth = bp->blk_birth;
5005 	drica.drica_tx = tx;
5006 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
5007 	    &drica)) {
5008 		/*
5009 		 * If the blkptr being remapped is tracked by a livelist,
5010 		 * then we need to make sure the livelist reflects the update.
5011 		 * First, cancel out the old blkptr by appending a 'FREE'
5012 		 * entry. Next, add an 'ALLOC' to track the new version. This
5013 		 * way we avoid trying to free an inaccurate blkptr at delete.
5014 		 * Note that embedded blkptrs are not tracked in livelists.
5015 		 */
5016 		if (dn->dn_objset != spa_meta_objset(spa)) {
5017 			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
5018 			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
5019 			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
5020 				ASSERT(!BP_IS_EMBEDDED(bp));
5021 				ASSERT(dsl_dir_is_clone(ds->ds_dir));
5022 				ASSERT(spa_feature_is_enabled(spa,
5023 				    SPA_FEATURE_LIVELIST));
5024 				bplist_append(&ds->ds_dir->dd_pending_frees,
5025 				    bp);
5026 				bplist_append(&ds->ds_dir->dd_pending_allocs,
5027 				    &bp_copy);
5028 			}
5029 		}
5030 
5031 		/*
5032 		 * The db_rwlock prevents dbuf_read_impl() from
5033 		 * dereferencing the BP while we are changing it.  To
5034 		 * avoid lock contention, only grab it when we are actually
5035 		 * changing the BP.
5036 		 */
5037 		if (rw != NULL)
5038 			rw_enter(rw, RW_WRITER);
5039 		*bp = bp_copy;
5040 		if (rw != NULL)
5041 			rw_exit(rw);
5042 	}
5043 }
5044 
5045 /*
5046  * Remap any existing BP's to concrete vdevs, if possible.
5047  */
5048 static void
5049 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
5050 {
5051 	spa_t *spa = dmu_objset_spa(db->db_objset);
5052 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5053 
5054 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
5055 		return;
5056 
5057 	if (db->db_level > 0) {
5058 		blkptr_t *bp = db->db.db_data;
5059 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
5060 			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
5061 		}
5062 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
5063 		dnode_phys_t *dnp = db->db.db_data;
5064 		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
5065 		    DMU_OT_DNODE);
5066 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
5067 		    i += dnp[i].dn_extra_slots + 1) {
5068 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
5069 				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
5070 				    &dn->dn_dbuf->db_rwlock);
5071 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
5072 				    tx);
5073 			}
5074 		}
5075 	}
5076 }
5077 
5078 
5079 /*
5080  * Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
5081  * Caller is responsible for issuing the zio_[no]wait(dr->dr_zio).
5082  */
5083 static void
5084 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5085 {
5086 	dmu_buf_impl_t *db = dr->dr_dbuf;
5087 	dnode_t *dn = dr->dr_dnode;
5088 	objset_t *os;
5089 	dmu_buf_impl_t *parent = db->db_parent;
5090 	uint64_t txg = tx->tx_txg;
5091 	zbookmark_phys_t zb;
5092 	zio_prop_t zp;
5093 	zio_t *pio; /* parent I/O */
5094 	int wp_flag = 0;
5095 
5096 	ASSERT(dmu_tx_is_syncing(tx));
5097 
5098 	os = dn->dn_objset;
5099 
5100 	if (db->db_state != DB_NOFILL) {
5101 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5102 			/*
5103 			 * Private object buffers are released here rather
5104 			 * than in dbuf_dirty() since they are only modified
5105 			 * in the syncing context and we don't want the
5106 			 * overhead of making multiple copies of the data.
5107 			 */
5108 			if (BP_IS_HOLE(db->db_blkptr)) {
5109 				arc_buf_thaw(data);
5110 			} else {
5111 				dbuf_release_bp(db);
5112 			}
5113 			dbuf_remap(dn, db, tx);
5114 		}
5115 	}
5116 
5117 	if (parent != dn->dn_dbuf) {
5118 		/* Our parent is an indirect block. */
5119 		/* We have a dirty parent that has been scheduled for write. */
5120 		ASSERT(parent && parent->db_data_pending);
5121 		/* Our parent's buffer is one level closer to the dnode. */
5122 		ASSERT(db->db_level == parent->db_level-1);
5123 		/*
5124 		 * We're about to modify our parent's db_data by modifying
5125 		 * our block pointer, so the parent must be released.
5126 		 */
5127 		ASSERT(arc_released(parent->db_buf));
5128 		pio = parent->db_data_pending->dr_zio;
5129 	} else {
5130 		/* Our parent is the dnode itself. */
5131 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5132 		    db->db_blkid != DMU_SPILL_BLKID) ||
5133 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5134 		if (db->db_blkid != DMU_SPILL_BLKID)
5135 			ASSERT3P(db->db_blkptr, ==,
5136 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
5137 		pio = dn->dn_zio;
5138 	}
5139 
5140 	ASSERT(db->db_level == 0 || data == db->db_buf);
5141 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
5142 	ASSERT(pio);
5143 
5144 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5145 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5146 	    db->db.db_object, db->db_level, db->db_blkid);
5147 
5148 	if (db->db_blkid == DMU_SPILL_BLKID)
5149 		wp_flag = WP_SPILL;
5150 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5151 
5152 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5153 
5154 	/*
5155 	 * We copy the blkptr now (rather than when we instantiate the dirty
5156 	 * record), because its value can change between open context and
5157 	 * syncing context. We do not need to hold dn_struct_rwlock to read
5158 	 * db_blkptr because we are in syncing context.
5159 	 */
5160 	dr->dr_bp_copy = *db->db_blkptr;
5161 
5162 	if (db->db_level == 0 &&
5163 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5164 		/*
5165 		 * The BP for this block has been provided by open context
5166 		 * (by dmu_sync() or dmu_buf_write_embedded()).
5167 		 */
5168 		abd_t *contents = (data != NULL) ?
5169 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5170 
5171 		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5172 		    contents, db->db.db_size, db->db.db_size, &zp,
5173 		    dbuf_write_override_ready, NULL,
5174 		    dbuf_write_override_done,
5175 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5176 		mutex_enter(&db->db_mtx);
5177 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5178 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5179 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5180 		    dr->dt.dl.dr_brtwrite);
5181 		mutex_exit(&db->db_mtx);
5182 	} else if (db->db_state == DB_NOFILL) {
5183 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5184 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5185 		dr->dr_zio = zio_write(pio, os->os_spa, txg,
5186 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5187 		    dbuf_write_nofill_ready, NULL,
5188 		    dbuf_write_nofill_done, db,
5189 		    ZIO_PRIORITY_ASYNC_WRITE,
5190 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5191 	} else {
5192 		ASSERT(arc_released(data));
5193 
5194 		/*
5195 		 * For indirect blocks, we want to setup the children
5196 		 * ready callback so that we can properly handle an indirect
5197 		 * block that only contains holes.
5198 		 */
5199 		arc_write_done_func_t *children_ready_cb = NULL;
5200 		if (db->db_level != 0)
5201 			children_ready_cb = dbuf_write_children_ready;
5202 
5203 		dr->dr_zio = arc_write(pio, os->os_spa, txg,
5204 		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5205 		    dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
5206 		    children_ready_cb, dbuf_write_done, db,
5207 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5208 	}
5209 }
5210 
5211 EXPORT_SYMBOL(dbuf_find);
5212 EXPORT_SYMBOL(dbuf_is_metadata);
5213 EXPORT_SYMBOL(dbuf_destroy);
5214 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5215 EXPORT_SYMBOL(dbuf_whichblock);
5216 EXPORT_SYMBOL(dbuf_read);
5217 EXPORT_SYMBOL(dbuf_unoverride);
5218 EXPORT_SYMBOL(dbuf_free_range);
5219 EXPORT_SYMBOL(dbuf_new_size);
5220 EXPORT_SYMBOL(dbuf_release_bp);
5221 EXPORT_SYMBOL(dbuf_dirty);
5222 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5223 EXPORT_SYMBOL(dmu_buf_will_dirty);
5224 EXPORT_SYMBOL(dmu_buf_is_dirty);
5225 EXPORT_SYMBOL(dmu_buf_will_clone);
5226 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5227 EXPORT_SYMBOL(dmu_buf_will_fill);
5228 EXPORT_SYMBOL(dmu_buf_fill_done);
5229 EXPORT_SYMBOL(dmu_buf_rele);
5230 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5231 EXPORT_SYMBOL(dbuf_prefetch);
5232 EXPORT_SYMBOL(dbuf_hold_impl);
5233 EXPORT_SYMBOL(dbuf_hold);
5234 EXPORT_SYMBOL(dbuf_hold_level);
5235 EXPORT_SYMBOL(dbuf_create_bonus);
5236 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5237 EXPORT_SYMBOL(dbuf_rm_spill);
5238 EXPORT_SYMBOL(dbuf_add_ref);
5239 EXPORT_SYMBOL(dbuf_rele);
5240 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5241 EXPORT_SYMBOL(dbuf_refcount);
5242 EXPORT_SYMBOL(dbuf_sync_list);
5243 EXPORT_SYMBOL(dmu_buf_set_user);
5244 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5245 EXPORT_SYMBOL(dmu_buf_get_user);
5246 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5247 
5248 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5249 	"Maximum size in bytes of the dbuf cache.");
5250 
5251 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5252 	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5253 
5254 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5255 	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5256 
5257 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5258 	"Maximum size in bytes of dbuf metadata cache.");
5259 
5260 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5261 	"Set size of dbuf cache to log2 fraction of arc size.");
5262 
5263 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5264 	"Set size of dbuf metadata cache to log2 fraction of arc size.");
5265 
5266 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5267 	"Set size of dbuf cache mutex array as log2 shift.");
5268