/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "hash_table_count",			KSTAT_DATA_UINT64 },
	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {					\
	uint64_t _m;							\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;						\
}
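/*
 * Illustrative expansion (a sketch, not invoked anywhere): DBUF_STAT_MAX
 * implements a lock-free "store maximum" via compare-and-swap.  A
 * hypothetical DBUF_STAT_MAX(hash_chain_max, 9) behaves as:
 *
 *	uint64_t _m;
 *	while (9 > (_m = dbuf_stats.hash_chain_max.value.ui64) &&
 *	    _m != atomic_cas_64(&dbuf_stats.hash_chain_max.value.ui64, _m, 9))
 *		continue;
 *
 * The loop exits as soon as the stored value is already >= 9 or the CAS
 * installs 9; it retries only when another thread updated the counter
 * between the load and the CAS.
 */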

static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                      evicting     eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
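/*
 * Worked example (hypothetical numbers): if dbuf_cache_target_bytes()
 * returns 100 MiB with the default dbuf_cache_hiwater_pct and
 * dbuf_cache_lowater_pct of 10, then per the formulas below:
 *
 *	hiwater = 100 MiB + (100 MiB * 10) / 100 = 110 MiB
 *	lowater = 100 MiB - (100 MiB * 10) / 100 =  90 MiB
 *
 * The eviction thread is signalled once the cache exceeds 100 MiB, evicts
 * down to 90 MiB, and callers begin evicting synchronously above 110 MiB.
 */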

/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;

static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}
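/*
 * Caller sketch (hypothetical; the real caller is dbuf_create()): a
 * non-NULL return means another thread won the insert race, and the
 * existing dbuf is returned with its db_mtx held:
 *
 *	dmu_buf_impl_t *odb = dbuf_hash_insert(db);
 *	if (odb != NULL) {
 *		DBUF_STAT_BUMP(hash_insert_race);
 *		(use odb, which holds db_mtx; discard the new db)
 *	}
 */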

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		blkptr_t *bp = db->db_blkptr;
		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64-bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the low water mark. Once a dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}
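/*
 * Expected call pattern (a sketch; the actual caller is
 * dbuf_rele_and_unlock(), later in this file): the caller first accounts
 * the dbuf in the cache, then passes the updated size so the checks above
 * can run without taking dbuf_evict_lock:
 *
 *	uint64_t size = zfs_refcount_add_many(
 *	    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
 *	...
 *	dbuf_evict_notify(size);
 */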

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
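	/*
	 * Worked example (hypothetical machine): with 4 GiB of memory and
	 * the default zfs_arc_average_blocksize of 8 KiB, the loop below
	 * stops at hsize = 2^16, since 65536 * 8192 == 512 MiB is exactly
	 * arc_all_memory() / 8.  The table then occupies
	 * 65536 * sizeof (void *) = 512 KiB.
	 */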
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets.  A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
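/*
 * Worked example (hypothetical dnode): with 128 KiB data blocks
 * (dn_datablkshift = 17), 128 KiB indirect blocks (dn_indblkshift = 17)
 * and SPA_BLKPTRSHIFT = 7 (1024 block pointers per indirect block), the
 * level 1 block covering byte offset 1 GiB is:
 *
 *	exp   = 17 + 1 * (17 - 7) = 27
 *	blkid = (1ULL << 30) >> 27 = 8
 *
 * i.e. the ninth L1 block, each L1 block spanning 2^27 = 128 MiB of file
 * data.
 */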

/*
 * This function is used to lock the parent of the provided dbuf. This should be
 * used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
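/*
 * Typical usage sketch (hypothetical caller): the returned lock type must
 * be threaded through to the matching unlock, since the dbuf's parent may
 * change while the lock is held:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	(read db->db_blkptr here)
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */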

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	(void) zb, (void) bp;
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "i/o error");
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		memset(buf->b_data, 0, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "freed in flight");
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "successful read");
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * Shortcut for performing reads on bonus dbufs.  Returns
 * an error if we fail to verify the dnode associated with
 * a decrypted block. Otherwise success.
 */
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	int bonuslen, max_bonuslen, err;

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err)
		return (err);

	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(DB_DNODE_HELD(db));
	ASSERT3U(bonuslen, <=, db->db.db_size);
	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
	if (bonuslen < max_bonuslen)
		memset(db->db.db_data, 0, max_bonuslen);
	if (bonuslen)
		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
	db->db_state = DB_CACHED;
	DTRACE_SET_STATE(db, "bonus buffer filled");
	return (0);
}

static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
	blkptr_t *bps = db->db.db_data;
	uint32_t indbs = 1ULL << dn->dn_indblkshift;
	int n_bps = indbs >> SPA_BLKPTRSHIFT;

	for (int i = 0; i < n_bps; i++) {
		blkptr_t *bp = &bps[i];

		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
		BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
	}
}

/*
 * Handle reads on dbufs that are holes, if necessary.  This function
 * requires that the dbuf's mutex is held. Returns success (0) if action
 * was taken, ENOENT if no action was taken.
 */
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	int is_hole = bp == NULL || BP_IS_HOLE(bp);
	/*
	 * For level 0 blocks only, if the above check fails:
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (!is_hole && db->db_level == 0)
		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);

	if (is_hole) {
		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
		memset(db->db.db_data, 0, db->db.db_size);

		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
		    bp->blk_birth != 0) {
			dbuf_handle_indirect_hole(db, dn, bp);
		}
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "hole read satisfied");
		return (0);
	}
	return (ENOENT);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
	    !os->os_encrypted || os->os_raw_receive)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Drops db_mtx and the parent lock specified by dblt and tag before
 * returning.
 */
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
    db_lock_type_t dblt, const void *tag)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags;
	blkptr_t bp, *bpp;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_parent == NULL ||
	    RW_LOCK_HELD(&db->db_parent->db_rwlock));

	if (db->db_blkid == DMU_BONUS_BLKID) {
		err = dbuf_read_bonus(db, dn, flags);
		goto early_unlock;
	}

	if (db->db_state == DB_UNCACHED) {
		if (db->db_blkptr == NULL) {
			bpp = NULL;
		} else {
			bp = *db->db_blkptr;
			bpp = &bp;
		}
	} else {
		dbuf_dirty_record_t *dr;

		ASSERT3S(db->db_state, ==, DB_NOFILL);

		/*
		 * Block cloning: If we have a pending block clone,
		 * we don't want to read the underlying block, but the content
		 * of the block being cloned, so we have the most recent data.
		 */
		dr = list_head(&db->db_dirty_records);
		if (dr == NULL || !dr->dt.dl.dr_brtwrite) {
			err = EIO;
			goto early_unlock;
		}
		bp = dr->dt.dl.dr_overridden_by;
		bpp = &bp;
	}

	err = dbuf_read_hole(db, dn, bpp);
	if (err == 0)
		goto early_unlock;

	ASSERT(bpp != NULL);

	/*
	 * Any attempt to read a redacted block should result in an error. This
	 * will never happen under normal conditions, but can be useful for
	 * debugging purposes.
	 */
	if (BP_IS_REDACTED(bpp)) {
		ASSERT(dsl_dataset_feature_is_active(
		    db->db_objset->os_dsl_dataset,
		    SPA_FEATURE_REDACTED_DATASETS));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
		spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (!DBUF_IS_CACHEABLE(db))
		aflags |= ARC_FLAG_UNCACHED;
	else if (dbuf_is_l2cacheable(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we have our
	 * own copy so that we can release the parent's rwlock. We have to
	 * do that so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, bpp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

1697 	/*
1698 	 * If the last dirty record for this dbuf has not yet synced
1699 	 * and it's referencing the dbuf data, either:
1700 	 *	reset the reference to point to a new copy,
1701 	 * or (if there are no active holders)
1702 	 *	just null out the current db_data pointer.
1703 	 */
1704 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1705 	if (db->db_blkid == DMU_BONUS_BLKID) {
1706 		dnode_t *dn = DB_DNODE(db);
1707 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1708 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1709 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1710 		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1711 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1712 		dnode_t *dn = DB_DNODE(db);
1713 		int size = arc_buf_size(db->db_buf);
1714 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1715 		spa_t *spa = db->db_objset->os_spa;
1716 		enum zio_compress compress_type =
1717 		    arc_get_compression(db->db_buf);
1718 		uint8_t complevel = arc_get_complevel(db->db_buf);
1719 
1720 		if (arc_is_encrypted(db->db_buf)) {
1721 			boolean_t byteorder;
1722 			uint8_t salt[ZIO_DATA_SALT_LEN];
1723 			uint8_t iv[ZIO_DATA_IV_LEN];
1724 			uint8_t mac[ZIO_DATA_MAC_LEN];
1725 
1726 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1727 			    iv, mac);
1728 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1729 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1730 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1731 			    compress_type, complevel);
1732 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1733 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1734 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1735 			    size, arc_buf_lsize(db->db_buf), compress_type,
1736 			    complevel);
1737 		} else {
1738 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1739 		}
1740 		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1741 	} else {
1742 		db->db_buf = NULL;
1743 		dbuf_clear_data(db);
1744 	}
1745 }
1746 
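/*
 * Read the contents of this dbuf, waiting for any in-flight I/O unless the
 * caller passes a parent zio or sets DB_RF_NEVERWAIT.  As a rough sketch
 * (mirroring the dmu.c consumers; locking and error handling abridged),
 * a typical open-context caller looks like:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (db != NULL) {
 *		int err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * With a NULL zio and a readable block pointer, dbuf_read() creates its own
 * zio_root() and waits for it, so the returned error reflects the actual
 * read.
 */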
1747 int
1748 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1749 {
1750 	int err = 0;
1751 	boolean_t prefetch;
1752 	dnode_t *dn;
1753 
1754 	/*
1755 	 * We don't have to hold the mutex to check db_state because it
1756 	 * can't be freed while we have a hold on the buffer.
1757 	 */
1758 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1759 
1760 	DB_DNODE_ENTER(db);
1761 	dn = DB_DNODE(db);
1762 
1763 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1764 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1765 
1766 	mutex_enter(&db->db_mtx);
1767 	if (flags & DB_RF_PARTIAL_FIRST)
1768 		db->db_partial_read = B_TRUE;
1769 	else if (!(flags & DB_RF_PARTIAL_MORE))
1770 		db->db_partial_read = B_FALSE;
1771 	if (db->db_state == DB_CACHED) {
1772 		/*
1773 		 * Ensure that this block's dnode has been decrypted if
1774 		 * the caller has requested decrypted data.
1775 		 */
1776 		err = dbuf_read_verify_dnode_crypt(db, flags);
1777 
1778 		/*
1779 		 * If the arc buf is compressed or encrypted and the caller
1780 		 * requested uncompressed data, we need to untransform it
1781 		 * before returning. We also call arc_untransform() on any
1782 		 * unauthenticated blocks, which will verify their MAC if
1783 		 * the key is now available.
1784 		 */
1785 		if (err == 0 && db->db_buf != NULL &&
1786 		    (flags & DB_RF_NO_DECRYPT) == 0 &&
1787 		    (arc_is_encrypted(db->db_buf) ||
1788 		    arc_is_unauthenticated(db->db_buf) ||
1789 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1790 			spa_t *spa = dn->dn_objset->os_spa;
1791 			zbookmark_phys_t zb;
1792 
1793 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1794 			    db->db.db_object, db->db_level, db->db_blkid);
1795 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1796 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1797 			dbuf_set_data(db, db->db_buf);
1798 		}
1799 		mutex_exit(&db->db_mtx);
1800 		if (err == 0 && prefetch) {
1801 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1802 			    B_FALSE, flags & DB_RF_HAVESTRUCT);
1803 		}
1804 		DB_DNODE_EXIT(db);
1805 		DBUF_STAT_BUMP(hash_hits);
1806 	} else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1807 		boolean_t need_wait = B_FALSE;
1808 
1809 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1810 
1811 		if (zio == NULL && (db->db_state == DB_NOFILL ||
1812 		    (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1813 			spa_t *spa = dn->dn_objset->os_spa;
1814 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1815 			need_wait = B_TRUE;
1816 		}
1817 		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1818 		/*
1819 		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1820 		 * for us
1821 		 */
1822 		if (!err && prefetch) {
1823 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1824 			    db->db_state != DB_CACHED,
1825 			    flags & DB_RF_HAVESTRUCT);
1826 		}
1827 
1828 		DB_DNODE_EXIT(db);
1829 		DBUF_STAT_BUMP(hash_misses);
1830 
1831 		/*
1832 		 * If we created a zio_root we must execute it to avoid
1833 		 * leaking it, even if it isn't attached to any work due
1834 		 * to an error in dbuf_read_impl().
1835 		 */
1836 		if (need_wait) {
1837 			if (err == 0)
1838 				err = zio_wait(zio);
1839 			else
1840 				VERIFY0(zio_wait(zio));
1841 		}
1842 	} else {
1843 		/*
1844 		 * Another reader came in while the dbuf was in flight
1845 		 * between UNCACHED and CACHED.  Either a writer will finish
1846 		 * writing the buffer (sending the dbuf to CACHED) or the
1847 		 * first reader's request will reach the read_done callback
1848 		 * and send the dbuf to CACHED.  Otherwise, a failure
1849 		 * occurred and the dbuf went to UNCACHED.
1850 		 */
1851 		mutex_exit(&db->db_mtx);
1852 		if (prefetch) {
1853 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1854 			    B_TRUE, flags & DB_RF_HAVESTRUCT);
1855 		}
1856 		DB_DNODE_EXIT(db);
1857 		DBUF_STAT_BUMP(hash_misses);
1858 
1859 		/* Skip the wait per the caller's request. */
1860 		if ((flags & DB_RF_NEVERWAIT) == 0) {
1861 			mutex_enter(&db->db_mtx);
1862 			while (db->db_state == DB_READ ||
1863 			    db->db_state == DB_FILL) {
1864 				ASSERT(db->db_state == DB_READ ||
1865 				    (flags & DB_RF_HAVESTRUCT) == 0);
1866 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1867 				    db, zio_t *, zio);
1868 				cv_wait(&db->db_changed, &db->db_mtx);
1869 			}
1870 			if (db->db_state == DB_UNCACHED)
1871 				err = SET_ERROR(EIO);
1872 			mutex_exit(&db->db_mtx);
1873 		}
1874 	}
1875 
1876 	return (err);
1877 }
1878 
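/*
 * Prepare this dbuf to be entirely overwritten without reading the current
 * block contents: wait out any in-flight READ or FILL, then (if uncached)
 * attach a fresh arc buf and transition to DB_FILL.  Fill-path callers are
 * expected to populate db->db.db_data and then call dmu_buf_fill_done().
 */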
1879 static void
1880 dbuf_noread(dmu_buf_impl_t *db)
1881 {
1882 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1883 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1884 	mutex_enter(&db->db_mtx);
1885 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1886 		cv_wait(&db->db_changed, &db->db_mtx);
1887 	if (db->db_state == DB_UNCACHED) {
1888 		ASSERT(db->db_buf == NULL);
1889 		ASSERT(db->db.db_data == NULL);
1890 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1891 		db->db_state = DB_FILL;
1892 		DTRACE_SET_STATE(db, "assigning filled buffer");
1893 	} else if (db->db_state == DB_NOFILL) {
1894 		dbuf_clear_data(db);
1895 	} else {
1896 		ASSERT3U(db->db_state, ==, DB_CACHED);
1897 	}
1898 	mutex_exit(&db->db_mtx);
1899 }
1900 
1901 void
1902 dbuf_unoverride(dbuf_dirty_record_t *dr)
1903 {
1904 	dmu_buf_impl_t *db = dr->dr_dbuf;
1905 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1906 	uint64_t txg = dr->dr_txg;
1907 	boolean_t release;
1908 
1909 	ASSERT(MUTEX_HELD(&db->db_mtx));
1910 	/*
1911 	 * This assert is valid because dmu_sync() expects to be called by
1912 	 * a zilog's get_data while holding a range lock.  This call only
1913 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1914 	 */
1915 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1916 	ASSERT(db->db_level == 0);
1917 
1918 	if (db->db_blkid == DMU_BONUS_BLKID ||
1919 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1920 		return;
1921 
1922 	ASSERT(db->db_data_pending != dr);
1923 
1924 	/* free this block */
1925 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1926 		zio_free(db->db_objset->os_spa, txg, bp);
1927 
1928 	release = !dr->dt.dl.dr_brtwrite;
1929 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1930 	dr->dt.dl.dr_nopwrite = B_FALSE;
1931 	dr->dt.dl.dr_brtwrite = B_FALSE;
1932 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1933 
1934 	/*
1935 	 * Release the already-written buffer, so we leave it in
1936 	 * a consistent dirty state.  Note that all callers are
1937 	 * modifying the buffer, so they will immediately do
1938 	 * another (redundant) arc_release().  Therefore, leave
1939 	 * the buf thawed to save the effort of freezing &
1940 	 * immediately re-thawing it.
1941 	 */
1942 	if (release)
1943 		arc_release(dr->dt.dl.dr_data, db);
1944 }
1945 
1946 /*
1947  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1948  * data blocks in the free range, so that any future readers will find
1949  * empty blocks.
1950  */
1951 void
1952 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1953     dmu_tx_t *tx)
1954 {
1955 	dmu_buf_impl_t *db_search;
1956 	dmu_buf_impl_t *db, *db_next;
1957 	uint64_t txg = tx->tx_txg;
1958 	avl_index_t where;
1959 	dbuf_dirty_record_t *dr;
1960 
1961 	if (end_blkid > dn->dn_maxblkid &&
1962 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1963 		end_blkid = dn->dn_maxblkid;
1964 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1965 	    (u_longlong_t)end_blkid);
1966 
1967 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1968 	db_search->db_level = 0;
1969 	db_search->db_blkid = start_blkid;
1970 	db_search->db_state = DB_SEARCH;
1971 
1972 	mutex_enter(&dn->dn_dbufs_mtx);
1973 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1974 	ASSERT3P(db, ==, NULL);
1975 
1976 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1977 
1978 	for (; db != NULL; db = db_next) {
1979 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1980 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1981 
1982 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
1983 			break;
1984 		}
1985 		ASSERT3U(db->db_blkid, >=, start_blkid);
1986 
1987 		/* found a level 0 buffer in the range */
1988 		mutex_enter(&db->db_mtx);
1989 		if (dbuf_undirty(db, tx)) {
1990 			/* mutex has been dropped and dbuf destroyed */
1991 			continue;
1992 		}
1993 
1994 		if (db->db_state == DB_UNCACHED ||
1995 		    db->db_state == DB_NOFILL ||
1996 		    db->db_state == DB_EVICTING) {
1997 			ASSERT(db->db.db_data == NULL);
1998 			mutex_exit(&db->db_mtx);
1999 			continue;
2000 		}
2001 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2002 			/* will be handled in dbuf_read_done or dbuf_rele */
2003 			db->db_freed_in_flight = TRUE;
2004 			mutex_exit(&db->db_mtx);
2005 			continue;
2006 		}
2007 		if (zfs_refcount_count(&db->db_holds) == 0) {
2008 			ASSERT(db->db_buf);
2009 			dbuf_destroy(db);
2010 			continue;
2011 		}
2012 		/* The dbuf is referenced */
2013 
2014 		dr = list_head(&db->db_dirty_records);
2015 		if (dr != NULL) {
2016 			if (dr->dr_txg == txg) {
2017 				/*
2018 				 * This buffer is "in-use"; re-adjust the file
2019 				 * size to reflect that this buffer may
2020 				 * contain new data when we sync.
2021 				 */
2022 				if (db->db_blkid != DMU_SPILL_BLKID &&
2023 				    db->db_blkid > dn->dn_maxblkid)
2024 					dn->dn_maxblkid = db->db_blkid;
2025 				dbuf_unoverride(dr);
2026 			} else {
2027 				/*
2028 				 * This dbuf is not dirty in the open context.
2029 				 * Either uncache it (if it's not referenced in
2030 				 * the open context) or reset its contents to
2031 				 * empty.
2032 				 */
2033 				dbuf_fix_old_data(db, txg);
2034 			}
2035 		}
2036 		/* clear the contents if it's cached */
2037 		if (db->db_state == DB_CACHED) {
2038 			ASSERT(db->db.db_data != NULL);
2039 			arc_release(db->db_buf, db);
2040 			rw_enter(&db->db_rwlock, RW_WRITER);
2041 			memset(db->db.db_data, 0, db->db.db_size);
2042 			rw_exit(&db->db_rwlock);
2043 			arc_buf_freeze(db->db_buf);
2044 		}
2045 
2046 		mutex_exit(&db->db_mtx);
2047 	}
2048 
2049 	mutex_exit(&dn->dn_dbufs_mtx);
2050 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
2051 }
2052 
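/*
 * Change the size of this (non-bonus) dbuf: allocate a new arc buf of the
 * requested size, copy over the old contents, zero-fill any growth, and
 * update the dirty record's accounting.  The dbuf is dirtied in this tx.
 */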
2053 void
2054 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2055 {
2056 	arc_buf_t *buf, *old_buf;
2057 	dbuf_dirty_record_t *dr;
2058 	int osize = db->db.db_size;
2059 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2060 	dnode_t *dn;
2061 
2062 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2063 
2064 	DB_DNODE_ENTER(db);
2065 	dn = DB_DNODE(db);
2066 
2067 	/*
2068 	 * XXX we should be doing a dbuf_read, checking the return
2069 	 * value and returning that up to our callers
2070 	 */
2071 	dmu_buf_will_dirty(&db->db, tx);
2072 
2073 	/* create the data buffer for the new block */
2074 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2075 
2076 	/* copy old block data to the new block */
2077 	old_buf = db->db_buf;
2078 	memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2079 	/* zero the remainder */
2080 	if (size > osize)
2081 		memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2082 
2083 	mutex_enter(&db->db_mtx);
2084 	dbuf_set_data(db, buf);
2085 	arc_buf_destroy(old_buf, db);
2086 	db->db.db_size = size;
2087 
2088 	dr = list_head(&db->db_dirty_records);
2089 	/* dirty record added by dmu_buf_will_dirty() */
2090 	VERIFY(dr != NULL);
2091 	if (db->db_level == 0)
2092 		dr->dt.dl.dr_data = buf;
2093 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2094 	ASSERT3U(dr->dr_accounted, ==, osize);
2095 	dr->dr_accounted = size;
2096 	mutex_exit(&db->db_mtx);
2097 
2098 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2099 	DB_DNODE_EXIT(db);
2100 }
2101 
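/*
 * Release this dbuf's arc buf, making it anonymous so that the block
 * pointer it was read from may be safely overwritten.  Only legal from
 * syncing context, as the asserts below verify.
 */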
2102 void
2103 dbuf_release_bp(dmu_buf_impl_t *db)
2104 {
2105 	objset_t *os __maybe_unused = db->db_objset;
2106 
2107 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2108 	ASSERT(arc_released(os->os_phys_buf) ||
2109 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
2110 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2111 
2112 	(void) arc_release(db->db_buf, db);
2113 }
2114 
2115 /*
2116  * We already have a dirty record for this TXG, and we are being
2117  * dirtied again.
2118  */
2119 static void
2120 dbuf_redirty(dbuf_dirty_record_t *dr)
2121 {
2122 	dmu_buf_impl_t *db = dr->dr_dbuf;
2123 
2124 	ASSERT(MUTEX_HELD(&db->db_mtx));
2125 
2126 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2127 		/*
2128 		 * If this buffer has already been written out,
2129 		 * we now need to reset its state.
2130 		 */
2131 		dbuf_unoverride(dr);
2132 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2133 		    db->db_state != DB_NOFILL) {
2134 			/* Already released on initial dirty, so just thaw. */
2135 			ASSERT(arc_released(db->db_buf));
2136 			arc_buf_thaw(db->db_buf);
2137 		}
2138 	}
2139 }
2140 
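/*
 * Dirty a block without instantiating a dbuf for it.  The "lightweight"
 * dirty record carries only the block id, and is linked either directly
 * onto the dnode's dirty list (for single-level objects) or under the
 * dirty record of the level-1 indirect that covers the block.  Returns
 * NULL if the parent indirect could not be held or read.
 */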
2141 dbuf_dirty_record_t *
2142 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2143 {
2144 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2145 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2146 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2147 	ASSERT(dn->dn_maxblkid >= blkid);
2148 
2149 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2150 	list_link_init(&dr->dr_dirty_node);
2151 	list_link_init(&dr->dr_dbuf_node);
2152 	dr->dr_dnode = dn;
2153 	dr->dr_txg = tx->tx_txg;
2154 	dr->dt.dll.dr_blkid = blkid;
2155 	dr->dr_accounted = dn->dn_datablksz;
2156 
2157 	/*
2158 	 * There should not be any dbuf for the block that we're dirtying.
2159 	 * Otherwise the buffer contents could be inconsistent between the
2160 	 * dbuf and the lightweight dirty record.
2161 	 */
2162 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2163 	    NULL));
2164 
2165 	mutex_enter(&dn->dn_mtx);
2166 	int txgoff = tx->tx_txg & TXG_MASK;
2167 	if (dn->dn_free_ranges[txgoff] != NULL) {
2168 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2169 	}
2170 
2171 	if (dn->dn_nlevels == 1) {
2172 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2173 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2174 		mutex_exit(&dn->dn_mtx);
2175 		rw_exit(&dn->dn_struct_rwlock);
2176 		dnode_setdirty(dn, tx);
2177 	} else {
2178 		mutex_exit(&dn->dn_mtx);
2179 
2180 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2181 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2182 		    1, blkid >> epbs, FTAG);
2183 		rw_exit(&dn->dn_struct_rwlock);
2184 		if (parent_db == NULL) {
2185 			kmem_free(dr, sizeof (*dr));
2186 			return (NULL);
2187 		}
2188 		int err = dbuf_read(parent_db, NULL,
2189 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2190 		if (err != 0) {
2191 			dbuf_rele(parent_db, FTAG);
2192 			kmem_free(dr, sizeof (*dr));
2193 			return (NULL);
2194 		}
2195 
2196 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2197 		dbuf_rele(parent_db, FTAG);
2198 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2199 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2200 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2201 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2202 		dr->dr_parent = parent_dr;
2203 	}
2204 
2205 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2206 
2207 	return (dr);
2208 }
2209 
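/*
 * Mark this dbuf as modified in the given transaction and return its dirty
 * record, creating one (and recursively dirtying the parent indirects up to
 * the dnode) on the first modification in this txg.  Most writers reach
 * this through dmu_buf_will_dirty(); a minimal sketch, assuming an assigned
 * tx:
 *
 *	dmu_buf_will_dirty(&db->db, tx);
 *	... modify db->db.db_data ...
 */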
2210 dbuf_dirty_record_t *
2211 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2212 {
2213 	dnode_t *dn;
2214 	objset_t *os;
2215 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2216 	int txgoff = tx->tx_txg & TXG_MASK;
2217 	boolean_t drop_struct_rwlock = B_FALSE;
2218 
2219 	ASSERT(tx->tx_txg != 0);
2220 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2221 	DMU_TX_DIRTY_BUF(tx, db);
2222 
2223 	DB_DNODE_ENTER(db);
2224 	dn = DB_DNODE(db);
2225 	/*
2226 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2227 	 * objects may be dirtied in syncing context, but only if they
2228 	 * were already pre-dirtied in open context.
2229 	 */
2230 #ifdef ZFS_DEBUG
2231 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2232 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2233 		    RW_READER, FTAG);
2234 	}
2235 	ASSERT(!dmu_tx_is_syncing(tx) ||
2236 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2237 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2238 	    dn->dn_objset->os_dsl_dataset == NULL);
2239 	if (dn->dn_objset->os_dsl_dataset != NULL)
2240 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2241 #endif
2242 	/*
2243 	 * We make this assert for private objects as well, but after we
2244 	 * check if we're already dirty.  They are allowed to re-dirty
2245 	 * in syncing context.
2246 	 */
2247 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2248 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2249 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2250 
2251 	mutex_enter(&db->db_mtx);
2252 	/*
2253 	 * XXX make this true for indirects too?  The problem is that
2254 	 * transactions created with dmu_tx_create_assigned() from
2255 	 * syncing context don't bother holding ahead.
2256 	 */
2257 	ASSERT(db->db_level != 0 ||
2258 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2259 	    db->db_state == DB_NOFILL);
2260 
2261 	mutex_enter(&dn->dn_mtx);
2262 	dnode_set_dirtyctx(dn, tx, db);
2263 	if (tx->tx_txg > dn->dn_dirty_txg)
2264 		dn->dn_dirty_txg = tx->tx_txg;
2265 	mutex_exit(&dn->dn_mtx);
2266 
2267 	if (db->db_blkid == DMU_SPILL_BLKID)
2268 		dn->dn_have_spill = B_TRUE;
2269 
2270 	/*
2271 	 * If this buffer is already dirty, we're done.
2272 	 */
2273 	dr_head = list_head(&db->db_dirty_records);
2274 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2275 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2276 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2277 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2278 		DB_DNODE_EXIT(db);
2279 
2280 		dbuf_redirty(dr_next);
2281 		mutex_exit(&db->db_mtx);
2282 		return (dr_next);
2283 	}
2284 
2285 	/*
2286 	 * Only valid if not already dirty.
2287 	 */
2288 	ASSERT(dn->dn_object == 0 ||
2289 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2290 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2291 
2292 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2293 
2294 	/*
2295 	 * We should only be dirtying in syncing context if it's the
2296 	 * mos or we're initializing the os or it's a special object.
2297 	 * However, we are allowed to dirty in syncing context provided
2298 	 * we already dirtied it in open context.  Hence we must make
2299 	 * this assertion only if we're not already dirty.
2300 	 */
2301 	os = dn->dn_objset;
2302 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2303 #ifdef ZFS_DEBUG
2304 	if (dn->dn_objset->os_dsl_dataset != NULL)
2305 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2306 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2307 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2308 	if (dn->dn_objset->os_dsl_dataset != NULL)
2309 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2310 #endif
2311 	ASSERT(db->db.db_size != 0);
2312 
2313 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2314 
2315 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2316 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2317 	}
2318 
2319 	/*
2320 	 * If this buffer is dirty in an old transaction group we need
2321 	 * to make a copy of it so that the changes we make in this
2322 	 * transaction group won't leak out when we sync the older txg.
2323 	 */
2324 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2325 	list_link_init(&dr->dr_dirty_node);
2326 	list_link_init(&dr->dr_dbuf_node);
2327 	dr->dr_dnode = dn;
2328 	if (db->db_level == 0) {
2329 		void *data_old = db->db_buf;
2330 
2331 		if (db->db_state != DB_NOFILL) {
2332 			if (db->db_blkid == DMU_BONUS_BLKID) {
2333 				dbuf_fix_old_data(db, tx->tx_txg);
2334 				data_old = db->db.db_data;
2335 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2336 				/*
2337 				 * Release the data buffer from the cache so
2338 				 * that we can modify it without impacting
2339 				 * possible other users of this cached data
2340 				 * block.  Note that indirect blocks and
2341 				 * private objects are not released until the
2342 				 * syncing state (since they are only modified
2343 				 * then).
2344 				 */
2345 				arc_release(db->db_buf, db);
2346 				dbuf_fix_old_data(db, tx->tx_txg);
2347 				data_old = db->db_buf;
2348 			}
2349 			ASSERT(data_old != NULL);
2350 		}
2351 		dr->dt.dl.dr_data = data_old;
2352 	} else {
2353 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2354 		list_create(&dr->dt.di.dr_children,
2355 		    sizeof (dbuf_dirty_record_t),
2356 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2357 	}
2358 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2359 		dr->dr_accounted = db->db.db_size;
2360 	}
2361 	dr->dr_dbuf = db;
2362 	dr->dr_txg = tx->tx_txg;
2363 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2364 
2365 	/*
2366 	 * We could have been freed_in_flight between the dbuf_noread
2367 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2368 	 * happened after the free.
2369 	 */
2370 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2371 	    db->db_blkid != DMU_SPILL_BLKID) {
2372 		mutex_enter(&dn->dn_mtx);
2373 		if (dn->dn_free_ranges[txgoff] != NULL) {
2374 			range_tree_clear(dn->dn_free_ranges[txgoff],
2375 			    db->db_blkid, 1);
2376 		}
2377 		mutex_exit(&dn->dn_mtx);
2378 		db->db_freed_in_flight = FALSE;
2379 	}
2380 
2381 	/*
2382 	 * This buffer is now part of this txg
2383 	 */
2384 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2385 	db->db_dirtycnt += 1;
2386 	ASSERT3U(db->db_dirtycnt, <=, 3);
2387 
2388 	mutex_exit(&db->db_mtx);
2389 
2390 	if (db->db_blkid == DMU_BONUS_BLKID ||
2391 	    db->db_blkid == DMU_SPILL_BLKID) {
2392 		mutex_enter(&dn->dn_mtx);
2393 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2394 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2395 		mutex_exit(&dn->dn_mtx);
2396 		dnode_setdirty(dn, tx);
2397 		DB_DNODE_EXIT(db);
2398 		return (dr);
2399 	}
2400 
2401 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2402 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2403 		drop_struct_rwlock = B_TRUE;
2404 	}
2405 
2406 	/*
2407 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2408 	 * when we get to syncing context we will need to decrement its
2409 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2410 	 * syncing context won't have to wait for the i/o.
2411 	 */
2412 	if (db->db_blkptr != NULL) {
2413 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2414 		ddt_prefetch(os->os_spa, db->db_blkptr);
2415 		dmu_buf_unlock_parent(db, dblt, FTAG);
2416 	}
2417 
2418 	/*
2419 	 * We need to hold the dn_struct_rwlock to make this assertion,
2420 	 * because it protects dn_phys / dn_next_nlevels from changing.
2421 	 */
2422 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2423 	    dn->dn_phys->dn_nlevels > db->db_level ||
2424 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2425 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2426 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2427 
2429 	if (db->db_level == 0) {
2430 		ASSERT(!db->db_objset->os_raw_receive ||
2431 		    dn->dn_maxblkid >= db->db_blkid);
2432 		dnode_new_blkid(dn, db->db_blkid, tx,
2433 		    drop_struct_rwlock, B_FALSE);
2434 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2435 	}
2436 
2437 	if (db->db_level+1 < dn->dn_nlevels) {
2438 		dmu_buf_impl_t *parent = db->db_parent;
2439 		dbuf_dirty_record_t *di;
2440 		int parent_held = FALSE;
2441 
2442 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2443 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2444 			parent = dbuf_hold_level(dn, db->db_level + 1,
2445 			    db->db_blkid >> epbs, FTAG);
2446 			ASSERT(parent != NULL);
2447 			parent_held = TRUE;
2448 		}
2449 		if (drop_struct_rwlock)
2450 			rw_exit(&dn->dn_struct_rwlock);
2451 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2452 		di = dbuf_dirty(parent, tx);
2453 		if (parent_held)
2454 			dbuf_rele(parent, FTAG);
2455 
2456 		mutex_enter(&db->db_mtx);
2457 		/*
2458 		 * Since we've dropped the mutex, it's possible that
2459 		 * dbuf_undirty() might have changed this out from under us.
2460 		 */
2461 		if (list_head(&db->db_dirty_records) == dr ||
2462 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2463 			mutex_enter(&di->dt.di.dr_mtx);
2464 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2465 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2466 			list_insert_tail(&di->dt.di.dr_children, dr);
2467 			mutex_exit(&di->dt.di.dr_mtx);
2468 			dr->dr_parent = di;
2469 		}
2470 		mutex_exit(&db->db_mtx);
2471 	} else {
2472 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2473 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2474 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2475 		mutex_enter(&dn->dn_mtx);
2476 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2477 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2478 		mutex_exit(&dn->dn_mtx);
2479 		if (drop_struct_rwlock)
2480 			rw_exit(&dn->dn_struct_rwlock);
2481 	}
2482 
2483 	dnode_setdirty(dn, tx);
2484 	DB_DNODE_EXIT(db);
2485 	return (dr);
2486 }
2487 
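/*
 * Tear down the dirty record of a bonus buffer during sync, freeing the
 * private copy of the bonus data if one was made.
 */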
2488 static void
2489 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2490 {
2491 	dmu_buf_impl_t *db = dr->dr_dbuf;
2492 
2493 	if (dr->dt.dl.dr_data != db->db.db_data) {
2494 		struct dnode *dn = dr->dr_dnode;
2495 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2496 
2497 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2498 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2499 	}
2500 	db->db_data_pending = NULL;
2501 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2502 	list_remove(&db->db_dirty_records, dr);
2503 	if (dr->dr_dbuf->db_level != 0) {
2504 		mutex_destroy(&dr->dt.di.dr_mtx);
2505 		list_destroy(&dr->dt.di.dr_children);
2506 	}
2507 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2508 	ASSERT3U(db->db_dirtycnt, >, 0);
2509 	db->db_dirtycnt -= 1;
2510 }
2511 
2512 /*
2513  * Undirty a buffer in the transaction group referenced by the given
2514  * transaction.  Return whether this evicted the dbuf.
2515  */
2516 boolean_t
2517 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2518 {
2519 	uint64_t txg = tx->tx_txg;
2520 	boolean_t brtwrite;
2521 
2522 	ASSERT(txg != 0);
2523 
2524 	/*
2525 	 * Due to our use of dn_nlevels below, this can only be called
2526 	 * in open context, unless we are operating on the MOS.
2527 	 * From syncing context, dn_nlevels may be different from the
2528 	 * dn_nlevels used when dbuf was dirtied.
2529 	 */
2530 	ASSERT(db->db_objset ==
2531 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2532 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2533 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2534 	ASSERT0(db->db_level);
2535 	ASSERT(MUTEX_HELD(&db->db_mtx));
2536 
2537 	/*
2538 	 * If this buffer is not dirty, we're done.
2539 	 */
2540 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2541 	if (dr == NULL)
2542 		return (B_FALSE);
2543 	ASSERT(dr->dr_dbuf == db);
2544 
2545 	brtwrite = dr->dt.dl.dr_brtwrite;
2546 	if (brtwrite) {
2547 		/*
2548 		 * We are freeing a block that we cloned in the same
2549 		 * transaction group.
2550 		 */
2551 		brt_pending_remove(dmu_objset_spa(db->db_objset),
2552 		    &dr->dt.dl.dr_overridden_by, tx);
2553 	}
2554 
2555 	dnode_t *dn = dr->dr_dnode;
2556 
2557 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2558 
2559 	ASSERT(db->db.db_size != 0);
2560 
2561 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2562 	    dr->dr_accounted, txg);
2563 
2564 	list_remove(&db->db_dirty_records, dr);
2565 
2566 	/*
2567 	 * Note that there are three places in dbuf_dirty()
2568 	 * where this dirty record may be put on a list.
2569 	 * Make sure to do a list_remove corresponding to
2570 	 * every one of those list_insert calls.
2571 	 */
2572 	if (dr->dr_parent) {
2573 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2574 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2575 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2576 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2577 	    db->db_level + 1 == dn->dn_nlevels) {
2578 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2579 		mutex_enter(&dn->dn_mtx);
2580 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2581 		mutex_exit(&dn->dn_mtx);
2582 	}
2583 
2584 	if (db->db_state != DB_NOFILL && !brtwrite) {
2585 		dbuf_unoverride(dr);
2586 
2587 		ASSERT(db->db_buf != NULL);
2588 		ASSERT(dr->dt.dl.dr_data != NULL);
2589 		if (dr->dt.dl.dr_data != db->db_buf)
2590 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2591 	}
2592 
2593 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2594 
2595 	ASSERT(db->db_dirtycnt > 0);
2596 	db->db_dirtycnt -= 1;
2597 
2598 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2599 		ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2600 		    arc_released(db->db_buf));
2601 		dbuf_destroy(db);
2602 		return (B_TRUE);
2603 	}
2604 
2605 	return (B_FALSE);
2606 }
2607 
2608 static void
2609 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2610 {
2611 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2612 	boolean_t undirty = B_FALSE;
2613 
2614 	ASSERT(tx->tx_txg != 0);
2615 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2616 
2617 	/*
2618 	 * Quick check for dirtiness.  For already dirty blocks, this
2619 	 * reduces runtime of this function by >90%, and overall performance
2620 	 * by 50% for some workloads (e.g. file deletion with indirect blocks
2621 	 * cached).
2622 	 */
2623 	mutex_enter(&db->db_mtx);
2624 
2625 	if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2626 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2627 		/*
2628 		 * It's possible that it is already dirty but not cached,
2629 		 * because there are some calls to dbuf_dirty() that don't
2630 		 * go through dmu_buf_will_dirty().
2631 		 */
2632 		if (dr != NULL) {
2633 			if (dr->dt.dl.dr_brtwrite) {
2634 				/*
2635 				 * Block cloning: If we are dirtying a cloned
2636 				 * block, we cannot simply redirty it, because
2637 				 * this dr has no data associated with it.
2638 				 * We will go through a full undirtying below,
2639 				 * before dirtying it again.
2640 				 */
2641 				undirty = B_TRUE;
2642 			} else {
2643 				/* This dbuf is already dirty and cached. */
2644 				dbuf_redirty(dr);
2645 				mutex_exit(&db->db_mtx);
2646 				return;
2647 			}
2648 		}
2649 	}
2650 	mutex_exit(&db->db_mtx);
2651 
2652 	DB_DNODE_ENTER(db);
2653 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2654 		flags |= DB_RF_HAVESTRUCT;
2655 	DB_DNODE_EXIT(db);
2656 
2657 	/*
2658 	 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2659 	 * want to make sure dbuf_read() will read the pending cloned block and
2660 	 * not the underlying block that is being replaced. dbuf_undirty() will
2661 	 * do dbuf_unoverride(), so we will end up with the cloned block's
2662 	 * content, without the overridden BP.
2663 	 */
2664 	(void) dbuf_read(db, NULL, flags);
2665 	if (undirty) {
2666 		mutex_enter(&db->db_mtx);
2667 		VERIFY(!dbuf_undirty(db, tx));
2668 		mutex_exit(&db->db_mtx);
2669 	}
2670 	(void) dbuf_dirty(db, tx);
2671 }
2672 
2673 void
2674 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2675 {
2676 	dmu_buf_will_dirty_impl(db_fake,
2677 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2678 }
2679 
2680 boolean_t
2681 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2682 {
2683 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2684 	dbuf_dirty_record_t *dr;
2685 
2686 	mutex_enter(&db->db_mtx);
2687 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2688 	mutex_exit(&db->db_mtx);
2689 	return (dr != NULL);
2690 }
2691 
2692 void
2693 dmu_buf_will_clone(dmu_buf_t *db_fake, dmu_tx_t *tx)
2694 {
2695 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2696 
2697 	/*
2698 	 * Block cloning: We are going to clone into this block, so undirty any
2699 	 * modifications made to this block so far in this txg. This includes
2700 	 * writes and clones into this block.
2701 	 */
2702 	mutex_enter(&db->db_mtx);
2703 	VERIFY(!dbuf_undirty(db, tx));
2704 	ASSERT(list_head(&db->db_dirty_records) == NULL);
2705 	if (db->db_buf != NULL) {
2706 		arc_buf_destroy(db->db_buf, db);
2707 		db->db_buf = NULL;
2708 	}
2709 	mutex_exit(&db->db_mtx);
2710 
2711 	dmu_buf_will_not_fill(db_fake, tx);
2712 }
2713 
2714 void
2715 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2716 {
2717 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2718 
2719 	mutex_enter(&db->db_mtx);
2720 	db->db_state = DB_NOFILL;
2721 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2722 	mutex_exit(&db->db_mtx);
2723 
2724 	dbuf_noread(db);
2725 	(void) dbuf_dirty(db, tx);
2726 }
2727 
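/*
 * Note that the caller intends to replace the entire contents of this
 * buffer in this transaction, so the current contents need not be read
 * from disk.  dmu_buf_fill_done() must be called once the new contents
 * are in place.
 */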
2728 void
2729 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2730 {
2731 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2732 
2733 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2734 	ASSERT(tx->tx_txg != 0);
2735 	ASSERT(db->db_level == 0);
2736 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2737 
2738 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2739 	    dmu_tx_private_ok(tx));
2740 
2741 	mutex_enter(&db->db_mtx);
2742 	if (db->db_state == DB_NOFILL) {
2743 		/*
2744 		 * Block cloning: We will be completely overwriting a block
2745 		 * cloned in this transaction group, so let's undirty the
2746 		 * pending clone and mark the block as uncached. This will be
2747 		 * as if the clone was never done.
2748 		 */
2749 		VERIFY(!dbuf_undirty(db, tx));
2750 		db->db_state = DB_UNCACHED;
2751 	}
2752 	mutex_exit(&db->db_mtx);
2753 
2754 	dbuf_noread(db);
2755 	(void) dbuf_dirty(db, tx);
2756 }
2757 
2758 /*
2759  * This function is effectively the same as dmu_buf_will_dirty(), but
2760  * indicates the caller expects raw encrypted data in the db, and provides
2761  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2762  * blkptr_t when this dbuf is written.  This is only used for blocks of
2763  * dnodes, during raw receive.
2764  */
2765 void
2766 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2767     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2768 {
2769 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2770 	dbuf_dirty_record_t *dr;
2771 
2772 	/*
2773 	 * dr_has_raw_params is only processed for blocks of dnodes
2774 	 * (see dbuf_sync_dnode_leaf_crypt()).
2775 	 */
2776 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2777 	ASSERT3U(db->db_level, ==, 0);
2778 	ASSERT(db->db_objset->os_raw_receive);
2779 
2780 	dmu_buf_will_dirty_impl(db_fake,
2781 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2782 
2783 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2784 
2785 	ASSERT3P(dr, !=, NULL);
2786 
2787 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2788 	dr->dt.dl.dr_byteorder = byteorder;
2789 	memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2790 	memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2791 	memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2792 }
2793 
2794 static void
2795 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2796 {
2797 	struct dirty_leaf *dl;
2798 	dbuf_dirty_record_t *dr;
2799 
2800 	dr = list_head(&db->db_dirty_records);
2801 	ASSERT3P(dr, !=, NULL);
2802 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2803 	dl = &dr->dt.dl;
2804 	dl->dr_overridden_by = *bp;
2805 	dl->dr_override_state = DR_OVERRIDDEN;
2806 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2807 }
2808 
2809 void
2810 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2811 {
2812 	(void) tx;
2813 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2814 	dbuf_states_t old_state;
2815 	mutex_enter(&db->db_mtx);
2816 	DBUF_VERIFY(db);
2817 
2818 	old_state = db->db_state;
2819 	db->db_state = DB_CACHED;
2820 	if (old_state == DB_FILL) {
2821 		if (db->db_level == 0 && db->db_freed_in_flight) {
2822 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2823 			/* we were freed while filling */
2824 			/* XXX dbuf_undirty? */
2825 			memset(db->db.db_data, 0, db->db.db_size);
2826 			db->db_freed_in_flight = FALSE;
2827 			DTRACE_SET_STATE(db,
2828 			    "fill done handling freed in flight");
2829 		} else {
2830 			DTRACE_SET_STATE(db, "fill done");
2831 		}
2832 		cv_broadcast(&db->db_changed);
2833 	}
2834 	mutex_exit(&db->db_mtx);
2835 }
2836 
2837 void
2838 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2839     bp_embedded_type_t etype, enum zio_compress comp,
2840     int uncompressed_size, int compressed_size, int byteorder,
2841     dmu_tx_t *tx)
2842 {
2843 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2844 	struct dirty_leaf *dl;
2845 	dmu_object_type_t type;
2846 	dbuf_dirty_record_t *dr;
2847 
2848 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2849 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2850 		    SPA_FEATURE_EMBEDDED_DATA));
2851 	}
2852 
2853 	DB_DNODE_ENTER(db);
2854 	type = DB_DNODE(db)->dn_type;
2855 	DB_DNODE_EXIT(db);
2856 
2857 	ASSERT0(db->db_level);
2858 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2859 
2860 	dmu_buf_will_not_fill(dbuf, tx);
2861 
2862 	dr = list_head(&db->db_dirty_records);
2863 	ASSERT3P(dr, !=, NULL);
2864 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2865 	dl = &dr->dt.dl;
2866 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2867 	    data, comp, uncompressed_size, compressed_size);
2868 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2869 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2870 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2871 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2872 
2873 	dl->dr_override_state = DR_OVERRIDDEN;
2874 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2875 }
2876 
2877 void
2878 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2879 {
2880 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2881 	dmu_object_type_t type;
2882 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2883 	    SPA_FEATURE_REDACTED_DATASETS));
2884 
2885 	DB_DNODE_ENTER(db);
2886 	type = DB_DNODE(db)->dn_type;
2887 	DB_DNODE_EXIT(db);
2888 
2889 	ASSERT0(db->db_level);
2890 	dmu_buf_will_not_fill(dbuf, tx);
2891 
2892 	blkptr_t bp = { { { {0} } } };
2893 	BP_SET_TYPE(&bp, type);
2894 	BP_SET_LEVEL(&bp, 0);
2895 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2896 	BP_SET_REDACTED(&bp);
2897 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2898 
2899 	dbuf_override_impl(db, &bp, tx);
2900 }
2901 
2902 /*
2903  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2904  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2905  */
2906 void
2907 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2908 {
2909 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2910 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2911 	ASSERT(db->db_level == 0);
2912 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2913 	ASSERT(buf != NULL);
2914 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2915 	ASSERT(tx->tx_txg != 0);
2916 
2917 	arc_return_buf(buf, db);
2918 	ASSERT(arc_released(buf));
2919 
2920 	mutex_enter(&db->db_mtx);
2921 
2922 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2923 		cv_wait(&db->db_changed, &db->db_mtx);
2924 
2925 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2926 
2927 	if (db->db_state == DB_CACHED &&
2928 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2929 		/*
2930 		 * In practice, we will never have a case where we have an
2931 		 * encrypted arc buffer while additional holds exist on the
2932 		 * dbuf. We don't handle this here so we simply assert that
2933 		 * fact instead.
2934 		 */
2935 		ASSERT(!arc_is_encrypted(buf));
2936 		mutex_exit(&db->db_mtx);
2937 		(void) dbuf_dirty(db, tx);
2938 		memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2939 		arc_buf_destroy(buf, db);
2940 		return;
2941 	}
2942 
2943 	if (db->db_state == DB_CACHED) {
2944 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2945 
2946 		ASSERT(db->db_buf != NULL);
2947 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2948 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2949 
2950 			if (!arc_released(db->db_buf)) {
2951 				ASSERT(dr->dt.dl.dr_override_state ==
2952 				    DR_OVERRIDDEN);
2953 				arc_release(db->db_buf, db);
2954 			}
2955 			dr->dt.dl.dr_data = buf;
2956 			arc_buf_destroy(db->db_buf, db);
2957 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2958 			arc_release(db->db_buf, db);
2959 			arc_buf_destroy(db->db_buf, db);
2960 		}
2961 		db->db_buf = NULL;
2962 	}
2963 	ASSERT(db->db_buf == NULL);
2964 	dbuf_set_data(db, buf);
2965 	db->db_state = DB_FILL;
2966 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
2967 	mutex_exit(&db->db_mtx);
2968 	(void) dbuf_dirty(db, tx);
2969 	dmu_buf_fill_done(&db->db, tx);
2970 }
2971 
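/*
 * Evict a dbuf with no remaining holds.  Non-bonus buffers are removed
 * from the dbuf hash table and their dnode's dbuf list, and their hold on
 * the dnode is released.  Called with db_mtx held; the mutex is dropped
 * along the way and the dbuf is freed before returning.
 */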
2972 void
2973 dbuf_destroy(dmu_buf_impl_t *db)
2974 {
2975 	dnode_t *dn;
2976 	dmu_buf_impl_t *parent = db->db_parent;
2977 	dmu_buf_impl_t *dndb;
2978 
2979 	ASSERT(MUTEX_HELD(&db->db_mtx));
2980 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2981 
2982 	if (db->db_buf != NULL) {
2983 		arc_buf_destroy(db->db_buf, db);
2984 		db->db_buf = NULL;
2985 	}
2986 
2987 	if (db->db_blkid == DMU_BONUS_BLKID) {
2988 		int slots = DB_DNODE(db)->dn_num_slots;
2989 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2990 		if (db->db.db_data != NULL) {
2991 			kmem_free(db->db.db_data, bonuslen);
2992 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
2993 			db->db_state = DB_UNCACHED;
2994 			DTRACE_SET_STATE(db, "buffer cleared");
2995 		}
2996 	}
2997 
2998 	dbuf_clear_data(db);
2999 
3000 	if (multilist_link_active(&db->db_cache_link)) {
3001 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3002 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3003 
3004 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3005 		(void) zfs_refcount_remove_many(
3006 		    &dbuf_caches[db->db_caching_status].size,
3007 		    db->db.db_size, db);
3008 
3009 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3010 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3011 		} else {
3012 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3013 			DBUF_STAT_BUMPDOWN(cache_count);
3014 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3015 			    db->db.db_size);
3016 		}
3017 		db->db_caching_status = DB_NO_CACHE;
3018 	}
3019 
3020 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3021 	ASSERT(db->db_data_pending == NULL);
3022 	ASSERT(list_is_empty(&db->db_dirty_records));
3023 
3024 	db->db_state = DB_EVICTING;
3025 	DTRACE_SET_STATE(db, "buffer eviction started");
3026 	db->db_blkptr = NULL;
3027 
3028 	/*
3029 	 * Now that db_state is DB_EVICTING, nobody else can find this via
3030 	 * the hash table.  We can now drop db_mtx, which allows us to
3031 	 * acquire the dn_dbufs_mtx.
3032 	 */
3033 	mutex_exit(&db->db_mtx);
3034 
3035 	DB_DNODE_ENTER(db);
3036 	dn = DB_DNODE(db);
3037 	dndb = dn->dn_dbuf;
3038 	if (db->db_blkid != DMU_BONUS_BLKID) {
3039 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3040 		if (needlock)
3041 			mutex_enter_nested(&dn->dn_dbufs_mtx,
3042 			    NESTED_SINGLE);
3043 		avl_remove(&dn->dn_dbufs, db);
3044 		membar_producer();
3045 		DB_DNODE_EXIT(db);
3046 		if (needlock)
3047 			mutex_exit(&dn->dn_dbufs_mtx);
3048 		/*
3049 		 * Decrementing the dbuf count means that the hold corresponding
3050 		 * to the removed dbuf is no longer discounted in dnode_move(),
3051 		 * so the dnode cannot be moved until after we release the hold.
3052 		 * The membar_producer() ensures visibility of the decremented
3053 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3054 		 * release any lock.
3055 		 */
3056 		mutex_enter(&dn->dn_mtx);
3057 		dnode_rele_and_unlock(dn, db, B_TRUE);
3058 		db->db_dnode_handle = NULL;
3059 
3060 		dbuf_hash_remove(db);
3061 	} else {
3062 		DB_DNODE_EXIT(db);
3063 	}
3064 
3065 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3066 
3067 	db->db_parent = NULL;
3068 
3069 	ASSERT(db->db_buf == NULL);
3070 	ASSERT(db->db.db_data == NULL);
3071 	ASSERT(db->db_hash_next == NULL);
3072 	ASSERT(db->db_blkptr == NULL);
3073 	ASSERT(db->db_data_pending == NULL);
3074 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3075 	ASSERT(!multilist_link_active(&db->db_cache_link));
3076 
3077 	/*
3078 	 * If this dbuf is referenced from an indirect dbuf,
3079 	 * decrement the ref count on the indirect dbuf.
3080 	 */
3081 	if (parent && parent != dndb) {
3082 		mutex_enter(&parent->db_mtx);
3083 		dbuf_rele_and_unlock(parent, db, B_TRUE);
3084 	}
3085 
3086 	kmem_cache_free(dbuf_kmem_cache, db);
3087 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3088 }
3089 
3090 /*
3091  * Note: While bpp will always be updated if the function returns success,
3092  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3093  * this happens when the dnode is the meta-dnode, or {user|group|project}used
3094  * object.
3095  */
3096 __attribute__((always_inline))
3097 static inline int
3098 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3099     dmu_buf_impl_t **parentp, blkptr_t **bpp)
3100 {
3101 	*parentp = NULL;
3102 	*bpp = NULL;
3103 
3104 	ASSERT(blkid != DMU_BONUS_BLKID);
3105 
3106 	if (blkid == DMU_SPILL_BLKID) {
3107 		mutex_enter(&dn->dn_mtx);
3108 		if (dn->dn_have_spill &&
3109 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3110 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3111 		else
3112 			*bpp = NULL;
3113 		dbuf_add_ref(dn->dn_dbuf, NULL);
3114 		*parentp = dn->dn_dbuf;
3115 		mutex_exit(&dn->dn_mtx);
3116 		return (0);
3117 	}
3118 
3119 	int nlevels =
3120 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3121 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3122 
3123 	ASSERT3U(level * epbs, <, 64);
3124 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3125 	/*
3126 	 * This assertion shouldn't trip as long as the max indirect block size
3127 	 * is less than 1M.  The reason for this is that up to that point,
3128 	 * the number of levels required to address an entire object with blocks
3129 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
3130 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3131 	 * (i.e. we can address the entire object), objects will all use at most
3132 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
3133 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
3134 	 * enough to address an entire object, so objects will have 5 levels,
3135 	 * but then this assertion will overflow.
3136 	 *
3137 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3138 	 * need to redo this logic to handle overflows.
3139 	 */
3140 	ASSERT(level >= nlevels ||
3141 	    ((nlevels - level - 1) * epbs) +
3142 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3143 	if (level >= nlevels ||
3144 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3145 	    ((nlevels - level - 1) * epbs)) ||
3146 	    (fail_sparse &&
3147 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3148 		/* the buffer has no parent yet */
3149 		return (SET_ERROR(ENOENT));
3150 	} else if (level < nlevels-1) {
3151 		/* this block is referenced from an indirect block */
3152 		int err;
3153 
3154 		err = dbuf_hold_impl(dn, level + 1,
3155 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3156 
3157 		if (err)
3158 			return (err);
3159 		err = dbuf_read(*parentp, NULL,
3160 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3161 		if (err) {
3162 			dbuf_rele(*parentp, NULL);
3163 			*parentp = NULL;
3164 			return (err);
3165 		}
3166 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
3167 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3168 		    (blkid & ((1ULL << epbs) - 1));
3169 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3170 			ASSERT(BP_IS_HOLE(*bpp));
3171 		rw_exit(&(*parentp)->db_rwlock);
3172 		return (0);
3173 	} else {
3174 		/* the block is referenced from the dnode */
3175 		ASSERT3U(level, ==, nlevels-1);
3176 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3177 		    blkid < dn->dn_phys->dn_nblkptr);
3178 		if (dn->dn_dbuf) {
3179 			dbuf_add_ref(dn->dn_dbuf, NULL);
3180 			*parentp = dn->dn_dbuf;
3181 		}
3182 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
3183 		return (0);
3184 	}
3185 }
3186 
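/*
 * Allocate and initialize a new dbuf for the given block, adding it
 * (except for the bonus buffer) to the dbuf hash table and the dnode's
 * dbuf list.  If another thread wins the hash-table insertion race, the
 * newly allocated dbuf is freed and the existing one is returned instead.
 */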
3187 static dmu_buf_impl_t *
3188 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3189     dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3190 {
3191 	objset_t *os = dn->dn_objset;
3192 	dmu_buf_impl_t *db, *odb;
3193 
3194 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3195 	ASSERT(dn->dn_type != DMU_OT_NONE);
3196 
3197 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3198 
3199 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3200 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3201 
3202 	db->db_objset = os;
3203 	db->db.db_object = dn->dn_object;
3204 	db->db_level = level;
3205 	db->db_blkid = blkid;
3206 	db->db_dirtycnt = 0;
3207 	db->db_dnode_handle = dn->dn_handle;
3208 	db->db_parent = parent;
3209 	db->db_blkptr = blkptr;
3210 	db->db_hash = hash;
3211 
3212 	db->db_user = NULL;
3213 	db->db_user_immediate_evict = FALSE;
3214 	db->db_freed_in_flight = FALSE;
3215 	db->db_pending_evict = FALSE;
3216 
3217 	if (blkid == DMU_BONUS_BLKID) {
3218 		ASSERT3P(parent, ==, dn->dn_dbuf);
3219 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3220 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3221 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3222 		db->db.db_offset = DMU_BONUS_BLKID;
3223 		db->db_state = DB_UNCACHED;
3224 		DTRACE_SET_STATE(db, "bonus buffer created");
3225 		db->db_caching_status = DB_NO_CACHE;
3226 		/* the bonus dbuf is not placed in the hash table */
3227 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3228 		return (db);
3229 	} else if (blkid == DMU_SPILL_BLKID) {
3230 		db->db.db_size = (blkptr != NULL) ?
3231 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3232 		db->db.db_offset = 0;
3233 	} else {
3234 		int blocksize =
3235 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3236 		db->db.db_size = blocksize;
3237 		db->db.db_offset = db->db_blkid * blocksize;
3238 	}
3239 
3240 	/*
3241 	 * Hold the dn_dbufs_mtx while we insert the new dbuf
3242 	 * in the hash table *and* add it to the dbufs list.
3243 	 * This prevents a possible deadlock with someone
3244 	 * trying to look up this dbuf before it's added to the
3245 	 * dn_dbufs list.
3246 	 */
3247 	mutex_enter(&dn->dn_dbufs_mtx);
3248 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3249 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3250 		/* someone else inserted it first */
3251 		mutex_exit(&dn->dn_dbufs_mtx);
3252 		kmem_cache_free(dbuf_kmem_cache, db);
3253 		DBUF_STAT_BUMP(hash_insert_race);
3254 		return (odb);
3255 	}
3256 	avl_add(&dn->dn_dbufs, db);
3257 
3258 	db->db_state = DB_UNCACHED;
3259 	DTRACE_SET_STATE(db, "regular buffer created");
3260 	db->db_caching_status = DB_NO_CACHE;
3261 	mutex_exit(&dn->dn_dbufs_mtx);
3262 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3263 
3264 	if (parent && parent != dn->dn_dbuf)
3265 		dbuf_add_ref(parent, db);
3266 
3267 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3268 	    zfs_refcount_count(&dn->dn_holds) > 0);
3269 	(void) zfs_refcount_add(&dn->dn_holds, db);
3270 
3271 	dprintf_dbuf(db, "db=%p\n", db);
3272 
3273 	return (db);
3274 }
3275 
3276 /*
3277  * This function returns a block pointer and information about the object,
3278  * given a dnode and a block.  This is a publicly accessible version of
3279  * dbuf_findbp that only returns some information, rather than the
3280  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3281  * should be locked as (at least) a reader.
3282  */
3283 int
3284 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3285     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3286 {
3287 	dmu_buf_impl_t *dbp = NULL;
3288 	blkptr_t *bp2;
3289 	int err = 0;
3290 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3291 
3292 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3293 	if (err == 0) {
3294 		ASSERT3P(bp2, !=, NULL);
3295 		*bp = *bp2;
3296 		if (dbp != NULL)
3297 			dbuf_rele(dbp, NULL);
3298 		if (datablkszsec != NULL)
3299 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3300 		if (indblkshift != NULL)
3301 			*indblkshift = dn->dn_phys->dn_indblkshift;
3302 	}
3303 
3304 	return (err);
3305 }
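
/*
 * Illustrative sketch (hypothetical caller; example_get_bp() is a
 * made-up name): using dbuf_dnode_findbp() to look up the block
 * pointer for a level-0 block.  Holding the dnode and taking
 * dn_struct_rwlock as reader are the documented preconditions above.
 *
 *	static int
 *	example_get_bp(dnode_t *dn, uint64_t blkid, blkptr_t *bp)
 *	{
 *		uint16_t datablkszsec;
 *		int err;
 *
 *		rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *		err = dbuf_dnode_findbp(dn, 0, blkid, bp,
 *		    &datablkszsec, NULL);
 *		rw_exit(&dn->dn_struct_rwlock);
 *		return (err);
 *	}
 */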
3306 
3307 typedef struct dbuf_prefetch_arg {
3308 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3309 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3310 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3311 	int dpa_curlevel; /* The current level that we're reading */
3312 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3313 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3314 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3315 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3316 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3317 	void *dpa_arg; /* prefetch completion arg */
3318 } dbuf_prefetch_arg_t;
3319 
3320 static void
3321 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3322 {
3323 	if (dpa->dpa_cb != NULL) {
3324 		dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3325 		    dpa->dpa_zb.zb_blkid, io_done);
3326 	}
3327 	kmem_free(dpa, sizeof (*dpa));
3328 }
3329 
3330 static void
3331 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3332     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3333 {
3334 	(void) zio, (void) zb, (void) iobp;
3335 	dbuf_prefetch_arg_t *dpa = private;
3336 
3337 	if (abuf != NULL)
3338 		arc_buf_destroy(abuf, private);
3339 
3340 	dbuf_prefetch_fini(dpa, B_TRUE);
3341 }
3342 
3343 /*
3344  * Actually issue the prefetch read for the block given.
3345  */
3346 static void
3347 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3348 {
3349 	ASSERT(!BP_IS_REDACTED(bp) ||
3350 	    dsl_dataset_feature_is_active(
3351 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3352 	    SPA_FEATURE_REDACTED_DATASETS));
3353 
3354 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3355 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3356 
3357 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3358 	arc_flags_t aflags =
3359 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3360 	    ARC_FLAG_NO_BUF;
3361 
3362 	/* dnodes are always read as raw and then converted later */
3363 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3364 	    dpa->dpa_curlevel == 0)
3365 		zio_flags |= ZIO_FLAG_RAW;
3366 
3367 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3368 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3369 	ASSERT(dpa->dpa_zio != NULL);
3370 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3371 	    dbuf_issue_final_prefetch_done, dpa,
3372 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3373 }
3374 
3375 /*
3376  * Called when an indirect block above our prefetch target is read in.  This
3377  * will either read in the next indirect block down the tree or issue the actual
3378  * prefetch if the next block down is our target.
3379  */
3380 static void
3381 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3382     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3383 {
3384 	(void) zb, (void) iobp;
3385 	dbuf_prefetch_arg_t *dpa = private;
3386 
3387 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3388 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3389 
3390 	if (abuf == NULL) {
3391 		ASSERT(zio == NULL || zio->io_error != 0);
3392 		dbuf_prefetch_fini(dpa, B_TRUE);
3393 		return;
3394 	}
3395 	ASSERT(zio == NULL || zio->io_error == 0);
3396 
3397 	/*
3398 	 * The dpa_dnode is only valid if we are called with a NULL
3399 	 * zio. This indicates that the arc_read() returned without
3400 	 * first calling zio_read() to issue a physical read. Once
3401 	 * a physical read is made the dpa_dnode must be invalidated
3402 	 * as the locks guarding it may have been dropped. If the
3403 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3404 	 * cache. To do so, we must hold the dbuf associated with the block
3405 	 * we just prefetched, read its contents so that we associate it
3406 	 * with an arc_buf_t, and then release it.
3407 	 */
3408 	if (zio != NULL) {
3409 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3410 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3411 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3412 		} else {
3413 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3414 		}
3415 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3416 
3417 		dpa->dpa_dnode = NULL;
3418 	} else if (dpa->dpa_dnode != NULL) {
3419 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3420 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3421 		    dpa->dpa_zb.zb_level));
3422 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3423 		    dpa->dpa_curlevel, curblkid, FTAG);
3424 		if (db == NULL) {
3425 			arc_buf_destroy(abuf, private);
3426 			dbuf_prefetch_fini(dpa, B_TRUE);
3427 			return;
3428 		}
3429 		(void) dbuf_read(db, NULL,
3430 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3431 		dbuf_rele(db, FTAG);
3432 	}
3433 
3434 	dpa->dpa_curlevel--;
3435 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3436 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3437 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3438 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3439 
3440 	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3441 	    dsl_dataset_feature_is_active(
3442 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3443 	    SPA_FEATURE_REDACTED_DATASETS)));
3444 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3445 		arc_buf_destroy(abuf, private);
3446 		dbuf_prefetch_fini(dpa, B_TRUE);
3447 		return;
3448 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3449 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3450 		dbuf_issue_final_prefetch(dpa, bp);
3451 	} else {
3452 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3453 		zbookmark_phys_t zb;
3454 
3455 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3456 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3457 			iter_aflags |= ARC_FLAG_L2CACHE;
3458 
3459 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3460 
3461 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3462 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3463 
3464 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3465 		    bp, dbuf_prefetch_indirect_done, dpa,
3466 		    ZIO_PRIORITY_SYNC_READ,
3467 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3468 		    &iter_aflags, &zb);
3469 	}
3470 
3471 	arc_buf_destroy(abuf, private);
3472 }
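
/*
 * Worked example of the blkid arithmetic above, assuming 128K
 * indirect blocks (so dpa_epbs == 10, i.e. 1024 blkptrs per
 * indirect): prefetching level-0 blkid 1234567 with dpa_curlevel
 * just decremented from 2 to 1 gives
 *
 *	nextblkid = 1234567 >> (10 * (1 - 0)) = 1205
 *	P2PHASE(1205, 1ULL << 10) = 181
 *
 * so the bp for level-1 block 1205 is found at index 181 of the
 * level-2 indirect block that was just read in.
 */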
3473 
3474 /*
3475  * Issue prefetch reads for the given block on the given level.  If the indirect
3476  * blocks above that block are not in memory, we will read them in
3477  * asynchronously.  As a result, this call never blocks waiting for a read to
3478  * complete. Note that the prefetch might fail if the dataset is encrypted and
3479  * the encryption key is unmapped before the IO completes.
3480  */
3481 int
3482 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3483     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3484     void *arg)
3485 {
3486 	blkptr_t bp;
3487 	int epbs, nlevels, curlevel;
3488 	uint64_t curblkid;
3489 
3490 	ASSERT(blkid != DMU_BONUS_BLKID);
3491 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3492 
3493 	if (blkid > dn->dn_maxblkid)
3494 		goto no_issue;
3495 
3496 	if (level == 0 && dnode_block_freed(dn, blkid))
3497 		goto no_issue;
3498 
3499 	/*
3500 	 * This dnode hasn't been written to disk yet, so there's nothing to
3501 	 * prefetch.
3502 	 */
3503 	nlevels = dn->dn_phys->dn_nlevels;
3504 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3505 		goto no_issue;
3506 
3507 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3508 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3509 		goto no_issue;
3510 
3511 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3512 	    level, blkid, NULL);
3513 	if (db != NULL) {
3514 		mutex_exit(&db->db_mtx);
3515 		/*
3516 		 * This dbuf already exists.  It is either CACHED, or
3517 		 * (we assume) about to be read or filled.
3518 		 */
3519 		goto no_issue;
3520 	}
3521 
3522 	/*
3523 	 * Find the closest ancestor (indirect block) of the target block
3524 	 * that is present in the cache.  In this indirect block, we will
3525 	 * find the bp that is at curlevel, curblkid.
3526 	 */
3527 	curlevel = level;
3528 	curblkid = blkid;
3529 	while (curlevel < nlevels - 1) {
3530 		int parent_level = curlevel + 1;
3531 		uint64_t parent_blkid = curblkid >> epbs;
3532 		dmu_buf_impl_t *db;
3533 
3534 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3535 		    FALSE, TRUE, FTAG, &db) == 0) {
3536 			blkptr_t *bpp = db->db_buf->b_data;
3537 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3538 			dbuf_rele(db, FTAG);
3539 			break;
3540 		}
3541 
3542 		curlevel = parent_level;
3543 		curblkid = parent_blkid;
3544 	}
3545 
3546 	if (curlevel == nlevels - 1) {
3547 		/* No cached indirect blocks found. */
3548 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3549 		bp = dn->dn_phys->dn_blkptr[curblkid];
3550 	}
3551 	ASSERT(!BP_IS_REDACTED(&bp) ||
3552 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3553 	    SPA_FEATURE_REDACTED_DATASETS));
3554 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3555 		goto no_issue;
3556 
3557 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3558 
3559 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3560 	    ZIO_FLAG_CANFAIL);
3561 
3562 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3563 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3564 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3565 	    dn->dn_object, level, blkid);
3566 	dpa->dpa_curlevel = curlevel;
3567 	dpa->dpa_prio = prio;
3568 	dpa->dpa_aflags = aflags;
3569 	dpa->dpa_spa = dn->dn_objset->os_spa;
3570 	dpa->dpa_dnode = dn;
3571 	dpa->dpa_epbs = epbs;
3572 	dpa->dpa_zio = pio;
3573 	dpa->dpa_cb = cb;
3574 	dpa->dpa_arg = arg;
3575 
3576 	if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3577 		dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3578 	else if (dnode_level_is_l2cacheable(&bp, dn, level))
3579 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3580 
3581 	/*
3582 	 * If we have the indirect just above us, no need to do the asynchronous
3583 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3584 	 * a higher level, though, we want to issue the prefetches for all the
3585 	 * indirect blocks asynchronously, so we can go on with whatever we were
3586 	 * doing.
3587 	 */
3588 	if (curlevel == level) {
3589 		ASSERT3U(curblkid, ==, blkid);
3590 		dbuf_issue_final_prefetch(dpa, &bp);
3591 	} else {
3592 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3593 		zbookmark_phys_t zb;
3594 
3595 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3596 		if (dnode_level_is_l2cacheable(&bp, dn, level))
3597 			iter_aflags |= ARC_FLAG_L2CACHE;
3598 
3599 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3600 		    dn->dn_object, curlevel, curblkid);
3601 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3602 		    &bp, dbuf_prefetch_indirect_done, dpa,
3603 		    ZIO_PRIORITY_SYNC_READ,
3604 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3605 		    &iter_aflags, &zb);
3606 	}
3607 	/*
3608 	 * We use pio here instead of dpa_zio since it's possible that
3609 	 * dpa may have already been freed.
3610 	 */
3611 	zio_nowait(pio);
3612 	return (1);
3613 no_issue:
3614 	if (cb != NULL)
3615 		cb(arg, level, blkid, B_FALSE);
3616 	return (0);
3617 }
3618 
3619 int
3620 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3621     arc_flags_t aflags)
3622 {
3624 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3625 }
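
/*
 * Illustrative sketch (hypothetical caller; the example_* names are
 * made up): prefetching a run of level-0 blocks ahead of a sequential
 * read, using the callback form to count completions.  The callback
 * fires exactly once per call, with io_done indicating whether a read
 * was actually issued and completed.
 *
 *	static void
 *	example_pf_done(void *arg, uint64_t level, uint64_t blkid,
 *	    boolean_t io_done)
 *	{
 *		atomic_dec_32((volatile uint32_t *)arg);
 *	}
 *
 *	uint32_t outstanding = nblks;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t b = start; b < start + nblks; b++) {
 *		(void) dbuf_prefetch_impl(dn, 0, b,
 *		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_NONE,
 *		    example_pf_done, &outstanding);
 *	}
 *	rw_exit(&dn->dn_struct_rwlock);
 */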
3626 
3627 /*
3628  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3629  * the case of encrypted, compressed and uncompressed buffers by
3630  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3631  * arc_alloc_compressed_buf() or arc_alloc_buf().
3632  *
3633  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3634  */
3635 noinline static void
3636 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3637 {
3638 	dbuf_dirty_record_t *dr = db->db_data_pending;
3639 	arc_buf_t *data = dr->dt.dl.dr_data;
3640 	enum zio_compress compress_type = arc_get_compression(data);
3641 	uint8_t complevel = arc_get_complevel(data);
3642 
3643 	if (arc_is_encrypted(data)) {
3644 		boolean_t byteorder;
3645 		uint8_t salt[ZIO_DATA_SALT_LEN];
3646 		uint8_t iv[ZIO_DATA_IV_LEN];
3647 		uint8_t mac[ZIO_DATA_MAC_LEN];
3648 
3649 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3650 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3651 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3652 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3653 		    compress_type, complevel));
3654 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3655 		dbuf_set_data(db, arc_alloc_compressed_buf(
3656 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3657 		    arc_buf_lsize(data), compress_type, complevel));
3658 	} else {
3659 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3660 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3661 	}
3662 
3663 	rw_enter(&db->db_rwlock, RW_WRITER);
3664 	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3665 	rw_exit(&db->db_rwlock);
3666 }
3667 
3668 /*
3669  * Returns with db_holds incremented, and db_mtx not held.
3670  * Note: dn_struct_rwlock must be held.
3671  */
3672 int
3673 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3674     boolean_t fail_sparse, boolean_t fail_uncached,
3675     const void *tag, dmu_buf_impl_t **dbp)
3676 {
3677 	dmu_buf_impl_t *db, *parent = NULL;
3678 	uint64_t hv;
3679 
3680 	/* If the pool has been created, verify the tx_sync_lock is not held */
3681 	spa_t *spa = dn->dn_objset->os_spa;
3682 	dsl_pool_t *dp = spa->spa_dsl_pool;
3683 	if (dp != NULL) {
3684 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3685 	}
3686 
3687 	ASSERT(blkid != DMU_BONUS_BLKID);
3688 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3689 	ASSERT3U(dn->dn_nlevels, >, level);
3690 
3691 	*dbp = NULL;
3692 
3693 	/* dbuf_find() returns with db_mtx held */
3694 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3695 
3696 	if (db == NULL) {
3697 		blkptr_t *bp = NULL;
3698 		int err;
3699 
3700 		if (fail_uncached)
3701 			return (SET_ERROR(ENOENT));
3702 
3703 		ASSERT3P(parent, ==, NULL);
3704 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3705 		if (fail_sparse) {
3706 			if (err == 0 && bp && BP_IS_HOLE(bp))
3707 				err = SET_ERROR(ENOENT);
3708 			if (err) {
3709 				if (parent)
3710 					dbuf_rele(parent, NULL);
3711 				return (err);
3712 			}
3713 		}
3714 		if (err && err != ENOENT)
3715 			return (err);
3716 		db = dbuf_create(dn, level, blkid, parent, bp, hv);
3717 	}
3718 
3719 	if (fail_uncached && db->db_state != DB_CACHED) {
3720 		mutex_exit(&db->db_mtx);
3721 		return (SET_ERROR(ENOENT));
3722 	}
3723 
3724 	if (db->db_buf != NULL) {
3725 		arc_buf_access(db->db_buf);
3726 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3727 	}
3728 
3729 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3730 
3731 	/*
3732 	 * If this buffer is currently syncing out, and we are
3733 	 * still referencing it from db_data, we need to make a copy
3734 	 * of it in case we decide we want to dirty it again in this txg.
3735 	 */
3736 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3737 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3738 	    db->db_state == DB_CACHED && db->db_data_pending) {
3739 		dbuf_dirty_record_t *dr = db->db_data_pending;
3740 		if (dr->dt.dl.dr_data == db->db_buf) {
3741 			ASSERT3P(db->db_buf, !=, NULL);
3742 			dbuf_hold_copy(dn, db);
3743 		}
3744 	}
3745 
3746 	if (multilist_link_active(&db->db_cache_link)) {
3747 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3748 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3749 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3750 
3751 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3752 		(void) zfs_refcount_remove_many(
3753 		    &dbuf_caches[db->db_caching_status].size,
3754 		    db->db.db_size, db);
3755 
3756 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3757 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3758 		} else {
3759 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3760 			DBUF_STAT_BUMPDOWN(cache_count);
3761 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3762 			    db->db.db_size);
3763 		}
3764 		db->db_caching_status = DB_NO_CACHE;
3765 	}
3766 	(void) zfs_refcount_add(&db->db_holds, tag);
3767 	DBUF_VERIFY(db);
3768 	mutex_exit(&db->db_mtx);
3769 
3770 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3771 	if (parent)
3772 		dbuf_rele(parent, NULL);
3773 
3774 	ASSERT3P(DB_DNODE(db), ==, dn);
3775 	ASSERT3U(db->db_blkid, ==, blkid);
3776 	ASSERT3U(db->db_level, ==, level);
3777 	*dbp = db;
3778 
3779 	return (0);
3780 }
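
/*
 * Illustrative sketch (hypothetical caller): the usual hold/release
 * discipline around dbuf_hold_impl().  Per the comment above, the
 * caller holds dn_struct_rwlock and receives db with a hold taken but
 * db_mtx dropped; the hold is released with dbuf_rele() under the
 * same tag.
 *
 *	dmu_buf_impl_t *db;
 *	int err;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0) {
 *		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */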
3781 
3782 dmu_buf_impl_t *
3783 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3784 {
3785 	return (dbuf_hold_level(dn, 0, blkid, tag));
3786 }
3787 
3788 dmu_buf_impl_t *
3789 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3790 {
3791 	dmu_buf_impl_t *db;
3792 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3793 	return (err ? NULL : db);
3794 }
3795 
3796 void
3797 dbuf_create_bonus(dnode_t *dn)
3798 {
3799 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3800 
3801 	ASSERT(dn->dn_bonus == NULL);
3802 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3803 	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3804 }
3805 
3806 int
3807 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3808 {
3809 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3810 
3811 	if (db->db_blkid != DMU_SPILL_BLKID)
3812 		return (SET_ERROR(ENOTSUP));
3813 	if (blksz == 0)
3814 		blksz = SPA_MINBLOCKSIZE;
3815 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3816 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3817 
3818 	dbuf_new_size(db, blksz, tx);
3819 
3820 	return (0);
3821 }
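
/*
 * Worked example for dbuf_spill_set_blksz() above, assuming
 * SPA_MINBLOCKSIZE == 512: a requested blksz of 3000 is rounded up to
 * P2ROUNDUP(3000, 512) == 3072, and a blksz of 0 selects the 512-byte
 * minimum.
 */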
3822 
3823 void
3824 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3825 {
3826 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3827 }
3828 
3829 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3830 void
3831 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3832 {
3833 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3834 	VERIFY3S(holds, >, 1);
3835 }
3836 
3837 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3838 boolean_t
3839 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3840     const void *tag)
3841 {
3842 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3843 	dmu_buf_impl_t *found_db;
3844 	boolean_t result = B_FALSE;
3845 
3846 	if (blkid == DMU_BONUS_BLKID)
3847 		found_db = dbuf_find_bonus(os, obj);
3848 	else
3849 		found_db = dbuf_find(os, obj, 0, blkid, NULL);
3850 
3851 	if (found_db != NULL) {
3852 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3853 			(void) zfs_refcount_add(&db->db_holds, tag);
3854 			result = B_TRUE;
3855 		}
3856 		mutex_exit(&found_db->db_mtx);
3857 	}
3858 	return (result);
3859 }
3860 
3861 /*
3862  * If you call dbuf_rele() you had better not be referencing the dnode handle
3863  * unless you have some other direct or indirect hold on the dnode. (An indirect
3864  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3865  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3866  * dnode's parent dbuf evicting its dnode handles.
3867  */
3868 void
3869 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3870 {
3871 	mutex_enter(&db->db_mtx);
3872 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3873 }
3874 
3875 void
3876 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3877 {
3878 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3879 }
3880 
3881 /*
3882  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3883  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3884  * argument should be set if we are already in the dbuf-evicting code
3885  * path, in which case we don't want to recursively evict.  This allows us to
3886  * avoid deeply nested stacks that would have a call flow similar to this:
3887  *
3888  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3889  *	^						|
3890  *	|						|
3891  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3892  *
3893  */
3894 void
3895 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3896 {
3897 	int64_t holds;
3898 	uint64_t size;
3899 
3900 	ASSERT(MUTEX_HELD(&db->db_mtx));
3901 	DBUF_VERIFY(db);
3902 
3903 	/*
3904 	 * Remove the reference to the dbuf before removing its hold on the
3905 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3906 	 * buffer has a corresponding dnode hold.
3907 	 */
3908 	holds = zfs_refcount_remove(&db->db_holds, tag);
3909 	ASSERT(holds >= 0);
3910 
3911 	/*
3912 	 * We can't freeze indirects if there is a possibility that they
3913 	 * may be modified in the current syncing context.
3914 	 */
3915 	if (db->db_buf != NULL &&
3916 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3917 		arc_buf_freeze(db->db_buf);
3918 	}
3919 
3920 	if (holds == db->db_dirtycnt &&
3921 	    db->db_level == 0 && db->db_user_immediate_evict)
3922 		dbuf_evict_user(db);
3923 
3924 	if (holds == 0) {
3925 		if (db->db_blkid == DMU_BONUS_BLKID) {
3926 			dnode_t *dn;
3927 			boolean_t evict_dbuf = db->db_pending_evict;
3928 
3929 			/*
3930 			 * If the dnode moves here, we cannot cross this
3931 			 * barrier until the move completes.
3932 			 */
3933 			DB_DNODE_ENTER(db);
3934 
3935 			dn = DB_DNODE(db);
3936 			atomic_dec_32(&dn->dn_dbufs_count);
3937 
3938 			/*
3939 			 * Decrementing the dbuf count means that the bonus
3940 			 * buffer's dnode hold is no longer discounted in
3941 			 * dnode_move(). The dnode cannot move until after
3942 			 * the dnode_rele() below.
3943 			 */
3944 			DB_DNODE_EXIT(db);
3945 
3946 			/*
3947 			 * Do not reference db after its lock is dropped.
3948 			 * Another thread may evict it.
3949 			 */
3950 			mutex_exit(&db->db_mtx);
3951 
3952 			if (evict_dbuf)
3953 				dnode_evict_bonus(dn);
3954 
3955 			dnode_rele(dn, db);
3956 		} else if (db->db_buf == NULL) {
3957 			/*
3958 			 * This is a special case: we never associated this
3959 			 * dbuf with any data allocated from the ARC.
3960 			 */
3961 			ASSERT(db->db_state == DB_UNCACHED ||
3962 			    db->db_state == DB_NOFILL);
3963 			dbuf_destroy(db);
3964 		} else if (arc_released(db->db_buf)) {
3965 			/*
3966 			 * This dbuf has anonymous data associated with it.
3967 			 */
3968 			dbuf_destroy(db);
3969 		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
3970 		    db->db_pending_evict) {
3971 			dbuf_destroy(db);
3972 		} else if (!multilist_link_active(&db->db_cache_link)) {
3973 			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3974 
3975 			dbuf_cached_state_t dcs =
3976 			    dbuf_include_in_metadata_cache(db) ?
3977 			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3978 			db->db_caching_status = dcs;
3979 
3980 			multilist_insert(&dbuf_caches[dcs].cache, db);
3981 			uint64_t db_size = db->db.db_size;
3982 			size = zfs_refcount_add_many(
3983 			    &dbuf_caches[dcs].size, db_size, db);
3984 			uint8_t db_level = db->db_level;
3985 			mutex_exit(&db->db_mtx);
3986 
3987 			if (dcs == DB_DBUF_METADATA_CACHE) {
3988 				DBUF_STAT_BUMP(metadata_cache_count);
3989 				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
3990 				    size);
3991 			} else {
3992 				DBUF_STAT_BUMP(cache_count);
3993 				DBUF_STAT_MAX(cache_size_bytes_max, size);
3994 				DBUF_STAT_BUMP(cache_levels[db_level]);
3995 				DBUF_STAT_INCR(cache_levels_bytes[db_level],
3996 				    db_size);
3997 			}
3998 
3999 			if (dcs == DB_DBUF_CACHE && !evicting)
4000 				dbuf_evict_notify(size);
4001 		}
4002 	} else {
4003 		mutex_exit(&db->db_mtx);
4004 	}
4006 }
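
/*
 * Illustrative note on the 'evicting' flag: the eviction path itself
 * releases dbufs with evicting == B_TRUE, e.g. (sketch of a
 * hypothetical caller on that path):
 *
 *	mutex_enter(&db->db_mtx);
 *	dbuf_rele_and_unlock(db, tag, B_TRUE);
 *
 * so that a release performed while already evicting skips
 * dbuf_evict_notify() and cannot re-enter the cycle shown in the
 * diagram above dbuf_rele_and_unlock().
 */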
4007 
4008 #pragma weak dmu_buf_refcount = dbuf_refcount
4009 uint64_t
4010 dbuf_refcount(dmu_buf_impl_t *db)
4011 {
4012 	return (zfs_refcount_count(&db->db_holds));
4013 }
4014 
4015 uint64_t
4016 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4017 {
4018 	uint64_t holds;
4019 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4020 
4021 	mutex_enter(&db->db_mtx);
4022 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4023 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4024 	mutex_exit(&db->db_mtx);
4025 
4026 	return (holds);
4027 }
4028 
4029 void *
4030 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4031     dmu_buf_user_t *new_user)
4032 {
4033 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4034 
4035 	mutex_enter(&db->db_mtx);
4036 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4037 	if (db->db_user == old_user)
4038 		db->db_user = new_user;
4039 	else
4040 		old_user = db->db_user;
4041 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4042 	mutex_exit(&db->db_mtx);
4043 
4044 	return (old_user);
4045 }
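
/*
 * Illustrative sketch (hypothetical caller; my_user is a made-up
 * name): dmu_buf_replace_user() acts like a compare-and-swap on
 * db_user, which is how the set and remove wrappers below are built.
 * Installing a user only if none is present, and detecting a lost
 * race:
 *
 *	dmu_buf_user_t *winner;
 *
 *	winner = dmu_buf_set_user(db, &my_user);
 *	if (winner != NULL) {
 *		... another thread's user was already installed,
 *		... so my_user was not attached
 *	}
 */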
4046 
4047 void *
4048 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4049 {
4050 	return (dmu_buf_replace_user(db_fake, NULL, user));
4051 }
4052 
4053 void *
4054 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4055 {
4056 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4057 
4058 	db->db_user_immediate_evict = TRUE;
4059 	return (dmu_buf_set_user(db_fake, user));
4060 }
4061 
4062 void *
4063 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4064 {
4065 	return (dmu_buf_replace_user(db_fake, user, NULL));
4066 }
4067 
4068 void *
4069 dmu_buf_get_user(dmu_buf_t *db_fake)
4070 {
4071 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4072 
4073 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4074 	return (db->db_user);
4075 }
4076 
4077 void
4078 dmu_buf_user_evict_wait(void)
4079 {
4080 	taskq_wait(dbu_evict_taskq);
4081 }
4082 
4083 blkptr_t *
4084 dmu_buf_get_blkptr(dmu_buf_t *db)
4085 {
4086 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4087 	return (dbi->db_blkptr);
4088 }
4089 
4090 objset_t *
4091 dmu_buf_get_objset(dmu_buf_t *db)
4092 {
4093 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4094 	return (dbi->db_objset);
4095 }
4096 
4097 dnode_t *
4098 dmu_buf_dnode_enter(dmu_buf_t *db)
4099 {
4100 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4101 	DB_DNODE_ENTER(dbi);
4102 	return (DB_DNODE(dbi));
4103 }
4104 
4105 void
4106 dmu_buf_dnode_exit(dmu_buf_t *db)
4107 {
4108 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4109 	DB_DNODE_EXIT(dbi);
4110 }
4111 
4112 static void
4113 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4114 {
4115 	/* ASSERT(dmu_tx_is_syncing(tx)) */
4116 	ASSERT(MUTEX_HELD(&db->db_mtx));
4117 
4118 	if (db->db_blkptr != NULL)
4119 		return;
4120 
4121 	if (db->db_blkid == DMU_SPILL_BLKID) {
4122 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4123 		BP_ZERO(db->db_blkptr);
4124 		return;
4125 	}
4126 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4127 		/*
4128 		 * This buffer was allocated at a time when there were
4129 		 * no available blkptrs from the dnode, or it was
4130 		 * inappropriate to hook it in (i.e., nlevels mismatch).
4131 		 */
4132 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4133 		ASSERT(db->db_parent == NULL);
4134 		db->db_parent = dn->dn_dbuf;
4135 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4136 		DBUF_VERIFY(db);
4137 	} else {
4138 		dmu_buf_impl_t *parent = db->db_parent;
4139 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4140 
4141 		ASSERT(dn->dn_phys->dn_nlevels > 1);
4142 		if (parent == NULL) {
4143 			mutex_exit(&db->db_mtx);
4144 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
4145 			parent = dbuf_hold_level(dn, db->db_level + 1,
4146 			    db->db_blkid >> epbs, db);
4147 			rw_exit(&dn->dn_struct_rwlock);
4148 			mutex_enter(&db->db_mtx);
4149 			db->db_parent = parent;
4150 		}
4151 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
4152 		    (db->db_blkid & ((1ULL << epbs) - 1));
4153 		DBUF_VERIFY(db);
4154 	}
4155 }
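
/*
 * Worked example of the parent indexing above, assuming epbs == 10
 * (128K indirect blocks): a level-0 dbuf with db_blkid == 5000 hangs
 * off the level-1 indirect with blkid 5000 >> 10 == 4, and its block
 * pointer lives at index 5000 & ((1ULL << 10) - 1) == 904 within that
 * parent's blkptr array.
 */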
4156 
4157 static void
4158 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4159 {
4160 	dmu_buf_impl_t *db = dr->dr_dbuf;
4161 	void *data = dr->dt.dl.dr_data;
4162 
4163 	ASSERT0(db->db_level);
4164 	ASSERT(MUTEX_HELD(&db->db_mtx));
4165 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4166 	ASSERT(data != NULL);
4167 
4168 	dnode_t *dn = dr->dr_dnode;
4169 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4170 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4171 	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4172 
4173 	dbuf_sync_leaf_verify_bonus_dnode(dr);
4174 
4175 	dbuf_undirty_bonus(dr);
4176 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4177 }
4178 
4179 /*
4180  * When syncing out a block of dnodes, adjust the block to deal with
4181  * encryption.  Normally, we make sure the block is decrypted before writing
4182  * it.  If we have crypt params, then we are writing a raw (encrypted) block
4183  * from a raw receive.  In this case, set the ARC buf's crypt params so
4184  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4185  */
4186 static void
4187 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4188 {
4189 	int err;
4190 	dmu_buf_impl_t *db = dr->dr_dbuf;
4191 
4192 	ASSERT(MUTEX_HELD(&db->db_mtx));
4193 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4194 	ASSERT3U(db->db_level, ==, 0);
4195 
4196 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4197 		zbookmark_phys_t zb;
4198 
4199 		/*
4200 		 * Unfortunately, there is currently no mechanism for
4201 		 * syncing context to handle decryption errors. An error
4202 		 * here is only possible if an attacker maliciously
4203 		 * changed a dnode block and updated the associated
4204 		 * checksums going up the block tree.
4205 		 */
4206 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4207 		    db->db.db_object, db->db_level, db->db_blkid);
4208 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4209 		    &zb, B_TRUE);
4210 		if (err)
4211 			panic("Invalid dnode block MAC");
4212 	} else if (dr->dt.dl.dr_has_raw_params) {
4213 		(void) arc_release(dr->dt.dl.dr_data, db);
4214 		arc_convert_to_raw(dr->dt.dl.dr_data,
4215 		    dmu_objset_id(db->db_objset),
4216 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4217 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4218 	}
4219 }
4220 
4221 /*
4222  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4223  * is critical that we not allow the compiler to inline this function into
4224  * dbuf_sync_list() thereby drastically bloating the stack usage.
4225  */
4226 noinline static void
4227 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4228 {
4229 	dmu_buf_impl_t *db = dr->dr_dbuf;
4230 	dnode_t *dn = dr->dr_dnode;
4231 
4232 	ASSERT(dmu_tx_is_syncing(tx));
4233 
4234 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4235 
4236 	mutex_enter(&db->db_mtx);
4237 
4238 	ASSERT(db->db_level > 0);
4239 	DBUF_VERIFY(db);
4240 
4241 	/* Read the block if it hasn't been read yet. */
4242 	if (db->db_buf == NULL) {
4243 		mutex_exit(&db->db_mtx);
4244 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4245 		mutex_enter(&db->db_mtx);
4246 	}
4247 	ASSERT3U(db->db_state, ==, DB_CACHED);
4248 	ASSERT(db->db_buf != NULL);
4249 
4250 	/* Indirect block size must match what the dnode thinks it is. */
4251 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4252 	dbuf_check_blkptr(dn, db);
4253 
4254 	/* Provide the pending dirty record to child dbufs */
4255 	db->db_data_pending = dr;
4256 
4257 	mutex_exit(&db->db_mtx);
4258 
4259 	dbuf_write(dr, db->db_buf, tx);
4260 
4261 	zio_t *zio = dr->dr_zio;
4262 	mutex_enter(&dr->dt.di.dr_mtx);
4263 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4264 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4265 	mutex_exit(&dr->dt.di.dr_mtx);
4266 	zio_nowait(zio);
4267 }
4268 
4269 /*
4270  * Verify that the size of the data in our bonus buffer does not exceed
4271  * its recorded size.
4272  *
4273  * The purpose of this verification is to catch any cases in development
4274  * where the size of a phys structure (e.g., space_map_phys_t) grows and,
4275  * due to incorrect feature management, older pools expect to read more
4276  * data even though they didn't actually write it to begin with.
4277  *
4278  * For example, this would catch an error in the feature logic where we
4279  * open an older pool and we expect to write the space map histogram of
4280  * a space map with size SPACE_MAP_SIZE_V0.
4281  */
4282 static void
4283 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4284 {
4285 #ifdef ZFS_DEBUG
4286 	dnode_t *dn = dr->dr_dnode;
4287 
4288 	/*
4289 	 * Encrypted bonus buffers can have data past their bonuslen.
4290 	 * Skip the verification of these blocks.
4291 	 */
4292 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4293 		return;
4294 
4295 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4296 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4297 	ASSERT3U(bonuslen, <=, maxbonuslen);
4298 
4299 	arc_buf_t *datap = dr->dt.dl.dr_data;
4300 	char *datap_end = ((char *)datap) + bonuslen;
4301 	char *datap_max = ((char *)datap) + maxbonuslen;
4302 
4303 	/* ensure that everything is zero after our data */
4304 	for (; datap_end < datap_max; datap_end++)
4305 		ASSERT(*datap_end == 0);
4306 #endif
4307 }
4308 
4309 static blkptr_t *
4310 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4311 {
4312 	/* This must be a lightweight dirty record. */
4313 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4314 	dnode_t *dn = dr->dr_dnode;
4315 
4316 	if (dn->dn_phys->dn_nlevels == 1) {
4317 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4318 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4319 	} else {
4320 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4321 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4322 		VERIFY3U(parent_db->db_level, ==, 1);
4323 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4324 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4325 		blkptr_t *bp = parent_db->db.db_data;
4326 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4327 	}
4328 }
4329 
4330 static void
4331 dbuf_lightweight_ready(zio_t *zio)
4332 {
4333 	dbuf_dirty_record_t *dr = zio->io_private;
4334 	blkptr_t *bp = zio->io_bp;
4335 
4336 	if (zio->io_error != 0)
4337 		return;
4338 
4339 	dnode_t *dn = dr->dr_dnode;
4340 
4341 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4342 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4343 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4344 	    bp_get_dsize_sync(spa, bp_orig);
4345 	dnode_diduse_space(dn, delta);
4346 
4347 	uint64_t blkid = dr->dt.dll.dr_blkid;
4348 	mutex_enter(&dn->dn_mtx);
4349 	if (blkid > dn->dn_phys->dn_maxblkid) {
4350 		ASSERT0(dn->dn_objset->os_raw_receive);
4351 		dn->dn_phys->dn_maxblkid = blkid;
4352 	}
4353 	mutex_exit(&dn->dn_mtx);
4354 
4355 	if (!BP_IS_EMBEDDED(bp)) {
4356 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4357 		BP_SET_FILL(bp, fill);
4358 	}
4359 
4360 	dmu_buf_impl_t *parent_db;
4361 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4362 	if (dr->dr_parent == NULL) {
4363 		parent_db = dn->dn_dbuf;
4364 	} else {
4365 		parent_db = dr->dr_parent->dr_dbuf;
4366 	}
4367 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4368 	*bp_orig = *bp;
4369 	rw_exit(&parent_db->db_rwlock);
4370 }
4371 
4372 static void
4373 dbuf_lightweight_physdone(zio_t *zio)
4374 {
4375 	dbuf_dirty_record_t *dr = zio->io_private;
4376 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4377 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4378 
4379 	/*
4380 	 * The callback will be called io_phys_children times.  Retire one
4381 	 * portion of our dirty space each time we are called.  Any rounding
4382 	 * error will be cleaned up by dbuf_lightweight_done().
4383 	 */
4384 	int delta = dr->dr_accounted / zio->io_phys_children;
4385 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4386 }
4387 
4388 static void
4389 dbuf_lightweight_done(zio_t *zio)
4390 {
4391 	dbuf_dirty_record_t *dr = zio->io_private;
4392 
4393 	VERIFY0(zio->io_error);
4394 
4395 	objset_t *os = dr->dr_dnode->dn_objset;
4396 	dmu_tx_t *tx = os->os_synctx;
4397 
4398 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4399 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4400 	} else {
4401 		dsl_dataset_t *ds = os->os_dsl_dataset;
4402 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4403 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4404 	}
4405 
4406 	/*
4407 	 * See comment in dbuf_write_done().
4408 	 */
4409 	if (zio->io_phys_children == 0) {
4410 		dsl_pool_undirty_space(dmu_objset_pool(os),
4411 		    dr->dr_accounted, zio->io_txg);
4412 	} else {
4413 		dsl_pool_undirty_space(dmu_objset_pool(os),
4414 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4415 	}
4416 
4417 	abd_free(dr->dt.dll.dr_abd);
4418 	kmem_free(dr, sizeof (*dr));
4419 }
4420 
4421 noinline static void
4422 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4423 {
4424 	dnode_t *dn = dr->dr_dnode;
4425 	zio_t *pio;
4426 	if (dn->dn_phys->dn_nlevels == 1) {
4427 		pio = dn->dn_zio;
4428 	} else {
4429 		pio = dr->dr_parent->dr_zio;
4430 	}
4431 
4432 	zbookmark_phys_t zb = {
4433 		.zb_objset = dmu_objset_id(dn->dn_objset),
4434 		.zb_object = dn->dn_object,
4435 		.zb_level = 0,
4436 		.zb_blkid = dr->dt.dll.dr_blkid,
4437 	};
4438 
4439 	/*
4440 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4441 	 * will have the old BP in dbuf_lightweight_done().
4442 	 */
4443 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4444 
4445 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4446 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4447 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4448 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4449 	    dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4450 	    ZIO_PRIORITY_ASYNC_WRITE,
4451 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4452 
4453 	zio_nowait(dr->dr_zio);
4454 }
4455 
4456 /*
4457  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4458  * critical that we not allow the compiler to inline this function into
4459  * dbuf_sync_list() thereby drastically bloating the stack usage.
4460  */
4461 noinline static void
4462 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4463 {
4464 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4465 	dmu_buf_impl_t *db = dr->dr_dbuf;
4466 	dnode_t *dn = dr->dr_dnode;
4467 	objset_t *os;
4468 	uint64_t txg = tx->tx_txg;
4469 
4470 	ASSERT(dmu_tx_is_syncing(tx));
4471 
4472 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4473 
4474 	mutex_enter(&db->db_mtx);
4475 	/*
4476 	 * To be synced, we must be dirtied.  But we
4477 	 * might have been freed after the dirty.
4478 	 */
4479 	if (db->db_state == DB_UNCACHED) {
4480 		/* This buffer has been freed since it was dirtied */
4481 		ASSERT(db->db.db_data == NULL);
4482 	} else if (db->db_state == DB_FILL) {
4483 		/* This buffer was freed and is now being re-filled */
4484 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4485 	} else {
4486 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4487 	}
4488 	DBUF_VERIFY(db);
4489 
4490 	if (db->db_blkid == DMU_SPILL_BLKID) {
4491 		mutex_enter(&dn->dn_mtx);
4492 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4493 			/*
4494 			 * In the previous transaction group, the bonus buffer
4495 			 * was entirely used to store the attributes for the
4496 			 * dnode which overrode the dn_spill field.  However,
4497 			 * when adding more attributes to the file a spill
4498 			 * block was required to hold the extra attributes.
4499 			 *
4500 			 * Make sure to clear the garbage left in the dn_spill
4501 			 * field from the previous attributes in the bonus
4502 			 * buffer.  Otherwise, after writing out the spill
4503 			 * block to the new allocated dva, it will free
4504 			 * the old block pointed to by the invalid dn_spill.
4505 			 */
4506 			db->db_blkptr = NULL;
4507 		}
4508 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4509 		mutex_exit(&dn->dn_mtx);
4510 	}
4511 
4512 	/*
4513 	 * If this is a bonus buffer, simply copy the bonus data into the
4514 	 * dnode.  It will be written out when the dnode is synced (and it
4515 	 * will be synced, since it must have been dirty for dbuf_sync to
4516 	 * be called).
4517 	 */
4518 	if (db->db_blkid == DMU_BONUS_BLKID) {
4519 		ASSERT(dr->dr_dbuf == db);
4520 		dbuf_sync_bonus(dr, tx);
4521 		return;
4522 	}
4523 
4524 	os = dn->dn_objset;
4525 
4526 	/*
4527 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4528 	 * operation to sneak in. As a result, we need to ensure that we
4529 	 * don't check the dr_override_state until we have returned from
4530 	 * dbuf_check_blkptr.
4531 	 */
4532 	dbuf_check_blkptr(dn, db);
4533 
4534 	/*
4535 	 * If this buffer is in the middle of an immediate write,
4536 	 * wait for the synchronous IO to complete.
4537 	 */
4538 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4539 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4540 		cv_wait(&db->db_changed, &db->db_mtx);
4541 	}
4542 
4543 	/*
4544 	 * If this is a dnode block, ensure it is appropriately encrypted
4545 	 * or decrypted, depending on what we are writing to it this txg.
4546 	 */
4547 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4548 		dbuf_prepare_encrypted_dnode_leaf(dr);
4549 
4550 	if (db->db_state != DB_NOFILL &&
4551 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4552 	    zfs_refcount_count(&db->db_holds) > 1 &&
4553 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4554 	    *datap == db->db_buf) {
4555 		/*
4556 		 * If this buffer is currently "in use" (i.e., there
4557 		 * are active holds and db_data still references it),
4558 		 * then make a copy before we start the write so that
4559 		 * any modifications from the open txg will not leak
4560 		 * into this write.
4561 		 *
4562 		 * NOTE: this copy does not need to be made for
4563 		 * objects only modified in the syncing context (e.g.
4564 		 * DNONE_DNODE blocks).
4565 		 * DMU_OT_DNODE blocks).
4566 		int psize = arc_buf_size(*datap);
4567 		int lsize = arc_buf_lsize(*datap);
4568 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4569 		enum zio_compress compress_type = arc_get_compression(*datap);
4570 		uint8_t complevel = arc_get_complevel(*datap);
4571 
4572 		if (arc_is_encrypted(*datap)) {
4573 			boolean_t byteorder;
4574 			uint8_t salt[ZIO_DATA_SALT_LEN];
4575 			uint8_t iv[ZIO_DATA_IV_LEN];
4576 			uint8_t mac[ZIO_DATA_MAC_LEN];
4577 
4578 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4579 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4580 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4581 			    dn->dn_type, psize, lsize, compress_type,
4582 			    complevel);
4583 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4584 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4585 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4586 			    psize, lsize, compress_type, complevel);
4587 		} else {
4588 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4589 		}
4590 		memcpy((*datap)->b_data, db->db.db_data, psize);
4591 	}
4592 	db->db_data_pending = dr;
4593 
4594 	mutex_exit(&db->db_mtx);
4595 
4596 	dbuf_write(dr, *datap, tx);
4597 
4598 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4599 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4600 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4601 	} else {
4602 		zio_nowait(dr->dr_zio);
4603 	}
4604 }
4605 
4606 void
4607 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4608 {
4609 	dbuf_dirty_record_t *dr;
4610 
4611 	while ((dr = list_head(list))) {
4612 		if (dr->dr_zio != NULL) {
4613 			/*
4614 			 * If we find an already initialized zio then we
4615 			 * are processing the meta-dnode, and we have finished.
4616 			 * The dbufs for all dnodes are put back on the list
4617 			 * during processing, so that we can zio_wait()
4618 			 * these IOs after initiating all child IOs.
4619 			 */
4620 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4621 			    DMU_META_DNODE_OBJECT);
4622 			break;
4623 		}
4624 		list_remove(list, dr);
4625 		if (dr->dr_dbuf == NULL) {
4626 			dbuf_sync_lightweight(dr, tx);
4627 		} else {
4628 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4629 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4630 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4631 			}
4632 			if (dr->dr_dbuf->db_level > 0)
4633 				dbuf_sync_indirect(dr, tx);
4634 			else
4635 				dbuf_sync_leaf(dr, tx);
4636 		}
4637 	}
4638 }
4639 
4640 static void
4641 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4642 {
4643 	(void) buf;
4644 	dmu_buf_impl_t *db = vdb;
4645 	dnode_t *dn;
4646 	blkptr_t *bp = zio->io_bp;
4647 	blkptr_t *bp_orig = &zio->io_bp_orig;
4648 	spa_t *spa = zio->io_spa;
4649 	int64_t delta;
4650 	uint64_t fill = 0;
4651 	int i;
4652 
4653 	ASSERT3P(db->db_blkptr, !=, NULL);
4654 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4655 
4656 	DB_DNODE_ENTER(db);
4657 	dn = DB_DNODE(db);
4658 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4659 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4660 	zio->io_prev_space_delta = delta;
4661 
4662 	if (bp->blk_birth != 0) {
4663 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4664 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4665 		    (db->db_blkid == DMU_SPILL_BLKID &&
4666 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4667 		    BP_IS_EMBEDDED(bp));
4668 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4669 	}
4670 
4671 	mutex_enter(&db->db_mtx);
4672 
4673 #ifdef ZFS_DEBUG
4674 	if (db->db_blkid == DMU_SPILL_BLKID) {
4675 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4676 		ASSERT(!(BP_IS_HOLE(bp)) &&
4677 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4678 	}
4679 #endif
4680 
4681 	if (db->db_level == 0) {
4682 		mutex_enter(&dn->dn_mtx);
4683 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4684 		    db->db_blkid != DMU_SPILL_BLKID) {
4685 			ASSERT0(db->db_objset->os_raw_receive);
4686 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4687 		}
4688 		mutex_exit(&dn->dn_mtx);
4689 
4690 		if (dn->dn_type == DMU_OT_DNODE) {
4691 			i = 0;
4692 			while (i < db->db.db_size) {
4693 				dnode_phys_t *dnp =
4694 				    (void *)(((char *)db->db.db_data) + i);
4695 
4696 				i += DNODE_MIN_SIZE;
4697 				if (dnp->dn_type != DMU_OT_NONE) {
4698 					fill++;
4699 					for (int j = 0; j < dnp->dn_nblkptr;
4700 					    j++) {
4701 						(void) zfs_blkptr_verify(spa,
4702 						    &dnp->dn_blkptr[j],
4703 						    BLK_CONFIG_SKIP,
4704 						    BLK_VERIFY_HALT);
4705 					}
4706 					if (dnp->dn_flags &
4707 					    DNODE_FLAG_SPILL_BLKPTR) {
4708 						(void) zfs_blkptr_verify(spa,
4709 						    DN_SPILL_BLKPTR(dnp),
4710 						    BLK_CONFIG_SKIP,
4711 						    BLK_VERIFY_HALT);
4712 					}
4713 					i += dnp->dn_extra_slots *
4714 					    DNODE_MIN_SIZE;
4715 				}
4716 			}
4717 		} else {
4718 			if (BP_IS_HOLE(bp)) {
4719 				fill = 0;
4720 			} else {
4721 				fill = 1;
4722 			}
4723 		}
4724 	} else {
4725 		blkptr_t *ibp = db->db.db_data;
4726 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4727 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4728 			if (BP_IS_HOLE(ibp))
4729 				continue;
4730 			(void) zfs_blkptr_verify(spa, ibp,
4731 			    BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4732 			fill += BP_GET_FILL(ibp);
4733 		}
4734 	}
4735 	DB_DNODE_EXIT(db);
4736 
4737 	if (!BP_IS_EMBEDDED(bp))
4738 		BP_SET_FILL(bp, fill);
4739 
4740 	mutex_exit(&db->db_mtx);
4741 
4742 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4743 	*db->db_blkptr = *bp;
4744 	dmu_buf_unlock_parent(db, dblt, FTAG);
4745 }
4746 
4747 /*
4748  * This function gets called just prior to running through the compression
4749  * stage of the zio pipeline. If we're an indirect block composed of only
4750  * holes, then we want this indirect to be compressed away to a hole. In
4751  * order to do that we must zero out any information about the holes that
4752  * this indirect points to before we try to compress it.
4753  */
4754 static void
4755 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4756 {
4757 	(void) zio, (void) buf;
4758 	dmu_buf_impl_t *db = vdb;
4759 	dnode_t *dn;
4760 	blkptr_t *bp;
4761 	unsigned int epbs, i;
4762 
4763 	ASSERT3U(db->db_level, >, 0);
4764 	DB_DNODE_ENTER(db);
4765 	dn = DB_DNODE(db);
4766 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4767 	ASSERT3U(epbs, <, 31);
4768 
4769 	/* Determine if all our children are holes */
4770 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4771 		if (!BP_IS_HOLE(bp))
4772 			break;
4773 	}
4774 
4775 	/*
4776 	 * If all the children are holes, then zero them all out so that
4777 	 * we may get compressed away.
4778 	 */
4779 	if (i == 1ULL << epbs) {
4780 		/*
4781 		 * We only found holes. Grab the rwlock to prevent
4782 		 * anybody from reading the blocks we're about to
4783 		 * zero out.
4784 		 */
4785 		rw_enter(&db->db_rwlock, RW_WRITER);
4786 		memset(db->db.db_data, 0, db->db.db_size);
4787 		rw_exit(&db->db_rwlock);
4788 	}
4789 	DB_DNODE_EXIT(db);
4790 }
4791 
4792 /*
4793  * The SPA will call this callback several times for each zio - once
4794  * for every physical child i/o (zio->io_phys_children times).  This
4795  * allows the DMU to monitor the progress of each logical i/o.  For example,
4796  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4797  * block.  There may be a long delay before all copies/fragments are completed,
4798  * so this callback allows us to retire dirty space gradually, as the physical
4799  * i/os complete.
4800  */
4801 static void
4802 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4803 {
4804 	(void) buf;
4805 	dmu_buf_impl_t *db = arg;
4806 	objset_t *os = db->db_objset;
4807 	dsl_pool_t *dp = dmu_objset_pool(os);
4808 	dbuf_dirty_record_t *dr;
4809 	int delta = 0;
4810 
4811 	dr = db->db_data_pending;
4812 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4813 
4814 	/*
4815 	 * The callback will be called io_phys_children times.  Retire one
4816 	 * portion of our dirty space each time we are called.  Any rounding
4817 	 * error will be cleaned up by dbuf_write_done().
4818 	 */
4819 	delta = dr->dr_accounted / zio->io_phys_children;
4820 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4821 }
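
/*
 * Worked example of the accounting above: with dr_accounted == 131072
 * and io_phys_children == 3, each of the three physdone callbacks
 * retires 131072 / 3 == 43690 bytes, and dbuf_write_done() cleans up
 * the remaining 131072 % 3 == 2 bytes.
 */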
4822 
4823 static void
4824 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4825 {
4826 	(void) buf;
4827 	dmu_buf_impl_t *db = vdb;
4828 	blkptr_t *bp_orig = &zio->io_bp_orig;
4829 	blkptr_t *bp = db->db_blkptr;
4830 	objset_t *os = db->db_objset;
4831 	dmu_tx_t *tx = os->os_synctx;
4832 
4833 	ASSERT0(zio->io_error);
4834 	ASSERT(db->db_blkptr == bp);
4835 
4836 	/*
4837 	 * For nopwrites and rewrites we ensure that the bp matches our
4838 	 * original and bypass all the accounting.
4839 	 */
4840 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4841 		ASSERT(BP_EQUAL(bp, bp_orig));
4842 	} else {
4843 		dsl_dataset_t *ds = os->os_dsl_dataset;
4844 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4845 		dsl_dataset_block_born(ds, bp, tx);
4846 	}
4847 
4848 	mutex_enter(&db->db_mtx);
4849 
4850 	DBUF_VERIFY(db);
4851 
4852 	dbuf_dirty_record_t *dr = db->db_data_pending;
4853 	dnode_t *dn = dr->dr_dnode;
4854 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4855 	ASSERT(dr->dr_dbuf == db);
4856 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4857 	list_remove(&db->db_dirty_records, dr);
4858 
4859 #ifdef ZFS_DEBUG
4860 	if (db->db_blkid == DMU_SPILL_BLKID) {
4861 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4862 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4863 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4864 	}
4865 #endif
4866 
4867 	if (db->db_level == 0) {
4868 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4869 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4870 		if (db->db_state != DB_NOFILL) {
4871 			if (dr->dt.dl.dr_data != NULL &&
4872 			    dr->dt.dl.dr_data != db->db_buf) {
4873 				arc_buf_destroy(dr->dt.dl.dr_data, db);
4874 			}
4875 		}
4876 	} else {
4877 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4878 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4879 		if (!BP_IS_HOLE(db->db_blkptr)) {
4880 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4881 			    SPA_BLKPTRSHIFT;
4882 			ASSERT3U(db->db_blkid, <=,
4883 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4884 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4885 			    db->db.db_size);
4886 		}
4887 		mutex_destroy(&dr->dt.di.dr_mtx);
4888 		list_destroy(&dr->dt.di.dr_children);
4889 	}
4890 
4891 	cv_broadcast(&db->db_changed);
4892 	ASSERT(db->db_dirtycnt > 0);
4893 	db->db_dirtycnt -= 1;
4894 	db->db_data_pending = NULL;
4895 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4896 
4897 	/*
4898 	 * If we didn't do a physical write in this ZIO and we
4899 	 * still ended up here, it means that the space of the
4900 	 * dbuf that we just released (and undirtied) above hasn't
4901 	 * been marked as undirtied in the pool's accounting.
4902 	 *
4903 	 * Thus, we undirty that space in the pool's view of the
4904 	 * world here. For physical writes this type of update
4905 	 * happens in dbuf_write_physdone().
4906 	 *
4907 	 * If we did a physical write, cleanup any rounding errors
4908 	 * that came up due to writing multiple copies of a block
4909 	 * on disk [see dbuf_write_physdone()].
4910 	 */
4911 	if (zio->io_phys_children == 0) {
4912 		dsl_pool_undirty_space(dmu_objset_pool(os),
4913 		    dr->dr_accounted, zio->io_txg);
4914 	} else {
4915 		dsl_pool_undirty_space(dmu_objset_pool(os),
4916 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4917 	}
4918 
4919 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
4920 }
4921 
4922 static void
4923 dbuf_write_nofill_ready(zio_t *zio)
4924 {
4925 	dbuf_write_ready(zio, NULL, zio->io_private);
4926 }
4927 
4928 static void
4929 dbuf_write_nofill_done(zio_t *zio)
4930 {
4931 	dbuf_write_done(zio, NULL, zio->io_private);
4932 }
4933 
4934 static void
4935 dbuf_write_override_ready(zio_t *zio)
4936 {
4937 	dbuf_dirty_record_t *dr = zio->io_private;
4938 	dmu_buf_impl_t *db = dr->dr_dbuf;
4939 
4940 	dbuf_write_ready(zio, NULL, db);
4941 }
4942 
4943 static void
4944 dbuf_write_override_done(zio_t *zio)
4945 {
4946 	dbuf_dirty_record_t *dr = zio->io_private;
4947 	dmu_buf_impl_t *db = dr->dr_dbuf;
4948 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4949 
4950 	mutex_enter(&db->db_mtx);
4951 	if (!BP_EQUAL(zio->io_bp, obp)) {
4952 		if (!BP_IS_HOLE(obp))
4953 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4954 		arc_release(dr->dt.dl.dr_data, db);
4955 	}
4956 	mutex_exit(&db->db_mtx);
4957 
4958 	dbuf_write_done(zio, NULL, db);
4959 
4960 	if (zio->io_abd != NULL)
4961 		abd_free(zio->io_abd);
4962 }
4963 
4964 typedef struct dbuf_remap_impl_callback_arg {
4965 	objset_t	*drica_os;
4966 	uint64_t	drica_blk_birth;
4967 	dmu_tx_t	*drica_tx;
4968 } dbuf_remap_impl_callback_arg_t;
4969 
static void
dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
    void *arg)
{
	dbuf_remap_impl_callback_arg_t *drica = arg;
	objset_t *os = drica->drica_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_tx_t *tx = drica->drica_tx;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (os == spa_meta_objset(spa)) {
		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
	} else {
		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
		    size, drica->drica_blk_birth, tx);
	}
}

static void
dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
{
	blkptr_t bp_copy = *bp;
	spa_t *spa = dmu_objset_spa(dn->dn_objset);
	dbuf_remap_impl_callback_arg_t drica;

	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	drica.drica_os = dn->dn_objset;
	drica.drica_blk_birth = bp->blk_birth;
	drica.drica_tx = tx;
	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
	    &drica)) {
		/*
		 * If the blkptr being remapped is tracked by a livelist,
		 * then we need to make sure the livelist reflects the update.
		 * First, cancel out the old blkptr by appending a 'FREE'
		 * entry. Next, add an 'ALLOC' to track the new version. This
		 * way we avoid trying to free an inaccurate blkptr at delete
		 * time. Note that embedded blkptrs are not tracked in
		 * livelists.
		 */
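		/*
		 * For example, a livelist that already tracks { ALLOC bp }
		 * becomes { ALLOC bp, FREE bp, ALLOC bp_copy }, which nets
		 * out to just the remapped blkptr.
		 */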
		if (dn->dn_objset != spa_meta_objset(spa)) {
			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
				ASSERT(!BP_IS_EMBEDDED(bp));
				ASSERT(dsl_dir_is_clone(ds->ds_dir));
				ASSERT(spa_feature_is_enabled(spa,
				    SPA_FEATURE_LIVELIST));
				bplist_append(&ds->ds_dir->dd_pending_frees,
				    bp);
				bplist_append(&ds->ds_dir->dd_pending_allocs,
				    &bp_copy);
			}
		}

		/*
		 * The db_rwlock prevents dbuf_read_impl() from
		 * dereferencing the BP while we are changing it.  To
		 * avoid lock contention, only grab it when we are actually
		 * changing the BP.
		 */
		if (rw != NULL)
			rw_enter(rw, RW_WRITER);
		*bp = bp_copy;
		if (rw != NULL)
			rw_exit(rw);
	}
}

/*
 * Remap any existing BPs to concrete vdevs, if possible.
 */
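/*
 * (After a top-level vdev is removed it persists only as an "indirect"
 * vdev whose mapping table redirects reads to the relocated data.
 * Rewriting BPs here points them at their new, concrete locations,
 * which in turn allows the indirect mapping to be condensed over time.)
 */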
static void
dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(db->db_objset);
	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));

	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return;

	if (db->db_level > 0) {
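		/*
		 * One BP per 1 << SPA_BLKPTRSHIFT (128-byte) slot; e.g. a
		 * 128K indirect block holds 1024 block pointers.
		 */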
		blkptr_t *bp = db->db.db_data;
		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
		}
	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		dnode_phys_t *dnp = db->db.db_data;
		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
		    DMU_OT_DNODE);
		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
		    i += dnp[i].dn_extra_slots + 1) {
			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
				    &dn->dn_dbuf->db_rwlock);
				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
				    tx);
			}
		}
	}
}

/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn = dr->dr_dnode;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_phys_t zb;
	zio_prop_t zp;
	zio_t *pio; /* parent I/O */
	int wp_flag = 0;

	ASSERT(dmu_tx_is_syncing(tx));

	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
			dbuf_remap(dn, db, tx);
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		pio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		pio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(pio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);

	/*
	 * We copy the blkptr now (rather than when we instantiate the dirty
	 * record), because its value can change between open context and
	 * syncing context. We do not need to hold dn_struct_rwlock to read
	 * db_blkptr because we are in syncing context.
	 */
	dr->dr_bp_copy = *db->db_blkptr;

	if (db->db_level == 0 &&
	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		/*
		 * The BP for this block has been provided by open context
		 * (by dmu_sync() or dmu_buf_write_embedded()).
		 */
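		/*
		 * zio_write_override() below makes the pipeline adopt
		 * dr_overridden_by instead of allocating and writing a new
		 * block, carrying the open-context copies/nopwrite/brtwrite
		 * settings along with it.
		 */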
		abd_t *contents = (data != NULL) ?
		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;

		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
		    contents, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_override_ready, NULL, NULL,
		    dbuf_write_override_done,
		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
		    dr->dt.dl.dr_brtwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, NULL, NULL,
		    dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));

		/*
		 * For indirect blocks, we want to set up the children-ready
		 * callback so that we can properly handle an indirect block
		 * that contains only holes.
		 */
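		/*
		 * dbuf_write_children_ready() runs once the child writes
		 * have filled in their BPs; if every pointer turns out to
		 * be a hole, it zeroes the buffer so that this block can
		 * itself be compressed away to a hole.
		 */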
		arc_write_done_func_t *children_ready_cb = NULL;
		if (db->db_level != 0)
			children_ready_cb = dbuf_write_children_ready;

		dr->dr_zio = arc_write(pio, os->os_spa, txg,
		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
		    dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
		    children_ready_cb, dbuf_write_physdone,
		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}

EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_set_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_is_dirty);
EXPORT_SYMBOL(dmu_buf_will_clone);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);

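/*
 * The tunables below surface as module parameters: on Linux under
 * /sys/module/zfs/parameters/ (e.g. dbuf_cache_max_bytes), and on
 * FreeBSD as sysctls such as vfs.zfs.dbuf_cache.max_bytes.  For
 * example (values purely illustrative):
 *
 *	# Linux
 *	echo 134217728 > /sys/module/zfs/parameters/dbuf_cache_max_bytes
 *	# FreeBSD
 *	sysctl vfs.zfs.dbuf_cache.max_bytes=134217728
 */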
ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of the dbuf cache.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");

ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
	"Maximum size in bytes of the dbuf metadata cache.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf cache to log2 fraction of ARC size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
	"Set size of dbuf metadata cache to log2 fraction of ARC size.");

ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
	"Set size of dbuf cache mutex array as log2 shift.");
5261