xref: /freebsd/sys/contrib/openzfs/module/zfs/dbuf.c (revision 9f27341c336aa12f6c7163c17e646e76c813b689)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or https://opensource.org/licenses/CDDL-1.0.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright (c) 2019, Klara Inc.
28  * Copyright (c) 2019, Allan Jude
29  * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
30  */
31 
32 #include <sys/zfs_context.h>
33 #include <sys/arc.h>
34 #include <sys/dmu.h>
35 #include <sys/dmu_send.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dbuf.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/spa.h>
43 #include <sys/zio.h>
44 #include <sys/dmu_zfetch.h>
45 #include <sys/sa.h>
46 #include <sys/sa_impl.h>
47 #include <sys/zfeature.h>
48 #include <sys/blkptr.h>
49 #include <sys/range_tree.h>
50 #include <sys/trace_zfs.h>
51 #include <sys/callb.h>
52 #include <sys/abd.h>
53 #include <sys/brt.h>
54 #include <sys/vdev.h>
55 #include <cityhash.h>
56 #include <sys/spa_impl.h>
57 #include <sys/wmsum.h>
58 #include <sys/vdev_impl.h>
59 
60 static kstat_t *dbuf_ksp;
61 
62 typedef struct dbuf_stats {
63 	/*
64 	 * Various statistics about the size of the dbuf cache.
65 	 */
66 	kstat_named_t cache_count;
67 	kstat_named_t cache_size_bytes;
68 	kstat_named_t cache_size_bytes_max;
69 	/*
70 	 * Statistics regarding the bounds on the dbuf cache size.
71 	 */
72 	kstat_named_t cache_target_bytes;
73 	kstat_named_t cache_lowater_bytes;
74 	kstat_named_t cache_hiwater_bytes;
75 	/*
76 	 * Total number of dbuf cache evictions that have occurred.
77 	 */
78 	kstat_named_t cache_total_evicts;
79 	/*
80 	 * The distribution of dbuf levels in the dbuf cache and
81 	 * the total size of all dbufs at each level.
82 	 */
83 	kstat_named_t cache_levels[DN_MAX_LEVELS];
84 	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
85 	/*
86 	 * Statistics about the dbuf hash table.
87 	 */
88 	kstat_named_t hash_hits;
89 	kstat_named_t hash_misses;
90 	kstat_named_t hash_collisions;
91 	kstat_named_t hash_elements;
92 	kstat_named_t hash_elements_max;
93 	/*
94 	 * Number of sublists in the dbuf hash table that contain more
95 	 * than one dbuf, and the length of the longest hash chain observed.
96 	 */
97 	kstat_named_t hash_chains;
98 	kstat_named_t hash_chain_max;
99 	/*
100 	 * Number of times dbuf_create() discovers that the dbuf it is about
101 	 * to insert was already created and inserted in the dbuf hash table.
102 	 */
103 	kstat_named_t hash_insert_race;
104 	/*
105 	 * Number of entries in the hash table dbuf and mutex arrays.
106 	 */
107 	kstat_named_t hash_table_count;
108 	kstat_named_t hash_mutex_count;
109 	/*
110 	 * Statistics about the size of the metadata dbuf cache.
111 	 */
112 	kstat_named_t metadata_cache_count;
113 	kstat_named_t metadata_cache_size_bytes;
114 	kstat_named_t metadata_cache_size_bytes_max;
115 	/*
116 	 * For diagnostic purposes, this is incremented whenever we can't add
117 	 * something to the metadata cache because it's full, and instead put
118 	 * the data in the regular dbuf cache.
119 	 */
120 	kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
122 
123 dbuf_stats_t dbuf_stats = {
124 	{ "cache_count",			KSTAT_DATA_UINT64 },
125 	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
126 	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
127 	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
128 	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
129 	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
130 	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
131 	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
132 	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
133 	{ "hash_hits",				KSTAT_DATA_UINT64 },
134 	{ "hash_misses",			KSTAT_DATA_UINT64 },
135 	{ "hash_collisions",			KSTAT_DATA_UINT64 },
136 	{ "hash_elements",			KSTAT_DATA_UINT64 },
137 	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
138 	{ "hash_chains",			KSTAT_DATA_UINT64 },
139 	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
140 	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
141 	{ "hash_table_count",			KSTAT_DATA_UINT64 },
142 	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
143 	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
144 	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
145 	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
146 	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
147 };
148 
149 struct {
150 	wmsum_t cache_count;
151 	wmsum_t cache_total_evicts;
152 	wmsum_t cache_levels[DN_MAX_LEVELS];
153 	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 	wmsum_t hash_hits;
155 	wmsum_t hash_misses;
156 	wmsum_t hash_collisions;
157 	wmsum_t hash_chains;
158 	wmsum_t hash_insert_race;
159 	wmsum_t metadata_cache_count;
160 	wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
162 
163 #define	DBUF_STAT_INCR(stat, val)	\
164 	wmsum_add(&dbuf_sums.stat, val);
165 #define	DBUF_STAT_DECR(stat, val)	\
166 	DBUF_STAT_INCR(stat, -(val));
167 #define	DBUF_STAT_BUMP(stat)		\
168 	DBUF_STAT_INCR(stat, 1);
169 #define	DBUF_STAT_BUMPDOWN(stat)	\
170 	DBUF_STAT_INCR(stat, -1);
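/*
 * Note: DBUF_STAT_MAX publishes a new maximum for a kstat using a
 * lock-free compare-and-swap loop; if another thread races in a larger
 * value first, the CAS fails and we re-read until the stored maximum
 * is at least v.
 */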
171 #define	DBUF_STAT_MAX(stat, v) {					\
172 	uint64_t _m;							\
173 	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
174 	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 		continue;						\
176 }
177 
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
181 
182 /*
183  * Global data structures and functions for the dbuf cache.
184  */
185 static kmem_cache_t *dbuf_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
187 
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
192 
193 /*
194  * There are two dbuf caches; each dbuf can only be in one of them at a time.
195  *
196  * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197  *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198  *    that represent the metadata that describes filesystems/snapshots/
199  *    bookmarks/properties/etc. We only evict from this cache when we export a
200  *    pool, to short-circuit as much I/O as possible for all administrative
201  *    commands that need the metadata. There is no eviction policy for this
202  *    cache, because we try to only include types in it which would occupy a
203  *    very small amount of space per object but create a large impact on the
204  *    performance of these commands. Instead, after it reaches a maximum size
205  *    (which should only happen on very small memory systems with a very large
206  *    number of filesystem objects), we stop taking new dbufs into the
207  *    metadata cache, instead putting them in the normal dbuf cache.
208  *
209  * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210  *    are not currently held but have been recently released. These dbufs
211  *    are not eligible for arc eviction until they are aged out of the cache.
212  *    Dbufs that are aged out of the cache will be immediately destroyed and
213  *    become eligible for arc eviction.
214  *
215  * Dbufs are added to these caches once the last hold is released. If a dbuf is
216  * later accessed and still exists in the dbuf cache, then it will be removed
217  * from the cache and later re-added to the head of the cache.
218  *
219  * If a given dbuf meets the requirements for the metadata cache, it will go
220  * there, otherwise it will be considered for the generic LRU dbuf cache. The
221  * caches and the refcounts tracking their sizes are stored in an array indexed
222  * by those caches' matching enum values (from dbuf_cached_state_t).
223  */
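/*
 * Illustrative sketch only (the real logic lives in the hold-release
 * path, e.g. dbuf_rele_and_unlock()): picking the destination cache
 * when the last hold is dropped looks roughly like
 *
 *	dbuf_cached_state_t dcs = dbuf_include_in_metadata_cache(db) ?
 *	    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
 *	db->db_caching_status = dcs;
 *	multilist_insert(&dbuf_caches[dcs].cache, db);
 *	(void) zfs_refcount_add_many(&dbuf_caches[dcs].size,
 *	    db->db.db_size, db);
 */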
224 typedef struct dbuf_cache {
225 	multilist_t cache;
226 	zfs_refcount_t size ____cacheline_aligned;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229 
230 /* Size limits for the caches */
231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
233 
234 /* Set the default sizes of the caches to log2 fraction of arc size */
235 static uint_t dbuf_cache_shift = 5;
236 static uint_t dbuf_metadata_cache_shift = 6;
237 
238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
239 static uint_t dbuf_mutex_cache_shift = 0;
240 
241 static unsigned long dbuf_cache_target_bytes(void);
242 static unsigned long dbuf_metadata_cache_target_bytes(void);
243 
244 /*
245  * The LRU dbuf cache uses a three-stage eviction policy:
246  *	- A low water mark designates when the dbuf eviction thread
247  *	should stop evicting from the dbuf cache.
248  *	- When we reach the maximum size (aka mid water mark), we
249  *	signal the eviction thread to run.
250  *	- The high water mark indicates when the eviction thread
251  *	is unable to keep up with the incoming load and eviction must
252  *	happen in the context of the calling thread.
253  *
254  * The dbuf cache:
255  *                                                 (max size)
256  *                                      low water   mid water   hi water
257  * +----------------------------------------+----------+----------+
258  * |                                        |          |          |
259  * |                                        |          |          |
260  * |                                        |          |          |
261  * |                                        |          |          |
262  * +----------------------------------------+----------+----------+
263  *                                        stop        signal     evict
264  *                                      evicting     eviction   directly
265  *                                                    thread
266  *
267  * The high and low water marks indicate the operating range for the eviction
268  * thread. The low water mark is, by default, 90% of the total size of the
269  * cache and the high water mark is at 110% (both of these percentages can be
270  * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
271  * respectively). The eviction thread will try to ensure that the cache remains
272  * within this range by waking up every second and checking if the cache is
273  * above the low water mark. The thread can also be woken up by callers adding
274  * elements into the cache if the cache is larger than the mid water (i.e max
275  * cache size). Once the eviction thread is woken up and eviction is required,
276  * it will continue evicting buffers until it's able to reduce the cache size
277  * to the low water mark. If the cache size continues to grow and hits the high
278  * water mark, then callers adding elements to the cache will begin to evict
279  * directly from the cache until the cache is no longer above the high water
280  * mark.
281  */
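/*
 * Worked example with illustrative numbers: if dbuf_cache_target_bytes()
 * returns 100MB and the default dbuf_cache_lowater_pct/hiwater_pct of 10
 * are in effect, the low water mark sits at 90MB and the high water mark
 * at 110MB. The evict thread works the cache back down to 90MB, and
 * callers evict directly only while the cache exceeds 110MB.
 */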
282 
283 /*
284  * The percentage above and below the maximum cache size.
285  */
286 static uint_t dbuf_cache_hiwater_pct = 10;
287 static uint_t dbuf_cache_lowater_pct = 10;
288 
289 static int
290 dbuf_cons(void *vdb, void *unused, int kmflag)
291 {
292 	(void) unused, (void) kmflag;
293 	dmu_buf_impl_t *db = vdb;
294 	memset(db, 0, sizeof (dmu_buf_impl_t));
295 
296 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
297 	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
298 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
299 	multilist_link_init(&db->db_cache_link);
300 	zfs_refcount_create(&db->db_holds);
301 
302 	return (0);
303 }
304 
305 static void
306 dbuf_dest(void *vdb, void *unused)
307 {
308 	(void) unused;
309 	dmu_buf_impl_t *db = vdb;
310 	mutex_destroy(&db->db_mtx);
311 	rw_destroy(&db->db_rwlock);
312 	cv_destroy(&db->db_changed);
313 	ASSERT(!multilist_link_active(&db->db_cache_link));
314 	zfs_refcount_destroy(&db->db_holds);
315 }
316 
317 /*
318  * dbuf hash table routines
319  */
320 static dbuf_hash_table_t dbuf_hash_table;
321 
322 /*
323  * We use Cityhash for this. It's fast, and has good hash properties without
324  * requiring any large static buffers.
325  */
326 static uint64_t
327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
328 {
329 	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
330 }
331 
332 #define	DTRACE_SET_STATE(db, why) \
333 	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
334 	    const char *, why)
335 
336 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
337 	((dbuf)->db.db_object == (obj) &&		\
338 	(dbuf)->db_objset == (os) &&			\
339 	(dbuf)->db_level == (level) &&			\
340 	(dbuf)->db_blkid == (blkid))
341 
342 dmu_buf_impl_t *
343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
344     uint64_t *hash_out)
345 {
346 	dbuf_hash_table_t *h = &dbuf_hash_table;
347 	uint64_t hv;
348 	uint64_t idx;
349 	dmu_buf_impl_t *db;
350 
351 	hv = dbuf_hash(os, obj, level, blkid);
352 	idx = hv & h->hash_table_mask;
353 
354 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
355 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
356 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
357 			mutex_enter(&db->db_mtx);
358 			if (db->db_state != DB_EVICTING) {
359 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 				return (db);
361 			}
362 			mutex_exit(&db->db_mtx);
363 		}
364 	}
365 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
366 	if (hash_out != NULL)
367 		*hash_out = hv;
368 	return (NULL);
369 }
370 
371 static dmu_buf_impl_t *
372 dbuf_find_bonus(objset_t *os, uint64_t object)
373 {
374 	dnode_t *dn;
375 	dmu_buf_impl_t *db = NULL;
376 
377 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
378 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
379 		if (dn->dn_bonus != NULL) {
380 			db = dn->dn_bonus;
381 			mutex_enter(&db->db_mtx);
382 		}
383 		rw_exit(&dn->dn_struct_rwlock);
384 		dnode_rele(dn, FTAG);
385 	}
386 	return (db);
387 }
388 
389 /*
390  * Insert an entry into the hash table.  If there is already an element
391  * equal to db in the hash table, then the already existing element
392  * will be returned and the new element will not be inserted.
393  * Otherwise returns NULL.
394  */
395 static dmu_buf_impl_t *
396 dbuf_hash_insert(dmu_buf_impl_t *db)
397 {
398 	dbuf_hash_table_t *h = &dbuf_hash_table;
399 	objset_t *os = db->db_objset;
400 	uint64_t obj = db->db.db_object;
401 	int level = db->db_level;
402 	uint64_t blkid, idx;
403 	dmu_buf_impl_t *dbf;
404 	uint32_t i;
405 
406 	blkid = db->db_blkid;
407 	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
408 	idx = db->db_hash & h->hash_table_mask;
409 
410 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
411 	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
412 	    dbf = dbf->db_hash_next, i++) {
413 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
414 			mutex_enter(&dbf->db_mtx);
415 			if (dbf->db_state != DB_EVICTING) {
416 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
417 				return (dbf);
418 			}
419 			mutex_exit(&dbf->db_mtx);
420 		}
421 	}
422 
423 	if (i > 0) {
424 		DBUF_STAT_BUMP(hash_collisions);
425 		if (i == 1)
426 			DBUF_STAT_BUMP(hash_chains);
427 
428 		DBUF_STAT_MAX(hash_chain_max, i);
429 	}
430 
431 	mutex_enter(&db->db_mtx);
432 	db->db_hash_next = h->hash_table[idx];
433 	h->hash_table[idx] = db;
434 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
435 	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
436 	DBUF_STAT_MAX(hash_elements_max, he);
437 
438 	return (NULL);
439 }
440 
441 /*
442  * This returns whether this dbuf should be stored in the metadata cache, which
443  * is based on whether it's from one of the dnode types that store data related
444  * to traversing dataset hierarchies.
445  */
446 static boolean_t
447 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
448 {
449 	DB_DNODE_ENTER(db);
450 	dmu_object_type_t type = DB_DNODE(db)->dn_type;
451 	DB_DNODE_EXIT(db);
452 
453 	/* Check if this dbuf is one of the types we care about */
454 	if (DMU_OT_IS_METADATA_CACHED(type)) {
455 		/* If we hit this, then we set something up wrong in dmu_ot */
456 		ASSERT(DMU_OT_IS_METADATA(type));
457 
458 		/*
459 		 * Sanity check for small-memory systems: don't allocate too
460 		 * much memory for this purpose.
461 		 */
462 		if (zfs_refcount_count(
463 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
464 		    dbuf_metadata_cache_target_bytes()) {
465 			DBUF_STAT_BUMP(metadata_cache_overflow);
466 			return (B_FALSE);
467 		}
468 
469 		return (B_TRUE);
470 	}
471 
472 	return (B_FALSE);
473 }
474 
475 /*
476  * Remove an entry from the hash table.  It must be in the EVICTING state.
477  */
478 static void
479 dbuf_hash_remove(dmu_buf_impl_t *db)
480 {
481 	dbuf_hash_table_t *h = &dbuf_hash_table;
482 	uint64_t idx;
483 	dmu_buf_impl_t *dbf, **dbp;
484 
485 	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
486 	    db->db_blkid), ==, db->db_hash);
487 	idx = db->db_hash & h->hash_table_mask;
488 
489 	/*
490 	 * We mustn't hold db_mtx to maintain lock ordering:
491 	 * DBUF_HASH_MUTEX > db_mtx.
492 	 */
493 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
494 	ASSERT(db->db_state == DB_EVICTING);
495 	ASSERT(!MUTEX_HELD(&db->db_mtx));
496 
497 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
498 	dbp = &h->hash_table[idx];
499 	while ((dbf = *dbp) != db) {
500 		dbp = &dbf->db_hash_next;
501 		ASSERT(dbf != NULL);
502 	}
503 	*dbp = db->db_hash_next;
504 	db->db_hash_next = NULL;
505 	if (h->hash_table[idx] &&
506 	    h->hash_table[idx]->db_hash_next == NULL)
507 		DBUF_STAT_BUMPDOWN(hash_chains);
508 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
509 	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
510 }
511 
512 typedef enum {
513 	DBVU_EVICTING,
514 	DBVU_NOT_EVICTING
515 } dbvu_verify_type_t;
516 
517 static void
518 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
519 {
520 #ifdef ZFS_DEBUG
521 	int64_t holds;
522 
523 	if (db->db_user == NULL)
524 		return;
525 
526 	/* Only data blocks support the attachment of user data. */
527 	ASSERT(db->db_level == 0);
528 
529 	/* Clients must resolve a dbuf before attaching user data. */
530 	ASSERT(db->db.db_data != NULL);
531 	ASSERT3U(db->db_state, ==, DB_CACHED);
532 
533 	holds = zfs_refcount_count(&db->db_holds);
534 	if (verify_type == DBVU_EVICTING) {
535 		/*
536 		 * Immediate eviction occurs when holds == dirtycnt.
537 		 * For normal eviction buffers, holds is zero on
538 		 * eviction, except when dbuf_fix_old_data() calls
539 		 * dbuf_clear_data().  However, the hold count can grow
540 		 * during eviction even though db_mtx is held (see
541 		 * dmu_bonus_hold() for an example), so we can only
542 		 * test the generic invariant that holds >= dirtycnt.
543 		 */
544 		ASSERT3U(holds, >=, db->db_dirtycnt);
545 	} else {
546 		if (db->db_user_immediate_evict == TRUE)
547 			ASSERT3U(holds, >=, db->db_dirtycnt);
548 		else
549 			ASSERT3U(holds, >, 0);
550 	}
551 #endif
552 }
553 
554 static void
555 dbuf_evict_user(dmu_buf_impl_t *db)
556 {
557 	dmu_buf_user_t *dbu = db->db_user;
558 
559 	ASSERT(MUTEX_HELD(&db->db_mtx));
560 
561 	if (dbu == NULL)
562 		return;
563 
564 	dbuf_verify_user(db, DBVU_EVICTING);
565 	db->db_user = NULL;
566 
567 #ifdef ZFS_DEBUG
568 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
569 		*dbu->dbu_clear_on_evict_dbufp = NULL;
570 #endif
571 
572 	/*
573 	 * There are two eviction callbacks - one that we call synchronously
574 	 * and one that we invoke via a taskq.  The async one is useful for
575 	 * avoiding lock order reversals and limiting stack depth.
576 	 *
577 	 * Note that if we have a sync callback but no async callback,
578 	 * it's likely that the sync callback will free the structure
579 	 * containing the dbu.  In that case we need to take care to not
580 	 * dereference dbu after calling the sync evict func.
581 	 */
582 	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
583 
584 	if (dbu->dbu_evict_func_sync != NULL)
585 		dbu->dbu_evict_func_sync(dbu);
586 
587 	if (has_async) {
588 		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
589 		    dbu, 0, &dbu->dbu_tqent);
590 	}
591 }
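/*
 * Hedged usage sketch (the consumer structure and callback names are
 * hypothetical): a dbuf user embeds a dmu_buf_user_t in its own state
 * and registers its sync/async eviction callbacks roughly as
 *
 *	dmu_buf_init_user(&obj->obj_dbu, my_evict_sync, my_evict_async,
 *	    &obj->obj_db);
 *	VERIFY3P(dmu_buf_set_user(db_fake, &obj->obj_dbu), ==, NULL);
 *
 * after which dbuf_evict_user() above calls my_evict_sync() directly
 * and dispatches my_evict_async() via dbu_evict_taskq.
 */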
592 
593 boolean_t
594 dbuf_is_metadata(dmu_buf_impl_t *db)
595 {
596 	/*
597 	 * Consider indirect blocks and spill blocks to be metadata.
598 	 */
599 	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
600 		return (B_TRUE);
601 	} else {
602 		boolean_t is_metadata;
603 
604 		DB_DNODE_ENTER(db);
605 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
606 		DB_DNODE_EXIT(db);
607 
608 		return (is_metadata);
609 	}
610 }
611 
612 /*
613  * We want to exclude buffers that are on a special allocation class from
614  * L2ARC.
615  */
616 boolean_t
617 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
618 {
619 	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
620 	    (db->db_objset->os_secondary_cache ==
621 	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
622 		if (l2arc_exclude_special == 0)
623 			return (B_TRUE);
624 
625 		blkptr_t *bp = db->db_blkptr;
626 		if (bp == NULL || BP_IS_HOLE(bp))
627 			return (B_FALSE);
628 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
629 		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
630 		vdev_t *vd = NULL;
631 
632 		if (vdev < rvd->vdev_children)
633 			vd = rvd->vdev_child[vdev];
634 
635 		if (vd == NULL)
636 			return (B_TRUE);
637 
638 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
639 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
640 			return (B_TRUE);
641 	}
642 	return (B_FALSE);
643 }
644 
645 static inline boolean_t
646 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
647 {
648 	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
649 	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
650 	    (level > 0 ||
651 	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
652 		if (l2arc_exclude_special == 0)
653 			return (B_TRUE);
654 
655 		if (bp == NULL || BP_IS_HOLE(bp))
656 			return (B_FALSE);
657 		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
658 		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
659 		vdev_t *vd = NULL;
660 
661 		if (vdev < rvd->vdev_children)
662 			vd = rvd->vdev_child[vdev];
663 
664 		if (vd == NULL)
665 			return (B_TRUE);
666 
667 		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
668 		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
669 			return (B_TRUE);
670 	}
671 	return (B_FALSE);
672 }
673 
674 
675 /*
676  * This function *must* return indices evenly distributed between all
677  * sublists of the multilist. This is needed due to how the dbuf eviction
678  * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
679  * distributed between all sublists and uses this assumption when
680  * deciding which sublist to evict from and how much to evict from it.
681  */
682 static unsigned int
683 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
684 {
685 	dmu_buf_impl_t *db = obj;
686 
687 	/*
688 	 * The assumption here is that the hash value for a given
689 	 * dmu_buf_impl_t will remain constant throughout its lifetime
690 	 * (i.e. its objset, object, level and blkid fields don't change).
691 	 * Thus, we don't need to store the dbuf's sublist index
692 	 * on insertion, as this index can be recalculated on removal.
693 	 *
694 	 * Also, the low order bits of the hash value are thought to be
695 	 * distributed evenly. Otherwise, in the case that the multilist
696 	 * has a power of two number of sublists, each sublist's usage
697 	 * would not be evenly distributed. In this context full 64bit
698 	 * division would be a waste of time, so limit it to 32 bits.
699 	 */
700 	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
701 	    db->db_level, db->db_blkid) %
702 	    multilist_get_num_sublists(ml));
703 }
704 
705 /*
706  * The target size of the dbuf cache can grow with the ARC target,
707  * unless limited by the tunable dbuf_cache_max_bytes.
708  */
709 static inline unsigned long
710 dbuf_cache_target_bytes(void)
711 {
712 	return (MIN(dbuf_cache_max_bytes,
713 	    arc_target_bytes() >> dbuf_cache_shift));
714 }
715 
716 /*
717  * The target size of the dbuf metadata cache can grow with the ARC target,
718  * unless limited by the tunable dbuf_metadata_cache_max_bytes.
719  */
720 static inline unsigned long
721 dbuf_metadata_cache_target_bytes(void)
722 {
723 	return (MIN(dbuf_metadata_cache_max_bytes,
724 	    arc_target_bytes() >> dbuf_metadata_cache_shift));
725 }
726 
727 static inline uint64_t
728 dbuf_cache_hiwater_bytes(void)
729 {
730 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
731 	return (dbuf_cache_target +
732 	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
733 }
734 
735 static inline uint64_t
736 dbuf_cache_lowater_bytes(void)
737 {
738 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
739 	return (dbuf_cache_target -
740 	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
741 }
742 
743 static inline boolean_t
744 dbuf_cache_above_lowater(void)
745 {
746 	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
747 	    dbuf_cache_lowater_bytes());
748 }
749 
750 /*
751  * Evict the oldest eligible dbuf from the dbuf cache.
752  */
753 static void
754 dbuf_evict_one(void)
755 {
756 	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
757 	multilist_sublist_t *mls = multilist_sublist_lock(
758 	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);
759 
760 	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
761 
762 	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
763 	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
764 		db = multilist_sublist_prev(mls, db);
765 	}
766 
767 	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
768 	    multilist_sublist_t *, mls);
769 
770 	if (db != NULL) {
771 		multilist_sublist_remove(mls, db);
772 		multilist_sublist_unlock(mls);
773 		(void) zfs_refcount_remove_many(
774 		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
775 		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
776 		DBUF_STAT_BUMPDOWN(cache_count);
777 		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
778 		    db->db.db_size);
779 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
780 		db->db_caching_status = DB_NO_CACHE;
781 		dbuf_destroy(db);
782 		DBUF_STAT_BUMP(cache_total_evicts);
783 	} else {
784 		multilist_sublist_unlock(mls);
785 	}
786 }
787 
788 /*
789  * The dbuf evict thread is responsible for aging out dbufs from the
790  * cache. Once the cache has reached its maximum size, dbufs are removed
791  * and destroyed. The eviction thread will continue running until the size
792  * of the dbuf cache is at or below the low water mark. Once the dbuf is aged
793  * out of the cache it is destroyed and becomes eligible for arc eviction.
794  */
795 static __attribute__((noreturn)) void
796 dbuf_evict_thread(void *unused)
797 {
798 	(void) unused;
799 	callb_cpr_t cpr;
800 
801 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
802 
803 	mutex_enter(&dbuf_evict_lock);
804 	while (!dbuf_evict_thread_exit) {
805 		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
806 			CALLB_CPR_SAFE_BEGIN(&cpr);
807 			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
808 			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
809 			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
810 		}
811 		mutex_exit(&dbuf_evict_lock);
812 
813 		/*
814 		 * Keep evicting as long as we're above the low water mark
815 		 * for the cache. We do this without holding the locks to
816 		 * minimize lock contention.
817 		 */
818 		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
819 			dbuf_evict_one();
820 		}
821 
822 		mutex_enter(&dbuf_evict_lock);
823 	}
824 
825 	dbuf_evict_thread_exit = B_FALSE;
826 	cv_broadcast(&dbuf_evict_cv);
827 	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
828 	thread_exit();
829 }
830 
831 /*
832  * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
833  * If the dbuf cache is at its high water mark, then evict a dbuf from the
834  * dbuf cache using the caller's context.
835  */
836 static void
837 dbuf_evict_notify(uint64_t size)
838 {
839 	/*
840 	 * We check if we should evict without holding the dbuf_evict_lock,
841 	 * because it's OK to occasionally make the wrong decision here,
842 	 * and grabbing the lock results in massive lock contention.
843 	 */
844 	if (size > dbuf_cache_target_bytes()) {
845 		if (size > dbuf_cache_hiwater_bytes())
846 			dbuf_evict_one();
847 		cv_signal(&dbuf_evict_cv);
848 	}
849 }
850 
851 static int
852 dbuf_kstat_update(kstat_t *ksp, int rw)
853 {
854 	dbuf_stats_t *ds = ksp->ks_data;
855 	dbuf_hash_table_t *h = &dbuf_hash_table;
856 
857 	if (rw == KSTAT_WRITE)
858 		return (SET_ERROR(EACCES));
859 
860 	ds->cache_count.value.ui64 =
861 	    wmsum_value(&dbuf_sums.cache_count);
862 	ds->cache_size_bytes.value.ui64 =
863 	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
864 	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
865 	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
866 	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
867 	ds->cache_total_evicts.value.ui64 =
868 	    wmsum_value(&dbuf_sums.cache_total_evicts);
869 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
870 		ds->cache_levels[i].value.ui64 =
871 		    wmsum_value(&dbuf_sums.cache_levels[i]);
872 		ds->cache_levels_bytes[i].value.ui64 =
873 		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
874 	}
875 	ds->hash_hits.value.ui64 =
876 	    wmsum_value(&dbuf_sums.hash_hits);
877 	ds->hash_misses.value.ui64 =
878 	    wmsum_value(&dbuf_sums.hash_misses);
879 	ds->hash_collisions.value.ui64 =
880 	    wmsum_value(&dbuf_sums.hash_collisions);
881 	ds->hash_chains.value.ui64 =
882 	    wmsum_value(&dbuf_sums.hash_chains);
883 	ds->hash_insert_race.value.ui64 =
884 	    wmsum_value(&dbuf_sums.hash_insert_race);
885 	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
886 	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
887 	ds->metadata_cache_count.value.ui64 =
888 	    wmsum_value(&dbuf_sums.metadata_cache_count);
889 	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
890 	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
891 	ds->metadata_cache_overflow.value.ui64 =
892 	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
893 	return (0);
894 }
895 
896 void
897 dbuf_init(void)
898 {
899 	uint64_t hmsize, hsize = 1ULL << 16;
900 	dbuf_hash_table_t *h = &dbuf_hash_table;
901 
902 	/*
903 	 * The hash table is big enough to fill one eighth of physical memory
904 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
905 	 * By default, the table will take up
906 	 * totalmem / 8 * sizeof(void*) / 8K (128KB per GB with 8-byte pointers).
907 	 */
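	/*
	 * Worked example: on a 64GB machine with the default 8K average
	 * block size, the loop below settles at hsize = 2^20 buckets
	 * (2^20 * 8K == 8GB == 64GB / 8), i.e. an 8MB array of 8-byte
	 * bucket pointers.
	 */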
908 	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
909 		hsize <<= 1;
910 
911 	h->hash_table = NULL;
912 	while (h->hash_table == NULL) {
913 		h->hash_table_mask = hsize - 1;
914 
915 		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
916 		if (h->hash_table == NULL)
917 			hsize >>= 1;
918 
919 		ASSERT3U(hsize, >=, 1ULL << 10);
920 	}
921 
922 	/*
923 	 * The hash table buckets are protected by an array of mutexes where
924 	 * each mutex is responsible for protecting 128 buckets.  A minimum
925 	 * array size of 8192 is targeted to avoid contention.
926 	 */
927 	if (dbuf_mutex_cache_shift == 0)
928 		hmsize = MAX(hsize >> 7, 1ULL << 13);
929 	else
930 		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
931 
932 	h->hash_mutexes = NULL;
933 	while (h->hash_mutexes == NULL) {
934 		h->hash_mutex_mask = hmsize - 1;
935 
936 		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
937 		    KM_SLEEP);
938 		if (h->hash_mutexes == NULL)
939 			hmsize >>= 1;
940 	}
941 
942 	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
943 	    sizeof (dmu_buf_impl_t),
944 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
945 
946 	for (int i = 0; i < hmsize; i++)
947 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
948 
949 	dbuf_stats_init(h);
950 
951 	/*
952 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
953 	 * configuration is not required.
954 	 */
955 	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
956 
957 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
958 		multilist_create(&dbuf_caches[dcs].cache,
959 		    sizeof (dmu_buf_impl_t),
960 		    offsetof(dmu_buf_impl_t, db_cache_link),
961 		    dbuf_cache_multilist_index_func);
962 		zfs_refcount_create(&dbuf_caches[dcs].size);
963 	}
964 
965 	dbuf_evict_thread_exit = B_FALSE;
966 	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
967 	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
968 	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
969 	    NULL, 0, &p0, TS_RUN, minclsyspri);
970 
971 	wmsum_init(&dbuf_sums.cache_count, 0);
972 	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
973 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
974 		wmsum_init(&dbuf_sums.cache_levels[i], 0);
975 		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
976 	}
977 	wmsum_init(&dbuf_sums.hash_hits, 0);
978 	wmsum_init(&dbuf_sums.hash_misses, 0);
979 	wmsum_init(&dbuf_sums.hash_collisions, 0);
980 	wmsum_init(&dbuf_sums.hash_chains, 0);
981 	wmsum_init(&dbuf_sums.hash_insert_race, 0);
982 	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
983 	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
984 
985 	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
986 	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
987 	    KSTAT_FLAG_VIRTUAL);
988 	if (dbuf_ksp != NULL) {
989 		for (int i = 0; i < DN_MAX_LEVELS; i++) {
990 			snprintf(dbuf_stats.cache_levels[i].name,
991 			    KSTAT_STRLEN, "cache_level_%d", i);
992 			dbuf_stats.cache_levels[i].data_type =
993 			    KSTAT_DATA_UINT64;
994 			snprintf(dbuf_stats.cache_levels_bytes[i].name,
995 			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
996 			dbuf_stats.cache_levels_bytes[i].data_type =
997 			    KSTAT_DATA_UINT64;
998 		}
999 		dbuf_ksp->ks_data = &dbuf_stats;
1000 		dbuf_ksp->ks_update = dbuf_kstat_update;
1001 		kstat_install(dbuf_ksp);
1002 	}
1003 }
1004 
1005 void
1006 dbuf_fini(void)
1007 {
1008 	dbuf_hash_table_t *h = &dbuf_hash_table;
1009 
1010 	dbuf_stats_destroy();
1011 
1012 	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1013 		mutex_destroy(&h->hash_mutexes[i]);
1014 
1015 	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1016 	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1017 	    sizeof (kmutex_t));
1018 
1019 	kmem_cache_destroy(dbuf_kmem_cache);
1020 	taskq_destroy(dbu_evict_taskq);
1021 
1022 	mutex_enter(&dbuf_evict_lock);
1023 	dbuf_evict_thread_exit = B_TRUE;
1024 	while (dbuf_evict_thread_exit) {
1025 		cv_signal(&dbuf_evict_cv);
1026 		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1027 	}
1028 	mutex_exit(&dbuf_evict_lock);
1029 
1030 	mutex_destroy(&dbuf_evict_lock);
1031 	cv_destroy(&dbuf_evict_cv);
1032 
1033 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1034 		zfs_refcount_destroy(&dbuf_caches[dcs].size);
1035 		multilist_destroy(&dbuf_caches[dcs].cache);
1036 	}
1037 
1038 	if (dbuf_ksp != NULL) {
1039 		kstat_delete(dbuf_ksp);
1040 		dbuf_ksp = NULL;
1041 	}
1042 
1043 	wmsum_fini(&dbuf_sums.cache_count);
1044 	wmsum_fini(&dbuf_sums.cache_total_evicts);
1045 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
1046 		wmsum_fini(&dbuf_sums.cache_levels[i]);
1047 		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1048 	}
1049 	wmsum_fini(&dbuf_sums.hash_hits);
1050 	wmsum_fini(&dbuf_sums.hash_misses);
1051 	wmsum_fini(&dbuf_sums.hash_collisions);
1052 	wmsum_fini(&dbuf_sums.hash_chains);
1053 	wmsum_fini(&dbuf_sums.hash_insert_race);
1054 	wmsum_fini(&dbuf_sums.metadata_cache_count);
1055 	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1056 }
1057 
1058 /*
1059  * Other stuff.
1060  */
1061 
1062 #ifdef ZFS_DEBUG
1063 static void
1064 dbuf_verify(dmu_buf_impl_t *db)
1065 {
1066 	dnode_t *dn;
1067 	dbuf_dirty_record_t *dr;
1068 	uint32_t txg_prev;
1069 
1070 	ASSERT(MUTEX_HELD(&db->db_mtx));
1071 
1072 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1073 		return;
1074 
1075 	ASSERT(db->db_objset != NULL);
1076 	DB_DNODE_ENTER(db);
1077 	dn = DB_DNODE(db);
1078 	if (dn == NULL) {
1079 		ASSERT(db->db_parent == NULL);
1080 		ASSERT(db->db_blkptr == NULL);
1081 	} else {
1082 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
1083 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
1084 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
1085 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1086 		    db->db_blkid == DMU_SPILL_BLKID ||
1087 		    !avl_is_empty(&dn->dn_dbufs));
1088 	}
1089 	if (db->db_blkid == DMU_BONUS_BLKID) {
1090 		ASSERT(dn != NULL);
1091 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1092 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1093 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
1094 		ASSERT(dn != NULL);
1095 		ASSERT0(db->db.db_offset);
1096 	} else {
1097 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1098 	}
1099 
1100 	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1101 		ASSERT(dr->dr_dbuf == db);
1102 		txg_prev = dr->dr_txg;
1103 		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1104 		    dr = list_next(&db->db_dirty_records, dr)) {
1105 			ASSERT(dr->dr_dbuf == db);
1106 			ASSERT(txg_prev > dr->dr_txg);
1107 			txg_prev = dr->dr_txg;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * We can't assert that db_size matches dn_datablksz because it
1113 	 * can be momentarily different when another thread is doing
1114 	 * dnode_set_blksz().
1115 	 */
1116 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1117 		dr = db->db_data_pending;
1118 		/*
1119 		 * It should only be modified in syncing context, so
1120 		 * make sure we only have one copy of the data.
1121 		 */
1122 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1123 	}
1124 
1125 	/* verify db->db_blkptr */
1126 	if (db->db_blkptr) {
1127 		if (db->db_parent == dn->dn_dbuf) {
1128 			/* db is pointed to by the dnode */
1129 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1130 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1131 				ASSERT(db->db_parent == NULL);
1132 			else
1133 				ASSERT(db->db_parent != NULL);
1134 			if (db->db_blkid != DMU_SPILL_BLKID)
1135 				ASSERT3P(db->db_blkptr, ==,
1136 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
1137 		} else {
1138 			/* db is pointed to by an indirect block */
1139 			int epb __maybe_unused = db->db_parent->db.db_size >>
1140 			    SPA_BLKPTRSHIFT;
1141 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1142 			ASSERT3U(db->db_parent->db.db_object, ==,
1143 			    db->db.db_object);
1144 			/*
1145 			 * dnode_grow_indblksz() can make this fail if we don't
1146 			 * have the parent's rwlock.  XXX indblksz no longer
1147 			 * grows.  safe to do this now?
1148 			 */
1149 			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1150 				ASSERT3P(db->db_blkptr, ==,
1151 				    ((blkptr_t *)db->db_parent->db.db_data +
1152 				    db->db_blkid % epb));
1153 			}
1154 		}
1155 	}
1156 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1157 	    (db->db_buf == NULL || db->db_buf->b_data) &&
1158 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1159 	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1160 		/*
1161 		 * If the blkptr isn't set but the dbuf has nonzero data,
1162 		 * it had better be dirty, otherwise we'll lose that
1163 		 * data when we evict this buffer.
1164 		 *
1165 		 * There is an exception to this rule for indirect blocks; in
1166 		 * this case, if the indirect block is a hole, we fill in a few
1167 		 * fields on each of the child blocks (importantly, birth time)
1168 		 * to prevent hole birth times from being lost when you
1169 		 * partially fill in a hole.
1170 		 */
1171 		if (db->db_dirtycnt == 0) {
1172 			if (db->db_level == 0) {
1173 				uint64_t *buf = db->db.db_data;
1174 				int i;
1175 
1176 				for (i = 0; i < db->db.db_size >> 3; i++) {
1177 					ASSERT(buf[i] == 0);
1178 				}
1179 			} else {
1180 				blkptr_t *bps = db->db.db_data;
1181 				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1182 				    db->db.db_size);
1183 				/*
1184 				 * We want to verify that all the blkptrs in the
1185 				 * indirect block are holes, but we may have
1186 				 * automatically set up a few fields for them.
1187 				 * We iterate through each blkptr and verify
1188 				 * they only have those fields set.
1189 				 */
1190 				for (int i = 0;
1191 				    i < db->db.db_size / sizeof (blkptr_t);
1192 				    i++) {
1193 					blkptr_t *bp = &bps[i];
1194 					ASSERT(ZIO_CHECKSUM_IS_ZERO(
1195 					    &bp->blk_cksum));
1196 					ASSERT(
1197 					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1198 					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1199 					    DVA_IS_EMPTY(&bp->blk_dva[2]));
1200 					ASSERT0(bp->blk_fill);
1201 					ASSERT0(bp->blk_pad[0]);
1202 					ASSERT0(bp->blk_pad[1]);
1203 					ASSERT(!BP_IS_EMBEDDED(bp));
1204 					ASSERT(BP_IS_HOLE(bp));
1205 					ASSERT0(bp->blk_phys_birth);
1206 				}
1207 			}
1208 		}
1209 	}
1210 	DB_DNODE_EXIT(db);
1211 }
1212 #endif
1213 
1214 static void
1215 dbuf_clear_data(dmu_buf_impl_t *db)
1216 {
1217 	ASSERT(MUTEX_HELD(&db->db_mtx));
1218 	dbuf_evict_user(db);
1219 	ASSERT3P(db->db_buf, ==, NULL);
1220 	db->db.db_data = NULL;
1221 	if (db->db_state != DB_NOFILL) {
1222 		db->db_state = DB_UNCACHED;
1223 		DTRACE_SET_STATE(db, "clear data");
1224 	}
1225 }
1226 
1227 static void
1228 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1229 {
1230 	ASSERT(MUTEX_HELD(&db->db_mtx));
1231 	ASSERT(buf != NULL);
1232 
1233 	db->db_buf = buf;
1234 	ASSERT(buf->b_data != NULL);
1235 	db->db.db_data = buf->b_data;
1236 }
1237 
1238 static arc_buf_t *
1239 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1240 {
1241 	spa_t *spa = db->db_objset->os_spa;
1242 
1243 	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1244 }
1245 
1246 /*
1247  * Loan out an arc_buf for read.  Return the loaned arc_buf.
1248  */
1249 arc_buf_t *
1250 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1251 {
1252 	arc_buf_t *abuf;
1253 
1254 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1255 	mutex_enter(&db->db_mtx);
1256 	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1257 		int blksz = db->db.db_size;
1258 		spa_t *spa = db->db_objset->os_spa;
1259 
1260 		mutex_exit(&db->db_mtx);
1261 		abuf = arc_loan_buf(spa, B_FALSE, blksz);
1262 		memcpy(abuf->b_data, db->db.db_data, blksz);
1263 	} else {
1264 		abuf = db->db_buf;
1265 		arc_loan_inuse_buf(abuf, db);
1266 		db->db_buf = NULL;
1267 		dbuf_clear_data(db);
1268 		mutex_exit(&db->db_mtx);
1269 	}
1270 	return (abuf);
1271 }
1272 
1273 /*
1274  * Calculate which level n block references the data at the level 0 offset
1275  * provided.
1276  */
1277 uint64_t
1278 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1279 {
1280 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1281 		/*
1282 		 * The level n blkid is equal to the level 0 blkid divided by
1283 		 * the number of level 0s in a level n block.
1284 		 *
1285 		 * The level 0 blkid is offset >> datablkshift =
1286 		 * offset / 2^datablkshift.
1287 		 *
1288 		 * The number of level 0s in a level n is the number of block
1289 		 * pointers in an indirect block, raised to the power of level.
1290 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1291 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1292 		 *
1293 		 * Thus, the level n blkid is: offset /
1294 		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1295 		 * = offset / 2^(datablkshift + level *
1296 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1297 		 * = offset >> (datablkshift + level *
1298 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1299 		 */
1300 
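		/*
		 * Worked example: with 128K data blocks (datablkshift
		 * == 17) and 128K indirect blocks (indblkshift == 17,
		 * SPA_BLKPTRSHIFT == 7), each indirect block holds
		 * 2^(17 - 7) == 1024 block pointers, so the level-1
		 * blkid for an offset is offset >> (17 + 1 * 10).
		 */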
1301 		const unsigned exp = dn->dn_datablkshift +
1302 		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1303 
1304 		if (exp >= 8 * sizeof (offset)) {
1305 			/* This only happens on the highest indirection level */
1306 			ASSERT3U(level, ==, dn->dn_nlevels - 1);
1307 			return (0);
1308 		}
1309 
1310 		ASSERT3U(exp, <, 8 * sizeof (offset));
1311 
1312 		return (offset >> exp);
1313 	} else {
1314 		ASSERT3U(offset, <, dn->dn_datablksz);
1315 		return (0);
1316 	}
1317 }
1318 
1319 /*
1320  * This function is used to lock the parent of the provided dbuf. This should be
1321  * used when modifying or reading db_blkptr.
1322  */
1323 db_lock_type_t
1324 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1325 {
1326 	enum db_lock_type ret = DLT_NONE;
1327 	if (db->db_parent != NULL) {
1328 		rw_enter(&db->db_parent->db_rwlock, rw);
1329 		ret = DLT_PARENT;
1330 	} else if (dmu_objset_ds(db->db_objset) != NULL) {
1331 		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1332 		    tag);
1333 		ret = DLT_OBJSET;
1334 	}
1335 	/*
1336 	 * We only return a DLT_NONE lock when it's the top-most indirect block
1337 	 * of the meta-dnode of the MOS.
1338 	 */
1339 	return (ret);
1340 }
1341 
1342 /*
1343  * We need to pass the lock type in because it's possible that the block will
1344  * move from being the topmost indirect block in a dnode (and thus, have no
1345  * parent) to no longer being the topmost block after an indirection
1346  * increase. This would cause a panic if we didn't pass the lock type in.
1347  */
1348 void
1349 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1350 {
1351 	if (type == DLT_PARENT)
1352 		rw_exit(&db->db_parent->db_rwlock);
1353 	else if (type == DLT_OBJSET)
1354 		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1355 }
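/*
 * Hedged usage sketch: callers such as dbuf_read() pair these two
 * functions and thread the returned lock type through, roughly as
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	...read or modify db->db_blkptr...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */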
1356 
1357 static void
1358 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1359     arc_buf_t *buf, void *vdb)
1360 {
1361 	(void) zb, (void) bp;
1362 	dmu_buf_impl_t *db = vdb;
1363 
1364 	mutex_enter(&db->db_mtx);
1365 	ASSERT3U(db->db_state, ==, DB_READ);
1366 	/*
1367 	 * All reads are synchronous, so we must have a hold on the dbuf
1368 	 */
1369 	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1370 	ASSERT(db->db_buf == NULL);
1371 	ASSERT(db->db.db_data == NULL);
1372 	if (buf == NULL) {
1373 		/* i/o error */
1374 		ASSERT(zio == NULL || zio->io_error != 0);
1375 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1376 		ASSERT3P(db->db_buf, ==, NULL);
1377 		db->db_state = DB_UNCACHED;
1378 		DTRACE_SET_STATE(db, "i/o error");
1379 	} else if (db->db_level == 0 && db->db_freed_in_flight) {
1380 		/* freed in flight */
1381 		ASSERT(zio == NULL || zio->io_error == 0);
1382 		arc_release(buf, db);
1383 		memset(buf->b_data, 0, db->db.db_size);
1384 		arc_buf_freeze(buf);
1385 		db->db_freed_in_flight = FALSE;
1386 		dbuf_set_data(db, buf);
1387 		db->db_state = DB_CACHED;
1388 		DTRACE_SET_STATE(db, "freed in flight");
1389 	} else {
1390 		/* success */
1391 		ASSERT(zio == NULL || zio->io_error == 0);
1392 		dbuf_set_data(db, buf);
1393 		db->db_state = DB_CACHED;
1394 		DTRACE_SET_STATE(db, "successful read");
1395 	}
1396 	cv_broadcast(&db->db_changed);
1397 	dbuf_rele_and_unlock(db, NULL, B_FALSE);
1398 }
1399 
1400 /*
1401  * Shortcut for performing reads on bonus dbufs.  Returns
1402  * an error if we fail to verify the dnode associated with
1403  * a decrypted block. Otherwise success.
1404  */
1405 static int
1406 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1407 {
1408 	int bonuslen, max_bonuslen, err;
1409 
1410 	err = dbuf_read_verify_dnode_crypt(db, flags);
1411 	if (err)
1412 		return (err);
1413 
1414 	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1415 	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1416 	ASSERT(MUTEX_HELD(&db->db_mtx));
1417 	ASSERT(DB_DNODE_HELD(db));
1418 	ASSERT3U(bonuslen, <=, db->db.db_size);
1419 	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1420 	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1421 	if (bonuslen < max_bonuslen)
1422 		memset(db->db.db_data, 0, max_bonuslen);
1423 	if (bonuslen)
1424 		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1425 	db->db_state = DB_CACHED;
1426 	DTRACE_SET_STATE(db, "bonus buffer filled");
1427 	return (0);
1428 }
1429 
1430 static void
1431 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1432 {
1433 	blkptr_t *bps = db->db.db_data;
1434 	uint32_t indbs = 1ULL << dn->dn_indblkshift;
1435 	int n_bps = indbs >> SPA_BLKPTRSHIFT;
1436 
1437 	for (int i = 0; i < n_bps; i++) {
1438 		blkptr_t *bp = &bps[i];
1439 
1440 		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1441 		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1442 		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1443 		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1444 		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1445 		BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
1446 	}
1447 }
1448 
1449 /*
1450  * Handle reads on dbufs that are holes, if necessary.  This function
1451  * requires that the dbuf's mutex is held. Returns success (0) if action
1452  * was taken, ENOENT if no action was taken.
1453  */
1454 static int
1455 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1456 {
1457 	ASSERT(MUTEX_HELD(&db->db_mtx));
1458 
1459 	int is_hole = bp == NULL || BP_IS_HOLE(bp);
1460 	/*
1461 	 * For level 0 blocks only, if the above check fails:
1462 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1463 	 * processes the delete record and clears the bp while we are waiting
1464 	 * for the dn_mtx (resulting in a "no" from block_freed).
1465 	 */
1466 	if (!is_hole && db->db_level == 0)
1467 		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1468 
1469 	if (is_hole) {
1470 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1471 		memset(db->db.db_data, 0, db->db.db_size);
1472 
1473 		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1474 		    bp->blk_birth != 0) {
1475 			dbuf_handle_indirect_hole(db, dn, bp);
1476 		}
1477 		db->db_state = DB_CACHED;
1478 		DTRACE_SET_STATE(db, "hole read satisfied");
1479 		return (0);
1480 	}
1481 	return (ENOENT);
1482 }
1483 
1484 /*
1485  * This function ensures that, when doing a decrypting read of a block,
1486  * we make sure we have decrypted the dnode associated with it. We must do
1487  * this so that we ensure we are fully authenticating the checksum-of-MACs
1488  * tree from the root of the objset down to this block. Indirect blocks are
1489  * always verified against their secure checksum-of-MACs assuming that the
1490  * dnode containing them is correct. Now that we are doing a decrypting read,
1491  * we can be sure that the key is loaded and verify that assumption. This is
1492  * especially important considering that we always read encrypted dnode
1493  * blocks as raw data (without verifying their MACs) to start, and
1494  * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1495  */
1496 static int
1497 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1498 {
1499 	int err = 0;
1500 	objset_t *os = db->db_objset;
1501 	arc_buf_t *dnode_abuf;
1502 	dnode_t *dn;
1503 	zbookmark_phys_t zb;
1504 
1505 	ASSERT(MUTEX_HELD(&db->db_mtx));
1506 
1507 	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1508 	    !os->os_encrypted || os->os_raw_receive)
1509 		return (0);
1510 
1511 	DB_DNODE_ENTER(db);
1512 	dn = DB_DNODE(db);
1513 	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1514 
1515 	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1516 		DB_DNODE_EXIT(db);
1517 		return (0);
1518 	}
1519 
1520 	SET_BOOKMARK(&zb, dmu_objset_id(os),
1521 	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1522 	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1523 
1524 	/*
1525 	 * An error code of EACCES tells us that the key is still not
1526 	 * available. This is ok if we are only reading authenticated
1527 	 * (and therefore non-encrypted) blocks.
1528 	 */
1529 	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1530 	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1531 	    (db->db_blkid == DMU_BONUS_BLKID &&
1532 	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1533 		err = 0;
1534 
1535 	DB_DNODE_EXIT(db);
1536 
1537 	return (err);
1538 }
1539 
1540 /*
1541  * Drops db_mtx and the parent lock specified by dblt and tag before
1542  * returning.
1543  */
1544 static int
1545 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1546     db_lock_type_t dblt, const void *tag)
1547 {
1548 	dnode_t *dn;
1549 	zbookmark_phys_t zb;
1550 	uint32_t aflags = ARC_FLAG_NOWAIT;
1551 	int err, zio_flags;
1552 	blkptr_t bp, *bpp;
1553 
1554 	DB_DNODE_ENTER(db);
1555 	dn = DB_DNODE(db);
1556 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1557 	ASSERT(MUTEX_HELD(&db->db_mtx));
1558 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1559 	ASSERT(db->db_buf == NULL);
1560 	ASSERT(db->db_parent == NULL ||
1561 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1562 
1563 	if (db->db_blkid == DMU_BONUS_BLKID) {
1564 		err = dbuf_read_bonus(db, dn, flags);
1565 		goto early_unlock;
1566 	}
1567 
1568 	if (db->db_state == DB_UNCACHED) {
1569 		if (db->db_blkptr == NULL) {
1570 			bpp = NULL;
1571 		} else {
1572 			bp = *db->db_blkptr;
1573 			bpp = &bp;
1574 		}
1575 	} else {
1576 		struct dirty_leaf *dl;
1577 		dbuf_dirty_record_t *dr;
1578 
1579 		ASSERT3S(db->db_state, ==, DB_NOFILL);
1580 
1581 		dr = list_head(&db->db_dirty_records);
1582 		if (dr == NULL) {
1583 			err = EIO;
1584 			goto early_unlock;
1585 		} else {
1586 			dl = &dr->dt.dl;
1587 			if (!dl->dr_brtwrite) {
1588 				err = EIO;
1589 				goto early_unlock;
1590 			}
1591 			bp = dl->dr_overridden_by;
1592 			bpp = &bp;
1593 		}
1594 	}
1595 
1596 	err = dbuf_read_hole(db, dn, bpp);
1597 	if (err == 0)
1598 		goto early_unlock;
1599 
1600 	ASSERT(bpp != NULL);
1601 
1602 	/*
1603 	 * Any attempt to read a redacted block should result in an error. This
1604 	 * will never happen under normal conditions, but can be useful for
1605 	 * debugging purposes.
1606 	 */
1607 	if (BP_IS_REDACTED(bpp)) {
1608 		ASSERT(dsl_dataset_feature_is_active(
1609 		    db->db_objset->os_dsl_dataset,
1610 		    SPA_FEATURE_REDACTED_DATASETS));
1611 		err = SET_ERROR(EIO);
1612 		goto early_unlock;
1613 	}
1614 
1615 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1616 	    db->db.db_object, db->db_level, db->db_blkid);
1617 
1618 	/*
1619 	 * All bps of an encrypted os should have the encryption bit set.
1620 	 * If this is not true it indicates tampering and we report an error.
1621 	 */
1622 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1623 		spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
1624 		zfs_panic_recover("unencrypted block in encrypted "
1625 		    "object set %llu", dmu_objset_id(db->db_objset));
1626 		err = SET_ERROR(EIO);
1627 		goto early_unlock;
1628 	}
1629 
1630 	err = dbuf_read_verify_dnode_crypt(db, flags);
1631 	if (err != 0)
1632 		goto early_unlock;
1633 
1634 	DB_DNODE_EXIT(db);
1635 
1636 	db->db_state = DB_READ;
1637 	DTRACE_SET_STATE(db, "read issued");
1638 	mutex_exit(&db->db_mtx);
1639 
1640 	if (!DBUF_IS_CACHEABLE(db))
1641 		aflags |= ARC_FLAG_UNCACHED;
1642 	else if (dbuf_is_l2cacheable(db))
1643 		aflags |= ARC_FLAG_L2CACHE;
1644 
1645 	dbuf_add_ref(db, NULL);
1646 
1647 	zio_flags = (flags & DB_RF_CANFAIL) ?
1648 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1649 
1650 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1651 		zio_flags |= ZIO_FLAG_RAW;
1652 	/*
1653 	 * The zio layer will copy the provided blkptr later, but we have our
1654 	 * own copy so that we can release the parent's rwlock. We have to
1655 	 * do that so that if dbuf_read_done is called synchronously (on
1656 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1657 	 * parent's rwlock, which would be a lock ordering violation.
1658 	 */
1659 	dmu_buf_unlock_parent(db, dblt, tag);
1660 	(void) arc_read(zio, db->db_objset->os_spa, bpp,
1661 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1662 	    &aflags, &zb);
1663 	return (err);
1664 early_unlock:
1665 	DB_DNODE_EXIT(db);
1666 	mutex_exit(&db->db_mtx);
1667 	dmu_buf_unlock_parent(db, dblt, tag);
1668 	return (err);
1669 }
1670 
1671 /*
1672  * This is our just-in-time copy function.  It makes a copy of buffers that
1673  * have been modified in a previous transaction group before we access them in
1674  * the current active group.
1675  *
1676  * This function is used in three places: when we are dirtying a buffer for the
1677  * first time in a txg, when we are freeing a range in a dnode that includes
1678  * this buffer, and when we are accessing a buffer which was received compressed
1679  * and later referenced in a WRITE_BYREF record.
1680  *
1681  * Note that when we are called from dbuf_free_range() we do not put a hold on
1682  * the buffer, we just traverse the active dbuf list for the dnode.
1683  */
1684 static void
1685 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1686 {
1687 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1688 
1689 	ASSERT(MUTEX_HELD(&db->db_mtx));
1690 	ASSERT(db->db.db_data != NULL);
1691 	ASSERT(db->db_level == 0);
1692 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1693 
1694 	if (dr == NULL ||
1695 	    (dr->dt.dl.dr_data !=
1696 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1697 		return;
1698 
1699 	/*
1700 	 * If the last dirty record for this dbuf has not yet synced
1701 	 * and it's referencing the dbuf data, either:
1702 	 *	reset the reference to point to a new copy,
1703 	 * or (if there are no active holders)
1704 	 *	just null out the current db_data pointer.
1705 	 */
1706 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1707 	if (db->db_blkid == DMU_BONUS_BLKID) {
1708 		dnode_t *dn = DB_DNODE(db);
1709 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1710 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1711 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1712 		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1713 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1714 		dnode_t *dn = DB_DNODE(db);
1715 		int size = arc_buf_size(db->db_buf);
1716 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1717 		spa_t *spa = db->db_objset->os_spa;
1718 		enum zio_compress compress_type =
1719 		    arc_get_compression(db->db_buf);
1720 		uint8_t complevel = arc_get_complevel(db->db_buf);
1721 
1722 		if (arc_is_encrypted(db->db_buf)) {
1723 			boolean_t byteorder;
1724 			uint8_t salt[ZIO_DATA_SALT_LEN];
1725 			uint8_t iv[ZIO_DATA_IV_LEN];
1726 			uint8_t mac[ZIO_DATA_MAC_LEN];
1727 
1728 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1729 			    iv, mac);
1730 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1731 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1732 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1733 			    compress_type, complevel);
1734 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1735 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1736 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1737 			    size, arc_buf_lsize(db->db_buf), compress_type,
1738 			    complevel);
1739 		} else {
1740 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1741 		}
1742 		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1743 	} else {
1744 		db->db_buf = NULL;
1745 		dbuf_clear_data(db);
1746 	}
1747 }
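
/*
 * Editorial sketch (not part of the original source): a worked example of
 * the just-in-time copy above.  Suppose this dbuf was dirtied in txg 100,
 * which is now syncing, while the open txg 102 is about to modify it
 * again.  dbuf_dirty() runs the following sequence so that txg 100's
 * dirty record keeps a private copy and txg 102's changes cannot leak
 * into the older txg's write:
 *
 *	arc_release(db->db_buf, db);
 *	dbuf_fix_old_data(db, tx->tx_txg);
 *	data_old = db->db_buf;
 */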
1748 
1749 int
1750 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1751 {
1752 	int err = 0;
1753 	boolean_t prefetch;
1754 	dnode_t *dn;
1755 
1756 	/*
1757 	 * We don't have to hold the mutex to check db_state because it
1758 	 * can't be freed while we have a hold on the buffer.
1759 	 */
1760 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1761 
1762 	DB_DNODE_ENTER(db);
1763 	dn = DB_DNODE(db);
1764 
1765 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1766 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1767 
1768 	mutex_enter(&db->db_mtx);
1769 	if (flags & DB_RF_PARTIAL_FIRST)
1770 		db->db_partial_read = B_TRUE;
1771 	else if (!(flags & DB_RF_PARTIAL_MORE))
1772 		db->db_partial_read = B_FALSE;
1773 	if (db->db_state == DB_CACHED) {
1774 		/*
1775 		 * Ensure that this block's dnode has been decrypted if
1776 		 * the caller has requested decrypted data.
1777 		 */
1778 		err = dbuf_read_verify_dnode_crypt(db, flags);
1779 
1780 		/*
1781 		 * If the arc buf is compressed or encrypted and the caller
1782 		 * requested uncompressed data, we need to untransform it
1783 		 * before returning. We also call arc_untransform() on any
1784 		 * unauthenticated blocks, which will verify their MAC if
1785 		 * the key is now available.
1786 		 */
1787 		if (err == 0 && db->db_buf != NULL &&
1788 		    (flags & DB_RF_NO_DECRYPT) == 0 &&
1789 		    (arc_is_encrypted(db->db_buf) ||
1790 		    arc_is_unauthenticated(db->db_buf) ||
1791 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1792 			spa_t *spa = dn->dn_objset->os_spa;
1793 			zbookmark_phys_t zb;
1794 
1795 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1796 			    db->db.db_object, db->db_level, db->db_blkid);
1797 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1798 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1799 			dbuf_set_data(db, db->db_buf);
1800 		}
1801 		mutex_exit(&db->db_mtx);
1802 		if (err == 0 && prefetch) {
1803 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1804 			    B_FALSE, flags & DB_RF_HAVESTRUCT);
1805 		}
1806 		DB_DNODE_EXIT(db);
1807 		DBUF_STAT_BUMP(hash_hits);
1808 	} else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1809 		boolean_t need_wait = B_FALSE;
1810 
1811 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1812 
1813 		if (zio == NULL && (db->db_state == DB_NOFILL ||
1814 		    (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1815 			spa_t *spa = dn->dn_objset->os_spa;
1816 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1817 			need_wait = B_TRUE;
1818 		}
1819 		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1820 		/*
1821 		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1822 		 * for us
1823 		 */
1824 		if (!err && prefetch) {
1825 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1826 			    db->db_state != DB_CACHED,
1827 			    flags & DB_RF_HAVESTRUCT);
1828 		}
1829 
1830 		DB_DNODE_EXIT(db);
1831 		DBUF_STAT_BUMP(hash_misses);
1832 
1833 		/*
1834 		 * If we created a zio_root we must execute it to avoid
1835 		 * leaking it, even if it isn't attached to any work due
1836 		 * to an error in dbuf_read_impl().
1837 		 */
1838 		if (need_wait) {
1839 			if (err == 0)
1840 				err = zio_wait(zio);
1841 			else
1842 				VERIFY0(zio_wait(zio));
1843 		}
1844 	} else {
1845 		/*
1846 		 * Another reader came in while the dbuf was in flight
1847 		 * between UNCACHED and CACHED.  Either a writer will finish
1848 		 * writing the buffer (sending the dbuf to CACHED) or the
1849 		 * first reader's request will reach the read_done callback
1850 		 * and send the dbuf to CACHED.  Otherwise, a failure
1851 		 * occurred and the dbuf went to UNCACHED.
1852 		 */
1853 		mutex_exit(&db->db_mtx);
1854 		if (prefetch) {
1855 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1856 			    B_TRUE, flags & DB_RF_HAVESTRUCT);
1857 		}
1858 		DB_DNODE_EXIT(db);
1859 		DBUF_STAT_BUMP(hash_misses);
1860 
1861 		/* Skip the wait per the caller's request. */
1862 		if ((flags & DB_RF_NEVERWAIT) == 0) {
1863 			mutex_enter(&db->db_mtx);
1864 			while (db->db_state == DB_READ ||
1865 			    db->db_state == DB_FILL) {
1866 				ASSERT(db->db_state == DB_READ ||
1867 				    (flags & DB_RF_HAVESTRUCT) == 0);
1868 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1869 				    db, zio_t *, zio);
1870 				cv_wait(&db->db_changed, &db->db_mtx);
1871 			}
1872 			if (db->db_state == DB_UNCACHED)
1873 				err = SET_ERROR(EIO);
1874 			mutex_exit(&db->db_mtx);
1875 		}
1876 	}
1877 
1878 	return (err);
1879 }
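
/*
 * Editorial usage sketch (hypothetical caller; the names "dbs" and "ndbs"
 * are placeholders): multiple reads can be batched under one root zio by
 * passing it as the zio argument, in the style of
 * dmu_buf_hold_array_by_dnode():
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (int i = 0; i < ndbs; i++) {
 *		(void) dbuf_read(dbs[i], rio,
 *		    DB_RF_CANFAIL | DB_RF_NOPREFETCH);
 *	}
 *	err = zio_wait(rio);
 */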
1880 
1881 static void
1882 dbuf_noread(dmu_buf_impl_t *db)
1883 {
1884 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1885 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1886 	mutex_enter(&db->db_mtx);
1887 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1888 		cv_wait(&db->db_changed, &db->db_mtx);
1889 	if (db->db_state == DB_UNCACHED) {
1890 		ASSERT(db->db_buf == NULL);
1891 		ASSERT(db->db.db_data == NULL);
1892 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1893 		db->db_state = DB_FILL;
1894 		DTRACE_SET_STATE(db, "assigning filled buffer");
1895 	} else if (db->db_state == DB_NOFILL) {
1896 		dbuf_clear_data(db);
1897 	} else {
1898 		ASSERT3U(db->db_state, ==, DB_CACHED);
1899 	}
1900 	mutex_exit(&db->db_mtx);
1901 }
1902 
1903 void
1904 dbuf_unoverride(dbuf_dirty_record_t *dr)
1905 {
1906 	dmu_buf_impl_t *db = dr->dr_dbuf;
1907 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1908 	uint64_t txg = dr->dr_txg;
1909 
1910 	ASSERT(MUTEX_HELD(&db->db_mtx));
1911 	/*
1912 	 * This assert is valid because dmu_sync() expects to be called by
1913 	 * a zilog's get_data while holding a range lock.  This call only
1914 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1915 	 */
1916 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1917 	ASSERT(db->db_level == 0);
1918 
1919 	if (db->db_blkid == DMU_BONUS_BLKID ||
1920 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1921 		return;
1922 
1923 	ASSERT(db->db_data_pending != dr);
1924 
1925 	/* free this block */
1926 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1927 		zio_free(db->db_objset->os_spa, txg, bp);
1928 
1929 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1930 	dr->dt.dl.dr_nopwrite = B_FALSE;
1931 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1932 
1933 	/*
1934 	 * Release the already-written buffer, so we leave it in
1935 	 * a consistent dirty state.  Note that all callers are
1936 	 * modifying the buffer, so they will immediately do
1937 	 * another (redundant) arc_release().  Therefore, leave
1938 	 * the buf thawed to save the effort of freezing &
1939 	 * immediately re-thawing it.
1940 	 */
1941 	if (!dr->dt.dl.dr_brtwrite)
1942 		arc_release(dr->dt.dl.dr_data, db);
1943 }
1944 
1945 /*
1946  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1947  * data blocks in the free range, so that any future readers will find
1948  * empty blocks.
1949  */
1950 void
1951 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1952     dmu_tx_t *tx)
1953 {
1954 	dmu_buf_impl_t *db_search;
1955 	dmu_buf_impl_t *db, *db_next;
1956 	uint64_t txg = tx->tx_txg;
1957 	avl_index_t where;
1958 	dbuf_dirty_record_t *dr;
1959 
1960 	if (end_blkid > dn->dn_maxblkid &&
1961 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1962 		end_blkid = dn->dn_maxblkid;
1963 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1964 	    (u_longlong_t)end_blkid);
1965 
1966 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1967 	db_search->db_level = 0;
1968 	db_search->db_blkid = start_blkid;
1969 	db_search->db_state = DB_SEARCH;
1970 
1971 	mutex_enter(&dn->dn_dbufs_mtx);
1972 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1973 	ASSERT3P(db, ==, NULL);
1974 
1975 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1976 
1977 	for (; db != NULL; db = db_next) {
1978 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1979 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1980 
1981 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
1982 			break;
1983 		}
1984 		ASSERT3U(db->db_blkid, >=, start_blkid);
1985 
1986 		/* found a level 0 buffer in the range */
1987 		mutex_enter(&db->db_mtx);
1988 		if (dbuf_undirty(db, tx)) {
1989 			/* mutex has been dropped and dbuf destroyed */
1990 			continue;
1991 		}
1992 
1993 		if (db->db_state == DB_UNCACHED ||
1994 		    db->db_state == DB_NOFILL ||
1995 		    db->db_state == DB_EVICTING) {
1996 			ASSERT(db->db.db_data == NULL);
1997 			mutex_exit(&db->db_mtx);
1998 			continue;
1999 		}
2000 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2001 			/* will be handled in dbuf_read_done or dbuf_rele */
2002 			db->db_freed_in_flight = TRUE;
2003 			mutex_exit(&db->db_mtx);
2004 			continue;
2005 		}
2006 		if (zfs_refcount_count(&db->db_holds) == 0) {
2007 			ASSERT(db->db_buf);
2008 			dbuf_destroy(db);
2009 			continue;
2010 		}
2011 		/* The dbuf is referenced */
2012 
2013 		dr = list_head(&db->db_dirty_records);
2014 		if (dr != NULL) {
2015 			if (dr->dr_txg == txg) {
2016 				/*
2017 				 * This buffer is "in-use"; re-adjust the file
2018 				 * size to reflect that this buffer may
2019 				 * contain new data when we sync.
2020 				 */
2021 				if (db->db_blkid != DMU_SPILL_BLKID &&
2022 				    db->db_blkid > dn->dn_maxblkid)
2023 					dn->dn_maxblkid = db->db_blkid;
2024 				dbuf_unoverride(dr);
2025 				if (dr->dt.dl.dr_brtwrite) {
2026 					ASSERT(db->db.db_data == NULL);
2027 					mutex_exit(&db->db_mtx);
2028 					continue;
2029 				}
2030 			} else {
2031 				/*
2032 				 * This dbuf is not dirty in the open context.
2033 				 * Either uncache it (if it's not referenced in
2034 				 * the open context) or reset its contents to
2035 				 * empty.
2036 				 */
2037 				dbuf_fix_old_data(db, txg);
2038 			}
2039 		}
2040 		/* clear the contents if it's cached */
2041 		if (db->db_state == DB_CACHED) {
2042 			ASSERT(db->db.db_data != NULL);
2043 			arc_release(db->db_buf, db);
2044 			rw_enter(&db->db_rwlock, RW_WRITER);
2045 			memset(db->db.db_data, 0, db->db.db_size);
2046 			rw_exit(&db->db_rwlock);
2047 			arc_buf_freeze(db->db_buf);
2048 		}
2049 
2050 		mutex_exit(&db->db_mtx);
2051 	}
2052 
2053 	mutex_exit(&dn->dn_dbufs_mtx);
2054 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
2055 }
2056 
2057 void
2058 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2059 {
2060 	arc_buf_t *buf, *old_buf;
2061 	dbuf_dirty_record_t *dr;
2062 	int osize = db->db.db_size;
2063 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2064 	dnode_t *dn;
2065 
2066 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2067 
2068 	DB_DNODE_ENTER(db);
2069 	dn = DB_DNODE(db);
2070 
2071 	/*
2072 	 * XXX we should be doing a dbuf_read, checking the return
2073 	 * value and returning that up to our callers
2074 	 */
2075 	dmu_buf_will_dirty(&db->db, tx);
2076 
2077 	/* create the data buffer for the new block */
2078 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2079 
2080 	/* copy old block data to the new block */
2081 	old_buf = db->db_buf;
2082 	memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2083 	/* zero the remainder */
2084 	if (size > osize)
2085 		memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2086 
2087 	mutex_enter(&db->db_mtx);
2088 	dbuf_set_data(db, buf);
2089 	arc_buf_destroy(old_buf, db);
2090 	db->db.db_size = size;
2091 
2092 	dr = list_head(&db->db_dirty_records);
2093 	/* dirty record added by dmu_buf_will_dirty() */
2094 	VERIFY(dr != NULL);
2095 	if (db->db_level == 0)
2096 		dr->dt.dl.dr_data = buf;
2097 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2098 	ASSERT3U(dr->dr_accounted, ==, osize);
2099 	dr->dr_accounted = size;
2100 	mutex_exit(&db->db_mtx);
2101 
2102 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2103 	DB_DNODE_EXIT(db);
2104 }
2105 
2106 void
2107 dbuf_release_bp(dmu_buf_impl_t *db)
2108 {
2109 	objset_t *os __maybe_unused = db->db_objset;
2110 
2111 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2112 	ASSERT(arc_released(os->os_phys_buf) ||
2113 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
2114 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2115 
2116 	(void) arc_release(db->db_buf, db);
2117 }
2118 
2119 /*
2120  * We already have a dirty record for this TXG, and we are being
2121  * dirtied again.
2122  */
2123 static void
2124 dbuf_redirty(dbuf_dirty_record_t *dr)
2125 {
2126 	dmu_buf_impl_t *db = dr->dr_dbuf;
2127 
2128 	ASSERT(MUTEX_HELD(&db->db_mtx));
2129 
2130 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2131 		/*
2132 		 * If this buffer has already been written out,
2133 		 * we now need to reset its state.
2134 		 */
2135 		dbuf_unoverride(dr);
2136 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2137 		    db->db_state != DB_NOFILL) {
2138 			/* Already released on initial dirty, so just thaw. */
2139 			ASSERT(arc_released(db->db_buf));
2140 			arc_buf_thaw(db->db_buf);
2141 		}
2142 	}
2143 }
2144 
2145 dbuf_dirty_record_t *
2146 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2147 {
2148 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2149 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2150 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2151 	ASSERT(dn->dn_maxblkid >= blkid);
2152 
2153 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2154 	list_link_init(&dr->dr_dirty_node);
2155 	list_link_init(&dr->dr_dbuf_node);
2156 	dr->dr_dnode = dn;
2157 	dr->dr_txg = tx->tx_txg;
2158 	dr->dt.dll.dr_blkid = blkid;
2159 	dr->dr_accounted = dn->dn_datablksz;
2160 
2161 	/*
2162 	 * There should not be any dbuf for the block that we're dirtying.
2163 	 * Otherwise the buffer contents could be inconsistent between the
2164 	 * dbuf and the lightweight dirty record.
2165 	 */
2166 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2167 	    NULL));
2168 
2169 	mutex_enter(&dn->dn_mtx);
2170 	int txgoff = tx->tx_txg & TXG_MASK;
2171 	if (dn->dn_free_ranges[txgoff] != NULL) {
2172 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2173 	}
2174 
2175 	if (dn->dn_nlevels == 1) {
2176 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2177 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2178 		mutex_exit(&dn->dn_mtx);
2179 		rw_exit(&dn->dn_struct_rwlock);
2180 		dnode_setdirty(dn, tx);
2181 	} else {
2182 		mutex_exit(&dn->dn_mtx);
2183 
2184 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2185 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2186 		    1, blkid >> epbs, FTAG);
2187 		rw_exit(&dn->dn_struct_rwlock);
2188 		if (parent_db == NULL) {
2189 			kmem_free(dr, sizeof (*dr));
2190 			return (NULL);
2191 		}
2192 		int err = dbuf_read(parent_db, NULL,
2193 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2194 		if (err != 0) {
2195 			dbuf_rele(parent_db, FTAG);
2196 			kmem_free(dr, sizeof (*dr));
2197 			return (NULL);
2198 		}
2199 
2200 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2201 		dbuf_rele(parent_db, FTAG);
2202 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2203 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2204 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2205 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2206 		dr->dr_parent = parent_dr;
2207 	}
2208 
2209 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2210 
2211 	return (dr);
2212 }
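
/*
 * Editorial sketch: a lightweight dirty record carries a level-0 write
 * without instantiating a dbuf.  A NULL return means the parent could not
 * be read, and a caller would fall back to the regular dbuf path (this
 * fallback is illustrative, not a fixed API contract):
 *
 *	dbuf_dirty_record_t *dr = dbuf_dirty_lightweight(dn, blkid, tx);
 *	if (dr == NULL) {
 *		... fall back to dbuf_hold() + dbuf_dirty() ...
 *	}
 */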
2213 
2214 dbuf_dirty_record_t *
2215 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2216 {
2217 	dnode_t *dn;
2218 	objset_t *os;
2219 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2220 	int txgoff = tx->tx_txg & TXG_MASK;
2221 	boolean_t drop_struct_rwlock = B_FALSE;
2222 
2223 	ASSERT(tx->tx_txg != 0);
2224 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2225 	DMU_TX_DIRTY_BUF(tx, db);
2226 
2227 	DB_DNODE_ENTER(db);
2228 	dn = DB_DNODE(db);
2229 	/*
2230 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2231 	 * objects may be dirtied in syncing context, but only if they
2232 	 * were already pre-dirtied in open context.
2233 	 */
2234 #ifdef ZFS_DEBUG
2235 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2236 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2237 		    RW_READER, FTAG);
2238 	}
2239 	ASSERT(!dmu_tx_is_syncing(tx) ||
2240 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2241 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2242 	    dn->dn_objset->os_dsl_dataset == NULL);
2243 	if (dn->dn_objset->os_dsl_dataset != NULL)
2244 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2245 #endif
2246 	/*
2247 	 * We make this assert for private objects as well, but after we
2248 	 * check if we're already dirty.  They are allowed to re-dirty
2249 	 * in syncing context.
2250 	 */
2251 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2252 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2253 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2254 
2255 	mutex_enter(&db->db_mtx);
2256 	/*
2257 	 * XXX make this true for indirects too?  The problem is that
2258 	 * transactions created with dmu_tx_create_assigned() from
2259 	 * syncing context don't bother holding ahead.
2260 	 */
2261 	ASSERT(db->db_level != 0 ||
2262 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2263 	    db->db_state == DB_NOFILL);
2264 
2265 	mutex_enter(&dn->dn_mtx);
2266 	dnode_set_dirtyctx(dn, tx, db);
2267 	if (tx->tx_txg > dn->dn_dirty_txg)
2268 		dn->dn_dirty_txg = tx->tx_txg;
2269 	mutex_exit(&dn->dn_mtx);
2270 
2271 	if (db->db_blkid == DMU_SPILL_BLKID)
2272 		dn->dn_have_spill = B_TRUE;
2273 
2274 	/*
2275 	 * If this buffer is already dirty, we're done.
2276 	 */
2277 	dr_head = list_head(&db->db_dirty_records);
2278 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2279 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2280 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2281 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2282 		DB_DNODE_EXIT(db);
2283 
2284 		dbuf_redirty(dr_next);
2285 		mutex_exit(&db->db_mtx);
2286 		return (dr_next);
2287 	}
2288 
2289 	/*
2290 	 * Only valid if not already dirty.
2291 	 */
2292 	ASSERT(dn->dn_object == 0 ||
2293 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2294 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2295 
2296 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2297 
2298 	/*
2299 	 * We should only be dirtying in syncing context if it's the
2300 	 * mos or we're initializing the os or it's a special object.
2301 	 * However, we are allowed to dirty in syncing context provided
2302 	 * we already dirtied it in open context.  Hence we must make
2303 	 * this assertion only if we're not already dirty.
2304 	 */
2305 	os = dn->dn_objset;
2306 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2307 #ifdef ZFS_DEBUG
2308 	if (dn->dn_objset->os_dsl_dataset != NULL)
2309 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2310 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2311 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2312 	if (dn->dn_objset->os_dsl_dataset != NULL)
2313 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2314 #endif
2315 	ASSERT(db->db.db_size != 0);
2316 
2317 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2318 
2319 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2320 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2321 	}
2322 
2323 	/*
2324 	 * If this buffer is dirty in an old transaction group we need
2325 	 * to make a copy of it so that the changes we make in this
2326 	 * transaction group won't leak out when we sync the older txg.
2327 	 */
2328 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2329 	list_link_init(&dr->dr_dirty_node);
2330 	list_link_init(&dr->dr_dbuf_node);
2331 	dr->dr_dnode = dn;
2332 	if (db->db_level == 0) {
2333 		void *data_old = db->db_buf;
2334 
2335 		if (db->db_state != DB_NOFILL) {
2336 			if (db->db_blkid == DMU_BONUS_BLKID) {
2337 				dbuf_fix_old_data(db, tx->tx_txg);
2338 				data_old = db->db.db_data;
2339 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2340 				/*
2341 				 * Release the data buffer from the cache so
2342 				 * that we can modify it without impacting
2343 				 * possible other users of this cached data
2344 				 * block.  Note that indirect blocks and
2345 				 * private objects are not released until the
2346 				 * syncing state (since they are only modified
2347 				 * then).
2348 				 */
2349 				arc_release(db->db_buf, db);
2350 				dbuf_fix_old_data(db, tx->tx_txg);
2351 				data_old = db->db_buf;
2352 			}
2353 			ASSERT(data_old != NULL);
2354 		}
2355 		dr->dt.dl.dr_data = data_old;
2356 	} else {
2357 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2358 		list_create(&dr->dt.di.dr_children,
2359 		    sizeof (dbuf_dirty_record_t),
2360 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2361 	}
2362 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2363 		dr->dr_accounted = db->db.db_size;
2364 	}
2365 	dr->dr_dbuf = db;
2366 	dr->dr_txg = tx->tx_txg;
2367 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2368 
2369 	/*
2370 	 * We could have been freed_in_flight between the dbuf_noread
2371 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2372 	 * happened after the free.
2373 	 */
2374 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2375 	    db->db_blkid != DMU_SPILL_BLKID) {
2376 		mutex_enter(&dn->dn_mtx);
2377 		if (dn->dn_free_ranges[txgoff] != NULL) {
2378 			range_tree_clear(dn->dn_free_ranges[txgoff],
2379 			    db->db_blkid, 1);
2380 		}
2381 		mutex_exit(&dn->dn_mtx);
2382 		db->db_freed_in_flight = FALSE;
2383 	}
2384 
2385 	/*
2386 	 * This buffer is now part of this txg
2387 	 */
2388 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2389 	db->db_dirtycnt += 1;
2390 	ASSERT3U(db->db_dirtycnt, <=, 3);
2391 
2392 	mutex_exit(&db->db_mtx);
2393 
2394 	if (db->db_blkid == DMU_BONUS_BLKID ||
2395 	    db->db_blkid == DMU_SPILL_BLKID) {
2396 		mutex_enter(&dn->dn_mtx);
2397 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2398 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2399 		mutex_exit(&dn->dn_mtx);
2400 		dnode_setdirty(dn, tx);
2401 		DB_DNODE_EXIT(db);
2402 		return (dr);
2403 	}
2404 
2405 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2406 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2407 		drop_struct_rwlock = B_TRUE;
2408 	}
2409 
2410 	/*
2411 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2412 	 * when we get to syncing context we will need to decrement its
2413 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2414 	 * syncing context won't have to wait for the i/o.
2415 	 */
2416 	if (db->db_blkptr != NULL) {
2417 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2418 		ddt_prefetch(os->os_spa, db->db_blkptr);
2419 		dmu_buf_unlock_parent(db, dblt, FTAG);
2420 	}
2421 
2422 	/*
2423 	 * We need to hold the dn_struct_rwlock to make this assertion,
2424 	 * because it protects dn_phys / dn_next_nlevels from changing.
2425 	 */
2426 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2427 	    dn->dn_phys->dn_nlevels > db->db_level ||
2428 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2429 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2430 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2431 
2432 
2433 	if (db->db_level == 0) {
2434 		ASSERT(!db->db_objset->os_raw_receive ||
2435 		    dn->dn_maxblkid >= db->db_blkid);
2436 		dnode_new_blkid(dn, db->db_blkid, tx,
2437 		    drop_struct_rwlock, B_FALSE);
2438 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2439 	}
2440 
2441 	if (db->db_level+1 < dn->dn_nlevels) {
2442 		dmu_buf_impl_t *parent = db->db_parent;
2443 		dbuf_dirty_record_t *di;
2444 		int parent_held = FALSE;
2445 
2446 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2447 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2448 			parent = dbuf_hold_level(dn, db->db_level + 1,
2449 			    db->db_blkid >> epbs, FTAG);
2450 			ASSERT(parent != NULL);
2451 			parent_held = TRUE;
2452 		}
2453 		if (drop_struct_rwlock)
2454 			rw_exit(&dn->dn_struct_rwlock);
2455 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2456 		di = dbuf_dirty(parent, tx);
2457 		if (parent_held)
2458 			dbuf_rele(parent, FTAG);
2459 
2460 		mutex_enter(&db->db_mtx);
2461 		/*
2462 		 * Since we've dropped the mutex, it's possible that
2463 		 * dbuf_undirty() might have changed this out from under us.
2464 		 */
2465 		if (list_head(&db->db_dirty_records) == dr ||
2466 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2467 			mutex_enter(&di->dt.di.dr_mtx);
2468 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2469 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2470 			list_insert_tail(&di->dt.di.dr_children, dr);
2471 			mutex_exit(&di->dt.di.dr_mtx);
2472 			dr->dr_parent = di;
2473 		}
2474 		mutex_exit(&db->db_mtx);
2475 	} else {
2476 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2477 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2478 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2479 		mutex_enter(&dn->dn_mtx);
2480 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2481 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2482 		mutex_exit(&dn->dn_mtx);
2483 		if (drop_struct_rwlock)
2484 			rw_exit(&dn->dn_struct_rwlock);
2485 	}
2486 
2487 	dnode_setdirty(dn, tx);
2488 	DB_DNODE_EXIT(db);
2489 	return (dr);
2490 }
2491 
2492 static void
2493 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2494 {
2495 	dmu_buf_impl_t *db = dr->dr_dbuf;
2496 
2497 	if (dr->dt.dl.dr_data != db->db.db_data) {
2498 		struct dnode *dn = dr->dr_dnode;
2499 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2500 
2501 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2502 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2503 	}
2504 	db->db_data_pending = NULL;
2505 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2506 	list_remove(&db->db_dirty_records, dr);
2507 	if (dr->dr_dbuf->db_level != 0) {
2508 		mutex_destroy(&dr->dt.di.dr_mtx);
2509 		list_destroy(&dr->dt.di.dr_children);
2510 	}
2511 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2512 	ASSERT3U(db->db_dirtycnt, >, 0);
2513 	db->db_dirtycnt -= 1;
2514 }
2515 
2516 /*
2517  * Undirty a buffer in the transaction group referenced by the given
2518  * transaction.  Return whether this evicted the dbuf.
2519  */
2520 boolean_t
2521 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2522 {
2523 	uint64_t txg = tx->tx_txg;
2524 	boolean_t brtwrite;
2525 
2526 	ASSERT(txg != 0);
2527 
2528 	/*
2529 	 * Due to our use of dn_nlevels below, this can only be called
2530 	 * in open context, unless we are operating on the MOS.
2531 	 * From syncing context, dn_nlevels may be different from the
2532 	 * dn_nlevels used when dbuf was dirtied.
2533 	 */
2534 	ASSERT(db->db_objset ==
2535 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2536 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2537 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2538 	ASSERT0(db->db_level);
2539 	ASSERT(MUTEX_HELD(&db->db_mtx));
2540 
2541 	/*
2542 	 * If this buffer is not dirty, we're done.
2543 	 */
2544 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2545 	if (dr == NULL)
2546 		return (B_FALSE);
2547 	ASSERT(dr->dr_dbuf == db);
2548 
2549 	brtwrite = dr->dt.dl.dr_brtwrite;
2550 	if (brtwrite) {
2551 		/*
2552 		 * We are freeing a block that we cloned in the same
2553 		 * transaction group.
2554 		 */
2555 		brt_pending_remove(dmu_objset_spa(db->db_objset),
2556 		    &dr->dt.dl.dr_overridden_by, tx);
2557 	}
2558 
2559 	dnode_t *dn = dr->dr_dnode;
2560 
2561 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2562 
2563 	ASSERT(db->db.db_size != 0);
2564 
2565 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2566 	    dr->dr_accounted, txg);
2567 
2568 	list_remove(&db->db_dirty_records, dr);
2569 
2570 	/*
2571 	 * Note that there are three places in dbuf_dirty()
2572 	 * where this dirty record may be put on a list.
2573 	 * Make sure to do a list_remove corresponding to
2574 	 * every one of those list_insert calls.
2575 	 */
2576 	if (dr->dr_parent) {
2577 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2578 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2579 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2580 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2581 	    db->db_level + 1 == dn->dn_nlevels) {
2582 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2583 		mutex_enter(&dn->dn_mtx);
2584 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2585 		mutex_exit(&dn->dn_mtx);
2586 	}
2587 
2588 	if (db->db_state != DB_NOFILL && !brtwrite) {
2589 		dbuf_unoverride(dr);
2590 
2591 		ASSERT(db->db_buf != NULL);
2592 		ASSERT(dr->dt.dl.dr_data != NULL);
2593 		if (dr->dt.dl.dr_data != db->db_buf)
2594 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2595 	}
2596 
2597 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2598 
2599 	ASSERT(db->db_dirtycnt > 0);
2600 	db->db_dirtycnt -= 1;
2601 
2602 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2603 		ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2604 		    arc_released(db->db_buf));
2605 		dbuf_destroy(db);
2606 		return (B_TRUE);
2607 	}
2608 
2609 	return (B_FALSE);
2610 }
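
/*
 * Editorial caller sketch, mirroring the loop in dbuf_free_range() above:
 * dbuf_undirty() must be entered with db_mtx held, and a B_TRUE return
 * means the dbuf was destroyed and the mutex dropped, so the caller must
 * not touch db again:
 *
 *	mutex_enter(&db->db_mtx);
 *	if (dbuf_undirty(db, tx))
 *		return;
 *	mutex_exit(&db->db_mtx);
 */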
2611 
2612 static void
2613 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2614 {
2615 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2616 
2617 	ASSERT(tx->tx_txg != 0);
2618 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2619 
2620 	/*
2621 	 * Quick check for dirtiness.  For already dirty blocks, this
2622 	 * reduces runtime of this function by >90%, and overall performance
2623 	 * by 50% for some workloads (e.g. file deletion with indirect blocks
2624 	 * cached).
2625 	 */
2626 	mutex_enter(&db->db_mtx);
2627 
2628 	if (db->db_state == DB_CACHED) {
2629 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2630 		/*
2631 		 * It's possible that it is already dirty but not cached,
2632 		 * because there are some calls to dbuf_dirty() that don't
2633 		 * go through dmu_buf_will_dirty().
2634 		 */
2635 		if (dr != NULL) {
2636 			/* This dbuf is already dirty and cached. */
2637 			dbuf_redirty(dr);
2638 			mutex_exit(&db->db_mtx);
2639 			return;
2640 		}
2641 	}
2642 	mutex_exit(&db->db_mtx);
2643 
2644 	DB_DNODE_ENTER(db);
2645 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2646 		flags |= DB_RF_HAVESTRUCT;
2647 	DB_DNODE_EXIT(db);
2648 	(void) dbuf_read(db, NULL, flags);
2649 	(void) dbuf_dirty(db, tx);
2650 }
2651 
2652 void
2653 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2654 {
2655 	dmu_buf_will_dirty_impl(db_fake,
2656 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2657 }
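
/*
 * Editorial usage sketch ("object", "offset", and "length" are
 * placeholders): dirtying is only legal inside an assigned transaction
 * that holds the affected range.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, length);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		dmu_buf_will_dirty(&db->db, tx);
 *		... modify db->db.db_data ...
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */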
2658 
2659 boolean_t
2660 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2661 {
2662 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2663 	dbuf_dirty_record_t *dr;
2664 
2665 	mutex_enter(&db->db_mtx);
2666 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2667 	mutex_exit(&db->db_mtx);
2668 	return (dr != NULL);
2669 }
2670 
2671 void
2672 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2673 {
2674 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2675 
2676 	db->db_state = DB_NOFILL;
2677 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2678 	dmu_buf_will_fill(db_fake, tx);
2679 }
2680 
2681 void
2682 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2683 {
2684 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2685 
2686 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2687 	ASSERT(tx->tx_txg != 0);
2688 	ASSERT(db->db_level == 0);
2689 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2690 
2691 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2692 	    dmu_tx_private_ok(tx));
2693 
2694 	dbuf_noread(db);
2695 	(void) dbuf_dirty(db, tx);
2696 }
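
/*
 * Editorial sketch of the fill protocol used by the DMU write path:
 * announce the overwrite, copy the new data in, then signal completion
 * so that threads waiting on DB_FILL can proceed ("src" is a
 * placeholder):
 *
 *	dmu_buf_will_fill(dbuf, tx);
 *	memcpy(dbuf->db_data, src, dbuf->db_size);
 *	dmu_buf_fill_done(dbuf, tx);
 */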
2697 
2698 /*
2699  * This function is effectively the same as dmu_buf_will_dirty(), but
2700  * indicates the caller expects raw encrypted data in the db, and provides
2701  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2702  * blkptr_t when this dbuf is written.  This is only used for blocks of
2703  * dnodes, during raw receive.
2704  */
2705 void
2706 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2707     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2708 {
2709 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2710 	dbuf_dirty_record_t *dr;
2711 
2712 	/*
2713 	 * dr_has_raw_params is only processed for blocks of dnodes
2714 	 * (see dbuf_sync_dnode_leaf_crypt()).
2715 	 */
2716 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2717 	ASSERT3U(db->db_level, ==, 0);
2718 	ASSERT(db->db_objset->os_raw_receive);
2719 
2720 	dmu_buf_will_dirty_impl(db_fake,
2721 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2722 
2723 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2724 
2725 	ASSERT3P(dr, !=, NULL);
2726 
2727 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2728 	dr->dt.dl.dr_byteorder = byteorder;
2729 	memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2730 	memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2731 	memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2732 }
2733 
2734 static void
2735 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2736 {
2737 	struct dirty_leaf *dl;
2738 	dbuf_dirty_record_t *dr;
2739 
2740 	dr = list_head(&db->db_dirty_records);
2741 	ASSERT3P(dr, !=, NULL);
2742 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2743 	dl = &dr->dt.dl;
2744 	dl->dr_overridden_by = *bp;
2745 	dl->dr_override_state = DR_OVERRIDDEN;
2746 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2747 }
2748 
2749 void
2750 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2751 {
2752 	(void) tx;
2753 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2754 	dbuf_states_t old_state;
2755 	mutex_enter(&db->db_mtx);
2756 	DBUF_VERIFY(db);
2757 
2758 	old_state = db->db_state;
2759 	db->db_state = DB_CACHED;
2760 	if (old_state == DB_FILL) {
2761 		if (db->db_level == 0 && db->db_freed_in_flight) {
2762 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2763 			/* we were freed while filling */
2764 			/* XXX dbuf_undirty? */
2765 			memset(db->db.db_data, 0, db->db.db_size);
2766 			db->db_freed_in_flight = FALSE;
2767 			DTRACE_SET_STATE(db,
2768 			    "fill done handling freed in flight");
2769 		} else {
2770 			DTRACE_SET_STATE(db, "fill done");
2771 		}
2772 		cv_broadcast(&db->db_changed);
2773 	}
2774 	mutex_exit(&db->db_mtx);
2775 }
2776 
2777 void
2778 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2779     bp_embedded_type_t etype, enum zio_compress comp,
2780     int uncompressed_size, int compressed_size, int byteorder,
2781     dmu_tx_t *tx)
2782 {
2783 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2784 	struct dirty_leaf *dl;
2785 	dmu_object_type_t type;
2786 	dbuf_dirty_record_t *dr;
2787 
2788 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2789 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2790 		    SPA_FEATURE_EMBEDDED_DATA));
2791 	}
2792 
2793 	DB_DNODE_ENTER(db);
2794 	type = DB_DNODE(db)->dn_type;
2795 	DB_DNODE_EXIT(db);
2796 
2797 	ASSERT0(db->db_level);
2798 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2799 
2800 	dmu_buf_will_not_fill(dbuf, tx);
2801 
2802 	dr = list_head(&db->db_dirty_records);
2803 	ASSERT3P(dr, !=, NULL);
2804 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2805 	dl = &dr->dt.dl;
2806 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2807 	    data, comp, uncompressed_size, compressed_size);
2808 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2809 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2810 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2811 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2812 
2813 	dl->dr_override_state = DR_OVERRIDDEN;
2814 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2815 }
2816 
2817 void
2818 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2819 {
2820 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2821 	dmu_object_type_t type;
2822 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2823 	    SPA_FEATURE_REDACTED_DATASETS));
2824 
2825 	DB_DNODE_ENTER(db);
2826 	type = DB_DNODE(db)->dn_type;
2827 	DB_DNODE_EXIT(db);
2828 
2829 	ASSERT0(db->db_level);
2830 	dmu_buf_will_not_fill(dbuf, tx);
2831 
2832 	blkptr_t bp = { { { {0} } } };
2833 	BP_SET_TYPE(&bp, type);
2834 	BP_SET_LEVEL(&bp, 0);
2835 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2836 	BP_SET_REDACTED(&bp);
2837 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2838 
2839 	dbuf_override_impl(db, &bp, tx);
2840 }
2841 
2842 /*
2843  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2844  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2845  */
2846 void
2847 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2848 {
2849 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2850 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2851 	ASSERT(db->db_level == 0);
2852 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2853 	ASSERT(buf != NULL);
2854 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2855 	ASSERT(tx->tx_txg != 0);
2856 
2857 	arc_return_buf(buf, db);
2858 	ASSERT(arc_released(buf));
2859 
2860 	mutex_enter(&db->db_mtx);
2861 
2862 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2863 		cv_wait(&db->db_changed, &db->db_mtx);
2864 
2865 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2866 
2867 	if (db->db_state == DB_CACHED &&
2868 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2869 		/*
2870 		 * In practice, we will never have a case where we have an
2871 		 * encrypted arc buffer while additional holds exist on the
2872 		 * dbuf. We don't handle this here so we simply assert that
2873 		 * fact instead.
2874 		 */
2875 		ASSERT(!arc_is_encrypted(buf));
2876 		mutex_exit(&db->db_mtx);
2877 		(void) dbuf_dirty(db, tx);
2878 		memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2879 		arc_buf_destroy(buf, db);
2880 		return;
2881 	}
2882 
2883 	if (db->db_state == DB_CACHED) {
2884 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2885 
2886 		ASSERT(db->db_buf != NULL);
2887 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2888 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2889 
2890 			if (!arc_released(db->db_buf)) {
2891 				ASSERT(dr->dt.dl.dr_override_state ==
2892 				    DR_OVERRIDDEN);
2893 				arc_release(db->db_buf, db);
2894 			}
2895 			dr->dt.dl.dr_data = buf;
2896 			arc_buf_destroy(db->db_buf, db);
2897 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2898 			arc_release(db->db_buf, db);
2899 			arc_buf_destroy(db->db_buf, db);
2900 		}
2901 		db->db_buf = NULL;
2902 	}
2903 	ASSERT(db->db_buf == NULL);
2904 	dbuf_set_data(db, buf);
2905 	db->db_state = DB_FILL;
2906 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
2907 	mutex_exit(&db->db_mtx);
2908 	(void) dbuf_dirty(db, tx);
2909 	dmu_buf_fill_done(&db->db, tx);
2910 }
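
/*
 * Editorial usage sketch ("src" is a placeholder): a caller typically
 * loans a buffer sized to the dbuf, fills it, and hands it over; on the
 * copy path above the loaned buffer is destroyed for the caller.
 *
 *	arc_buf_t *buf = dmu_request_arcbuf(&db->db, db->db.db_size);
 *	memcpy(buf->b_data, src, db->db.db_size);
 *	dbuf_assign_arcbuf(db, buf, tx);
 */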
2911 
2912 void
2913 dbuf_destroy(dmu_buf_impl_t *db)
2914 {
2915 	dnode_t *dn;
2916 	dmu_buf_impl_t *parent = db->db_parent;
2917 	dmu_buf_impl_t *dndb;
2918 
2919 	ASSERT(MUTEX_HELD(&db->db_mtx));
2920 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2921 
2922 	if (db->db_buf != NULL) {
2923 		arc_buf_destroy(db->db_buf, db);
2924 		db->db_buf = NULL;
2925 	}
2926 
2927 	if (db->db_blkid == DMU_BONUS_BLKID) {
2928 		int slots = DB_DNODE(db)->dn_num_slots;
2929 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2930 		if (db->db.db_data != NULL) {
2931 			kmem_free(db->db.db_data, bonuslen);
2932 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
2933 			db->db_state = DB_UNCACHED;
2934 			DTRACE_SET_STATE(db, "buffer cleared");
2935 		}
2936 	}
2937 
2938 	dbuf_clear_data(db);
2939 
2940 	if (multilist_link_active(&db->db_cache_link)) {
2941 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2942 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
2943 
2944 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
2945 		(void) zfs_refcount_remove_many(
2946 		    &dbuf_caches[db->db_caching_status].size,
2947 		    db->db.db_size, db);
2948 
2949 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2950 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
2951 		} else {
2952 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2953 			DBUF_STAT_BUMPDOWN(cache_count);
2954 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2955 			    db->db.db_size);
2956 		}
2957 		db->db_caching_status = DB_NO_CACHE;
2958 	}
2959 
2960 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2961 	ASSERT(db->db_data_pending == NULL);
2962 	ASSERT(list_is_empty(&db->db_dirty_records));
2963 
2964 	db->db_state = DB_EVICTING;
2965 	DTRACE_SET_STATE(db, "buffer eviction started");
2966 	db->db_blkptr = NULL;
2967 
2968 	/*
2969 	 * Now that db_state is DB_EVICTING, nobody else can find this via
2970 	 * the hash table.  We can now drop db_mtx, which allows us to
2971 	 * acquire the dn_dbufs_mtx.
2972 	 */
2973 	mutex_exit(&db->db_mtx);
2974 
2975 	DB_DNODE_ENTER(db);
2976 	dn = DB_DNODE(db);
2977 	dndb = dn->dn_dbuf;
2978 	if (db->db_blkid != DMU_BONUS_BLKID) {
2979 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2980 		if (needlock)
2981 			mutex_enter_nested(&dn->dn_dbufs_mtx,
2982 			    NESTED_SINGLE);
2983 		avl_remove(&dn->dn_dbufs, db);
2984 		membar_producer();
2985 		DB_DNODE_EXIT(db);
2986 		if (needlock)
2987 			mutex_exit(&dn->dn_dbufs_mtx);
2988 		/*
2989 		 * Decrementing the dbuf count means that the hold corresponding
2990 		 * to the removed dbuf is no longer discounted in dnode_move(),
2991 		 * so the dnode cannot be moved until after we release the hold.
2992 		 * The membar_producer() ensures visibility of the decremented
2993 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2994 		 * release any lock.
2995 		 */
2996 		mutex_enter(&dn->dn_mtx);
2997 		dnode_rele_and_unlock(dn, db, B_TRUE);
2998 		db->db_dnode_handle = NULL;
2999 
3000 		dbuf_hash_remove(db);
3001 	} else {
3002 		DB_DNODE_EXIT(db);
3003 	}
3004 
3005 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3006 
3007 	db->db_parent = NULL;
3008 
3009 	ASSERT(db->db_buf == NULL);
3010 	ASSERT(db->db.db_data == NULL);
3011 	ASSERT(db->db_hash_next == NULL);
3012 	ASSERT(db->db_blkptr == NULL);
3013 	ASSERT(db->db_data_pending == NULL);
3014 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3015 	ASSERT(!multilist_link_active(&db->db_cache_link));
3016 
3017 	/*
3018 	 * If this dbuf is referenced from an indirect dbuf,
3019 	 * decrement the ref count on the indirect dbuf.
3020 	 */
3021 	if (parent && parent != dndb) {
3022 		mutex_enter(&parent->db_mtx);
3023 		dbuf_rele_and_unlock(parent, db, B_TRUE);
3024 	}
3025 
3026 	kmem_cache_free(dbuf_kmem_cache, db);
3027 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3028 }
3029 
3030 /*
3031  * Note: While bpp will always be updated if the function returns success,
3032  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3033  * this happens when the dnode is the meta-dnode, or {user|group|project}used
3034  * object.
3035  */
3036 __attribute__((always_inline))
3037 static inline int
3038 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3039     dmu_buf_impl_t **parentp, blkptr_t **bpp)
3040 {
3041 	*parentp = NULL;
3042 	*bpp = NULL;
3043 
3044 	ASSERT(blkid != DMU_BONUS_BLKID);
3045 
3046 	if (blkid == DMU_SPILL_BLKID) {
3047 		mutex_enter(&dn->dn_mtx);
3048 		if (dn->dn_have_spill &&
3049 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3050 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3051 		else
3052 			*bpp = NULL;
3053 		dbuf_add_ref(dn->dn_dbuf, NULL);
3054 		*parentp = dn->dn_dbuf;
3055 		mutex_exit(&dn->dn_mtx);
3056 		return (0);
3057 	}
3058 
3059 	int nlevels =
3060 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3061 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3062 
3063 	ASSERT3U(level * epbs, <, 64);
3064 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3065 	/*
3066 	 * This assertion shouldn't trip as long as the max indirect block size
3067 	 * is less than 1M.  The reason for this is that up to that point,
3068 	 * the number of levels required to address an entire object with blocks
3069 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
3070 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3071 	 * (i.e. we can address the entire object), objects will all use at most
3072 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
3073 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
3074 	 * enough to address an entire object, so objects will have 5 levels,
3075 	 * but then this assertion will overflow.
3076 	 *
3077 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3078 	 * need to redo this logic to handle overflows.
3079 	 */
3080 	ASSERT(level >= nlevels ||
3081 	    ((nlevels - level - 1) * epbs) +
3082 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3083 	if (level >= nlevels ||
3084 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3085 	    ((nlevels - level - 1) * epbs)) ||
3086 	    (fail_sparse &&
3087 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3088 		/* the buffer has no parent yet */
3089 		return (SET_ERROR(ENOENT));
3090 	} else if (level < nlevels-1) {
3091 		/* this block is referenced from an indirect block */
3092 		int err;
3093 
3094 		err = dbuf_hold_impl(dn, level + 1,
3095 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3096 
3097 		if (err)
3098 			return (err);
3099 		err = dbuf_read(*parentp, NULL,
3100 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3101 		if (err) {
3102 			dbuf_rele(*parentp, NULL);
3103 			*parentp = NULL;
3104 			return (err);
3105 		}
3106 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
3107 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3108 		    (blkid & ((1ULL << epbs) - 1));
3109 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3110 			ASSERT(BP_IS_HOLE(*bpp));
3111 		rw_exit(&(*parentp)->db_rwlock);
3112 		return (0);
3113 	} else {
3114 		/* the block is referenced from the dnode */
3115 		ASSERT3U(level, ==, nlevels-1);
3116 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3117 		    blkid < dn->dn_phys->dn_nblkptr);
3118 		if (dn->dn_dbuf) {
3119 			dbuf_add_ref(dn->dn_dbuf, NULL);
3120 			*parentp = dn->dn_dbuf;
3121 		}
3122 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
3123 		return (0);
3124 	}
3125 }
3126 
3127 static dmu_buf_impl_t *
3128 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3129     dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3130 {
3131 	objset_t *os = dn->dn_objset;
3132 	dmu_buf_impl_t *db, *odb;
3133 
3134 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3135 	ASSERT(dn->dn_type != DMU_OT_NONE);
3136 
3137 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3138 
3139 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3140 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3141 
3142 	db->db_objset = os;
3143 	db->db.db_object = dn->dn_object;
3144 	db->db_level = level;
3145 	db->db_blkid = blkid;
3146 	db->db_dirtycnt = 0;
3147 	db->db_dnode_handle = dn->dn_handle;
3148 	db->db_parent = parent;
3149 	db->db_blkptr = blkptr;
3150 	db->db_hash = hash;
3151 
3152 	db->db_user = NULL;
3153 	db->db_user_immediate_evict = FALSE;
3154 	db->db_freed_in_flight = FALSE;
3155 	db->db_pending_evict = FALSE;
3156 
3157 	if (blkid == DMU_BONUS_BLKID) {
3158 		ASSERT3P(parent, ==, dn->dn_dbuf);
3159 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3160 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3161 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3162 		db->db.db_offset = DMU_BONUS_BLKID;
3163 		db->db_state = DB_UNCACHED;
3164 		DTRACE_SET_STATE(db, "bonus buffer created");
3165 		db->db_caching_status = DB_NO_CACHE;
3166 		/* the bonus dbuf is not placed in the hash table */
3167 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3168 		return (db);
3169 	} else if (blkid == DMU_SPILL_BLKID) {
3170 		db->db.db_size = (blkptr != NULL) ?
3171 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3172 		db->db.db_offset = 0;
3173 	} else {
3174 		int blocksize =
3175 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3176 		db->db.db_size = blocksize;
3177 		db->db.db_offset = db->db_blkid * blocksize;
3178 	}
3179 
3180 	/*
3181 	 * Hold the dn_dbufs_mtx while we get the new dbuf
3182 	 * in the hash table *and* added to the dbufs list.
3183 	 * This prevents a possible deadlock with someone
3184 	 * trying to look up this dbuf before it's added to the
3185 	 * dn_dbufs list.
3186 	 */
3187 	mutex_enter(&dn->dn_dbufs_mtx);
3188 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3189 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3190 		/* someone else inserted it first */
3191 		mutex_exit(&dn->dn_dbufs_mtx);
3192 		kmem_cache_free(dbuf_kmem_cache, db);
3193 		DBUF_STAT_BUMP(hash_insert_race);
3194 		return (odb);
3195 	}
3196 	avl_add(&dn->dn_dbufs, db);
3197 
3198 	db->db_state = DB_UNCACHED;
3199 	DTRACE_SET_STATE(db, "regular buffer created");
3200 	db->db_caching_status = DB_NO_CACHE;
3201 	mutex_exit(&dn->dn_dbufs_mtx);
3202 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3203 
3204 	if (parent && parent != dn->dn_dbuf)
3205 		dbuf_add_ref(parent, db);
3206 
3207 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3208 	    zfs_refcount_count(&dn->dn_holds) > 0);
3209 	(void) zfs_refcount_add(&dn->dn_holds, db);
3210 
3211 	dprintf_dbuf(db, "db=%p\n", db);
3212 
3213 	return (db);
3214 }
3215 
3216 /*
3217  * This function returns a block pointer and information about the object,
3218  * given a dnode and a block.  This is a publicly accessible version of
3219  * dbuf_findbp that only returns some information, rather than the
3220  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3221  * should be locked as (at least) a reader.
3222  */
3223 int
3224 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3225     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3226 {
3227 	dmu_buf_impl_t *dbp = NULL;
3228 	blkptr_t *bp2;
3229 	int err = 0;
3230 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3231 
3232 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3233 	if (err == 0) {
3234 		ASSERT3P(bp2, !=, NULL);
3235 		*bp = *bp2;
3236 		if (dbp != NULL)
3237 			dbuf_rele(dbp, NULL);
3238 		if (datablkszsec != NULL)
3239 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3240 		if (indblkshift != NULL)
3241 			*indblkshift = dn->dn_phys->dn_indblkshift;
3242 	}
3243 
3244 	return (err);
3245 }
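
/*
 * Editorial usage sketch: per the contract above, the dnode must be held
 * and dn_struct_rwlock held at least as reader across the call.
 *
 *	uint16_t datablkszsec;
 *	uint8_t indblkshift;
 *	blkptr_t bp;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
 *	    &datablkszsec, &indblkshift);
 *	rw_exit(&dn->dn_struct_rwlock);
 */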
3246 
3247 typedef struct dbuf_prefetch_arg {
3248 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3249 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3250 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3251 	int dpa_curlevel; /* The current level that we're reading */
3252 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3253 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3254 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3255 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3256 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3257 	void *dpa_arg; /* prefetch completion arg */
3258 } dbuf_prefetch_arg_t;
3259 
3260 static void
3261 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3262 {
3263 	if (dpa->dpa_cb != NULL) {
3264 		dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3265 		    dpa->dpa_zb.zb_blkid, io_done);
3266 	}
3267 	kmem_free(dpa, sizeof (*dpa));
3268 }
3269 
3270 static void
3271 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3272     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3273 {
3274 	(void) zio, (void) zb, (void) iobp;
3275 	dbuf_prefetch_arg_t *dpa = private;
3276 
3277 	if (abuf != NULL)
3278 		arc_buf_destroy(abuf, private);
3279 
3280 	dbuf_prefetch_fini(dpa, B_TRUE);
3281 }
3282 
3283 /*
3284  * Actually issue the prefetch read for the block given.
3285  */
3286 static void
3287 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3288 {
3289 	ASSERT(!BP_IS_REDACTED(bp) ||
3290 	    dsl_dataset_feature_is_active(
3291 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3292 	    SPA_FEATURE_REDACTED_DATASETS));
3293 
3294 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3295 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3296 
3297 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3298 	arc_flags_t aflags =
3299 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3300 	    ARC_FLAG_NO_BUF;
3301 
3302 	/* dnodes are always read as raw and then converted later */
3303 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3304 	    dpa->dpa_curlevel == 0)
3305 		zio_flags |= ZIO_FLAG_RAW;
3306 
3307 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3308 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3309 	ASSERT(dpa->dpa_zio != NULL);
3310 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3311 	    dbuf_issue_final_prefetch_done, dpa,
3312 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3313 }
3314 
3315 /*
3316  * Called when an indirect block above our prefetch target is read in.  This
3317  * will either read in the next indirect block down the tree or issue the actual
3318  * prefetch if the next block down is our target.
3319  */
3320 static void
3321 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3322     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3323 {
3324 	(void) zb, (void) iobp;
3325 	dbuf_prefetch_arg_t *dpa = private;
3326 
3327 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3328 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3329 
3330 	if (abuf == NULL) {
3331 		ASSERT(zio == NULL || zio->io_error != 0);
3332 		dbuf_prefetch_fini(dpa, B_TRUE);
3333 		return;
3334 	}
3335 	ASSERT(zio == NULL || zio->io_error == 0);
3336 
3337 	/*
3338 	 * The dpa_dnode is only valid if we are called with a NULL
3339 	 * zio. This indicates that the arc_read() returned without
3340 	 * first calling zio_read() to issue a physical read. Once
3341 	 * a physical read is made the dpa_dnode must be invalidated
3342 	 * a physical read is made, the dpa_dnode must be invalidated
3343 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3344 	 * cache. To do so, we must hold the dbuf associated with the block
3345 	 * we just prefetched, read its contents so that we associate it
3346 	 * with an arc_buf_t, and then release it.
3347 	 */
3348 	if (zio != NULL) {
3349 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3350 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3351 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3352 		} else {
3353 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3354 		}
3355 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3356 
3357 		dpa->dpa_dnode = NULL;
3358 	} else if (dpa->dpa_dnode != NULL) {
3359 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3360 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3361 		    dpa->dpa_zb.zb_level));
3362 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3363 		    dpa->dpa_curlevel, curblkid, FTAG);
3364 		if (db == NULL) {
3365 			arc_buf_destroy(abuf, private);
3366 			dbuf_prefetch_fini(dpa, B_TRUE);
3367 			return;
3368 		}
3369 		(void) dbuf_read(db, NULL,
3370 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3371 		dbuf_rele(db, FTAG);
3372 	}
3373 
3374 	dpa->dpa_curlevel--;
3375 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3376 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3377 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3378 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3379 
3380 	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3381 	    dsl_dataset_feature_is_active(
3382 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3383 	    SPA_FEATURE_REDACTED_DATASETS)));
3384 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3385 		arc_buf_destroy(abuf, private);
3386 		dbuf_prefetch_fini(dpa, B_TRUE);
3387 		return;
3388 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3389 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3390 		dbuf_issue_final_prefetch(dpa, bp);
3391 	} else {
3392 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3393 		zbookmark_phys_t zb;
3394 
3395 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3396 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3397 			iter_aflags |= ARC_FLAG_L2CACHE;
3398 
3399 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3400 
3401 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3402 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3403 
3404 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3405 		    bp, dbuf_prefetch_indirect_done, dpa,
3406 		    ZIO_PRIORITY_SYNC_READ,
3407 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3408 		    &iter_aflags, &zb);
3409 	}
3410 
3411 	arc_buf_destroy(abuf, private);
3412 }
3413 
3414 /*
3415  * Issue prefetch reads for the given block on the given level.  If the indirect
3416  * blocks above that block are not in memory, we will read them in
3417  * asynchronously.  As a result, this call never blocks waiting for a read to
3418  * complete. Note that the prefetch might fail if the dataset is encrypted and
3419  * the encryption key is unmapped before the IO completes.
3420  */
3421 int
3422 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3423     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3424     void *arg)
3425 {
3426 	blkptr_t bp;
3427 	int epbs, nlevels, curlevel;
3428 	uint64_t curblkid;
3429 
3430 	ASSERT(blkid != DMU_BONUS_BLKID);
3431 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3432 
3433 	if (blkid > dn->dn_maxblkid)
3434 		goto no_issue;
3435 
3436 	if (level == 0 && dnode_block_freed(dn, blkid))
3437 		goto no_issue;
3438 
3439 	/*
3440 	 * This dnode hasn't been written to disk yet, so there's nothing to
3441 	 * prefetch.
3442 	 */
3443 	nlevels = dn->dn_phys->dn_nlevels;
3444 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3445 		goto no_issue;
3446 
3447 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3448 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3449 		goto no_issue;
3450 
3451 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3452 	    level, blkid, NULL);
3453 	if (db != NULL) {
3454 		mutex_exit(&db->db_mtx);
3455 		/*
3456 		 * This dbuf already exists.  It is either CACHED, or
3457 		 * (we assume) about to be read or filled.
3458 		 */
3459 		goto no_issue;
3460 	}
3461 
3462 	/*
3463 	 * Find the closest ancestor (indirect block) of the target block
3464 	 * that is present in the cache.  In this indirect block, we will
3465 	 * find the bp that is at curlevel, curblkid.
3466 	 */
3467 	curlevel = level;
3468 	curblkid = blkid;
3469 	while (curlevel < nlevels - 1) {
3470 		int parent_level = curlevel + 1;
3471 		uint64_t parent_blkid = curblkid >> epbs;
3472 		dmu_buf_impl_t *db;
3473 
3474 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3475 		    FALSE, TRUE, FTAG, &db) == 0) {
3476 			blkptr_t *bpp = db->db_buf->b_data;
3477 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3478 			dbuf_rele(db, FTAG);
3479 			break;
3480 		}
3481 
3482 		curlevel = parent_level;
3483 		curblkid = parent_blkid;
3484 	}
3485 
3486 	if (curlevel == nlevels - 1) {
3487 		/* No cached indirect blocks found. */
3488 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3489 		bp = dn->dn_phys->dn_blkptr[curblkid];
3490 	}
3491 	ASSERT(!BP_IS_REDACTED(&bp) ||
3492 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3493 	    SPA_FEATURE_REDACTED_DATASETS));
3494 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3495 		goto no_issue;
3496 
3497 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3498 
3499 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3500 	    ZIO_FLAG_CANFAIL);
3501 
3502 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3503 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3504 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3505 	    dn->dn_object, level, blkid);
3506 	dpa->dpa_curlevel = curlevel;
3507 	dpa->dpa_prio = prio;
3508 	dpa->dpa_aflags = aflags;
3509 	dpa->dpa_spa = dn->dn_objset->os_spa;
3510 	dpa->dpa_dnode = dn;
3511 	dpa->dpa_epbs = epbs;
3512 	dpa->dpa_zio = pio;
3513 	dpa->dpa_cb = cb;
3514 	dpa->dpa_arg = arg;
3515 
3516 	if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3517 		dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3518 	else if (dnode_level_is_l2cacheable(&bp, dn, level))
3519 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3520 
3521 	/*
3522 	 * If we have the indirect just above us, no need to do the asynchronous
3523 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3524 	 * a higher level, though, we want to issue the prefetches for all the
3525 	 * indirect blocks asynchronously, so we can go on with whatever we were
3526 	 * doing.
3527 	 */
3528 	if (curlevel == level) {
3529 		ASSERT3U(curblkid, ==, blkid);
3530 		dbuf_issue_final_prefetch(dpa, &bp);
3531 	} else {
3532 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3533 		zbookmark_phys_t zb;
3534 
3535 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3536 		if (dnode_level_is_l2cacheable(&bp, dn, level))
3537 			iter_aflags |= ARC_FLAG_L2CACHE;
3538 
3539 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3540 		    dn->dn_object, curlevel, curblkid);
3541 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3542 		    &bp, dbuf_prefetch_indirect_done, dpa,
3543 		    ZIO_PRIORITY_SYNC_READ,
3544 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3545 		    &iter_aflags, &zb);
3546 	}
3547 	/*
3548 	 * We use pio here instead of dpa_zio since it's possible that
3549 	 * dpa may have already been freed.
3550 	 */
3551 	zio_nowait(pio);
3552 	return (1);
3553 no_issue:
3554 	if (cb != NULL)
3555 		cb(arg, level, blkid, B_FALSE);
3556 	return (0);
3557 }
3558 
3559 int
3560 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3561     arc_flags_t aflags)
3562 {
3563 
3564 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3565 }
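
/*
 * Illustrative sketch, not part of the original file: issuing
 * speculative reads for a run of level-0 blocks.  As documented
 * above dbuf_prefetch_impl(), the caller is assumed to hold the
 * dnode and dn_struct_rwlock as (at least) a reader; the priority
 * and ARC flag choices here are illustrative only.
 */
static void __maybe_unused
dbuf_prefetch_example(dnode_t *dn, uint64_t start, uint64_t nblks)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blkid = start; blkid < start + nblks; blkid++) {
		/* Returns 1 if an I/O was issued, 0 if nothing was done. */
		(void) dbuf_prefetch(dn, 0, blkid,
		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
	}
	rw_exit(&dn->dn_struct_rwlock);
}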
3566 
3567 /*
3568  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3569  * the case of encrypted, compressed and uncompressed buffers by
3570  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3571  * arc_alloc_compressed_buf() or arc_alloc_buf().
3572  *
3573  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3574  */
3575 noinline static void
3576 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3577 {
3578 	dbuf_dirty_record_t *dr = db->db_data_pending;
3579 	arc_buf_t *data = dr->dt.dl.dr_data;
3580 	enum zio_compress compress_type = arc_get_compression(data);
3581 	uint8_t complevel = arc_get_complevel(data);
3582 
3583 	if (arc_is_encrypted(data)) {
3584 		boolean_t byteorder;
3585 		uint8_t salt[ZIO_DATA_SALT_LEN];
3586 		uint8_t iv[ZIO_DATA_IV_LEN];
3587 		uint8_t mac[ZIO_DATA_MAC_LEN];
3588 
3589 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3590 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3591 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3592 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3593 		    compress_type, complevel));
3594 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3595 		dbuf_set_data(db, arc_alloc_compressed_buf(
3596 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3597 		    arc_buf_lsize(data), compress_type, complevel));
3598 	} else {
3599 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3600 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3601 	}
3602 
3603 	rw_enter(&db->db_rwlock, RW_WRITER);
3604 	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3605 	rw_exit(&db->db_rwlock);
3606 }
3607 
3608 /*
3609  * Returns with db_holds incremented, and db_mtx not held.
3610  * Note: dn_struct_rwlock must be held.
3611  */
3612 int
3613 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3614     boolean_t fail_sparse, boolean_t fail_uncached,
3615     const void *tag, dmu_buf_impl_t **dbp)
3616 {
3617 	dmu_buf_impl_t *db, *parent = NULL;
3618 	uint64_t hv;
3619 
3620 	/* If the pool has been created, verify the tx_sync_lock is not held */
3621 	spa_t *spa = dn->dn_objset->os_spa;
3622 	dsl_pool_t *dp = spa->spa_dsl_pool;
3623 	if (dp != NULL) {
3624 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3625 	}
3626 
3627 	ASSERT(blkid != DMU_BONUS_BLKID);
3628 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3629 	ASSERT3U(dn->dn_nlevels, >, level);
3630 
3631 	*dbp = NULL;
3632 
3633 	/* dbuf_find() returns with db_mtx held */
3634 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3635 
3636 	if (db == NULL) {
3637 		blkptr_t *bp = NULL;
3638 		int err;
3639 
3640 		if (fail_uncached)
3641 			return (SET_ERROR(ENOENT));
3642 
3643 		ASSERT3P(parent, ==, NULL);
3644 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3645 		if (fail_sparse) {
3646 			if (err == 0 && bp && BP_IS_HOLE(bp))
3647 				err = SET_ERROR(ENOENT);
3648 			if (err) {
3649 				if (parent)
3650 					dbuf_rele(parent, NULL);
3651 				return (err);
3652 			}
3653 		}
3654 		if (err && err != ENOENT)
3655 			return (err);
3656 		db = dbuf_create(dn, level, blkid, parent, bp, hv);
3657 	}
3658 
3659 	if (fail_uncached && db->db_state != DB_CACHED) {
3660 		mutex_exit(&db->db_mtx);
3661 		return (SET_ERROR(ENOENT));
3662 	}
3663 
3664 	if (db->db_buf != NULL) {
3665 		arc_buf_access(db->db_buf);
3666 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3667 	}
3668 
3669 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3670 
3671 	/*
3672 	 * If this buffer is currently syncing out, and we are
3673 	 * still referencing it from db_data, we need to make a copy
3674 	 * of it in case we decide we want to dirty it again in this txg.
3675 	 */
3676 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3677 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3678 	    db->db_state == DB_CACHED && db->db_data_pending) {
3679 		dbuf_dirty_record_t *dr = db->db_data_pending;
3680 		if (dr->dt.dl.dr_data == db->db_buf) {
3681 			ASSERT3P(db->db_buf, !=, NULL);
3682 			dbuf_hold_copy(dn, db);
3683 		}
3684 	}
3685 
3686 	if (multilist_link_active(&db->db_cache_link)) {
3687 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3688 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3689 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3690 
3691 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3692 		(void) zfs_refcount_remove_many(
3693 		    &dbuf_caches[db->db_caching_status].size,
3694 		    db->db.db_size, db);
3695 
3696 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3697 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3698 		} else {
3699 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3700 			DBUF_STAT_BUMPDOWN(cache_count);
3701 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3702 			    db->db.db_size);
3703 		}
3704 		db->db_caching_status = DB_NO_CACHE;
3705 	}
3706 	(void) zfs_refcount_add(&db->db_holds, tag);
3707 	DBUF_VERIFY(db);
3708 	mutex_exit(&db->db_mtx);
3709 
3710 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3711 	if (parent)
3712 		dbuf_rele(parent, NULL);
3713 
3714 	ASSERT3P(DB_DNODE(db), ==, dn);
3715 	ASSERT3U(db->db_blkid, ==, blkid);
3716 	ASSERT3U(db->db_level, ==, level);
3717 	*dbp = db;
3718 
3719 	return (0);
3720 }
3721 
3722 dmu_buf_impl_t *
3723 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3724 {
3725 	return (dbuf_hold_level(dn, 0, blkid, tag));
3726 }
3727 
3728 dmu_buf_impl_t *
3729 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3730 {
3731 	dmu_buf_impl_t *db;
3732 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3733 	return (err ? NULL : db);
3734 }
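
/*
 * Illustrative sketch, not part of the original file: the canonical
 * hold/read/release pattern built on the functions above.  Every
 * successful dbuf_hold_impl() must be balanced by a dbuf_rele() with
 * the same tag; FTAG scopes the hold to the current function.
 */
static int __maybe_unused
dbuf_hold_example(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db;
	int err;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
	rw_exit(&dn->dn_struct_rwlock);
	if (err != 0)
		return (err);

	/* Without DB_RF_HAVESTRUCT, dbuf_read() takes the lock itself. */
	err = dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
	dbuf_rele(db, FTAG);
	return (err);
}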
3735 
3736 void
3737 dbuf_create_bonus(dnode_t *dn)
3738 {
3739 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3740 
3741 	ASSERT(dn->dn_bonus == NULL);
3742 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3743 	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3744 }
3745 
3746 int
3747 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3748 {
3749 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3750 
3751 	if (db->db_blkid != DMU_SPILL_BLKID)
3752 		return (SET_ERROR(ENOTSUP));
3753 	if (blksz == 0)
3754 		blksz = SPA_MINBLOCKSIZE;
3755 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3756 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3757 
3758 	dbuf_new_size(db, blksz, tx);
3759 
3760 	return (0);
3761 }
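
/*
 * Worked example, not part of the original file: dbuf_spill_set_blksz()
 * rounds the requested size up to the next SPA_MINBLOCKSIZE (512-byte)
 * multiple, so a request of 1000 bytes becomes P2ROUNDUP(1000, 512) ==
 * 1024, and a request of 0 is treated as SPA_MINBLOCKSIZE itself.
 */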
3762 
3763 void
3764 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3765 {
3766 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3767 }
3768 
3769 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3770 void
3771 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3772 {
3773 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3774 	VERIFY3S(holds, >, 1);
3775 }
3776 
3777 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3778 boolean_t
3779 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3780     const void *tag)
3781 {
3782 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3783 	dmu_buf_impl_t *found_db;
3784 	boolean_t result = B_FALSE;
3785 
3786 	if (blkid == DMU_BONUS_BLKID)
3787 		found_db = dbuf_find_bonus(os, obj);
3788 	else
3789 		found_db = dbuf_find(os, obj, 0, blkid, NULL);
3790 
3791 	if (found_db != NULL) {
3792 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3793 			(void) zfs_refcount_add(&db->db_holds, tag);
3794 			result = B_TRUE;
3795 		}
3796 		mutex_exit(&found_db->db_mtx);
3797 	}
3798 	return (result);
3799 }
3800 
3801 /*
3802  * If you call dbuf_rele() you had better not be referencing the dnode handle
3803  * unless you have some other direct or indirect hold on the dnode. (An indirect
3804  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3805  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3806  * dnode's parent dbuf evicting its dnode handles.
3807  */
3808 void
3809 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3810 {
3811 	mutex_enter(&db->db_mtx);
3812 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3813 }
3814 
3815 void
3816 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3817 {
3818 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3819 }
3820 
3821 /*
3822  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3823  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3824  * argument should be set if we are already in the dbuf-evicting code
3825  * path, in which case we don't want to recursively evict.  This allows us to
3826  * avoid deeply nested stacks that would have a call flow similar to this:
3827  *
3828  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3829  *	^						|
3830  *	|						|
3831  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3832  *
3833  */
3834 void
3835 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3836 {
3837 	int64_t holds;
3838 	uint64_t size;
3839 
3840 	ASSERT(MUTEX_HELD(&db->db_mtx));
3841 	DBUF_VERIFY(db);
3842 
3843 	/*
3844 	 * Remove the reference to the dbuf before removing its hold on the
3845 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3846 	 * buffer has a corresponding dnode hold.
3847 	 */
3848 	holds = zfs_refcount_remove(&db->db_holds, tag);
3849 	ASSERT(holds >= 0);
3850 
3851 	/*
3852 	 * We can't freeze indirects if there is a possibility that they
3853 	 * may be modified in the current syncing context.
3854 	 */
3855 	if (db->db_buf != NULL &&
3856 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3857 		arc_buf_freeze(db->db_buf);
3858 	}
3859 
3860 	if (holds == db->db_dirtycnt &&
3861 	    db->db_level == 0 && db->db_user_immediate_evict)
3862 		dbuf_evict_user(db);
3863 
3864 	if (holds == 0) {
3865 		if (db->db_blkid == DMU_BONUS_BLKID) {
3866 			dnode_t *dn;
3867 			boolean_t evict_dbuf = db->db_pending_evict;
3868 
3869 			/*
3870 			 * If the dnode moves here, we cannot cross this
3871 			 * barrier until the move completes.
3872 			 */
3873 			DB_DNODE_ENTER(db);
3874 
3875 			dn = DB_DNODE(db);
3876 			atomic_dec_32(&dn->dn_dbufs_count);
3877 
3878 			/*
3879 			 * Decrementing the dbuf count means that the bonus
3880 			 * buffer's dnode hold is no longer discounted in
3881 			 * dnode_move(). The dnode cannot move until after
3882 			 * the dnode_rele() below.
3883 			 */
3884 			DB_DNODE_EXIT(db);
3885 
3886 			/*
3887 			 * Do not reference db after its lock is dropped.
3888 			 * Another thread may evict it.
3889 			 */
3890 			mutex_exit(&db->db_mtx);
3891 
3892 			if (evict_dbuf)
3893 				dnode_evict_bonus(dn);
3894 
3895 			dnode_rele(dn, db);
3896 		} else if (db->db_buf == NULL) {
3897 			/*
3898 			 * This is a special case: we never associated this
3899 			 * dbuf with any data allocated from the ARC.
3900 			 */
3901 			ASSERT(db->db_state == DB_UNCACHED ||
3902 			    db->db_state == DB_NOFILL);
3903 			dbuf_destroy(db);
3904 		} else if (arc_released(db->db_buf)) {
3905 			/*
3906 			 * This dbuf has anonymous data associated with it.
3907 			 */
3908 			dbuf_destroy(db);
3909 		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
3910 		    db->db_pending_evict) {
3911 			dbuf_destroy(db);
3912 		} else if (!multilist_link_active(&db->db_cache_link)) {
3913 			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3914 
3915 			dbuf_cached_state_t dcs =
3916 			    dbuf_include_in_metadata_cache(db) ?
3917 			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3918 			db->db_caching_status = dcs;
3919 
3920 			multilist_insert(&dbuf_caches[dcs].cache, db);
3921 			uint64_t db_size = db->db.db_size;
3922 			size = zfs_refcount_add_many(
3923 			    &dbuf_caches[dcs].size, db_size, db);
3924 			uint8_t db_level = db->db_level;
3925 			mutex_exit(&db->db_mtx);
3926 
3927 			if (dcs == DB_DBUF_METADATA_CACHE) {
3928 				DBUF_STAT_BUMP(metadata_cache_count);
3929 				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
3930 				    size);
3931 			} else {
3932 				DBUF_STAT_BUMP(cache_count);
3933 				DBUF_STAT_MAX(cache_size_bytes_max, size);
3934 				DBUF_STAT_BUMP(cache_levels[db_level]);
3935 				DBUF_STAT_INCR(cache_levels_bytes[db_level],
3936 				    db_size);
3937 			}
3938 
3939 			if (dcs == DB_DBUF_CACHE && !evicting)
3940 				dbuf_evict_notify(size);
3941 		}
3942 	} else {
3943 		mutex_exit(&db->db_mtx);
3944 	}
3945 
3946 }
3947 
3948 #pragma weak dmu_buf_refcount = dbuf_refcount
3949 uint64_t
3950 dbuf_refcount(dmu_buf_impl_t *db)
3951 {
3952 	return (zfs_refcount_count(&db->db_holds));
3953 }
3954 
3955 uint64_t
3956 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3957 {
3958 	uint64_t holds;
3959 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3960 
3961 	mutex_enter(&db->db_mtx);
3962 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3963 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3964 	mutex_exit(&db->db_mtx);
3965 
3966 	return (holds);
3967 }
3968 
3969 void *
3970 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3971     dmu_buf_user_t *new_user)
3972 {
3973 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3974 
3975 	mutex_enter(&db->db_mtx);
3976 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3977 	if (db->db_user == old_user)
3978 		db->db_user = new_user;
3979 	else
3980 		old_user = db->db_user;
3981 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3982 	mutex_exit(&db->db_mtx);
3983 
3984 	return (old_user);
3985 }
3986 
3987 void *
3988 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3989 {
3990 	return (dmu_buf_replace_user(db_fake, NULL, user));
3991 }
3992 
3993 void *
3994 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3995 {
3996 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3997 
3998 	db->db_user_immediate_evict = TRUE;
3999 	return (dmu_buf_set_user(db_fake, user));
4000 }
4001 
4002 void *
4003 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4004 {
4005 	return (dmu_buf_replace_user(db_fake, user, NULL));
4006 }
4007 
4008 void *
4009 dmu_buf_get_user(dmu_buf_t *db_fake)
4010 {
4011 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4012 
4013 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4014 	return (db->db_user);
4015 }
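
/*
 * Illustrative sketch, not part of the original file: attaching user
 * state to a dbuf with the API above.  "my_state_t" and "my_evict"
 * are hypothetical; a real consumer embeds dmu_buf_user_t in its own
 * structure and tears it down from the evict callback, which receives
 * the embedded dmu_buf_user_t pointer (here also the start of
 * my_state_t, since it is the first member).  dmu_buf_init_user() is
 * assumed to be the initializer declared in sys/dmu.h; its async-evict
 * and clear-on-evict arguments are unused in this sketch.
 */
typedef struct my_state {
	dmu_buf_user_t ms_dbu;	/* must be embedded, here as first member */
	uint64_t ms_cookie;
} my_state_t;

static void
my_evict(void *arg)
{
	kmem_free(arg, sizeof (my_state_t));
}

static void __maybe_unused
dmu_buf_user_example(dmu_buf_t *db)
{
	my_state_t *ms = kmem_zalloc(sizeof (*ms), KM_SLEEP);

	dmu_buf_init_user(&ms->ms_dbu, my_evict, NULL, NULL);
	if (dmu_buf_set_user(db, &ms->ms_dbu) != NULL) {
		/* Lost the race: another user was already attached. */
		kmem_free(ms, sizeof (*ms));
	}
}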
4016 
4017 void
4018 dmu_buf_user_evict_wait(void)
4019 {
4020 	taskq_wait(dbu_evict_taskq);
4021 }
4022 
4023 blkptr_t *
4024 dmu_buf_get_blkptr(dmu_buf_t *db)
4025 {
4026 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4027 	return (dbi->db_blkptr);
4028 }
4029 
4030 objset_t *
4031 dmu_buf_get_objset(dmu_buf_t *db)
4032 {
4033 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4034 	return (dbi->db_objset);
4035 }
4036 
4037 dnode_t *
4038 dmu_buf_dnode_enter(dmu_buf_t *db)
4039 {
4040 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4041 	DB_DNODE_ENTER(dbi);
4042 	return (DB_DNODE(dbi));
4043 }
4044 
4045 void
4046 dmu_buf_dnode_exit(dmu_buf_t *db)
4047 {
4048 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4049 	DB_DNODE_EXIT(dbi);
4050 }
4051 
4052 static void
4053 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4054 {
4055 	/* ASSERT(dmu_tx_is_syncing(tx)) */
4056 	ASSERT(MUTEX_HELD(&db->db_mtx));
4057 
4058 	if (db->db_blkptr != NULL)
4059 		return;
4060 
4061 	if (db->db_blkid == DMU_SPILL_BLKID) {
4062 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4063 		BP_ZERO(db->db_blkptr);
4064 		return;
4065 	}
4066 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4067 		/*
4068 		 * This buffer was allocated at a time when there were
4069 		 * no available blkptrs from the dnode, or it was
4070 		 * inappropriate to hook it in (i.e., nlevels mismatch).
4071 		 */
4072 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4073 		ASSERT(db->db_parent == NULL);
4074 		db->db_parent = dn->dn_dbuf;
4075 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4076 		DBUF_VERIFY(db);
4077 	} else {
4078 		dmu_buf_impl_t *parent = db->db_parent;
4079 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4080 
4081 		ASSERT(dn->dn_phys->dn_nlevels > 1);
4082 		if (parent == NULL) {
4083 			mutex_exit(&db->db_mtx);
4084 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
4085 			parent = dbuf_hold_level(dn, db->db_level + 1,
4086 			    db->db_blkid >> epbs, db);
4087 			rw_exit(&dn->dn_struct_rwlock);
4088 			mutex_enter(&db->db_mtx);
4089 			db->db_parent = parent;
4090 		}
4091 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
4092 		    (db->db_blkid & ((1ULL << epbs) - 1));
4093 		DBUF_VERIFY(db);
4094 	}
4095 }
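
/*
 * Illustrative sketch, not part of the original file: the indirect
 * addressing math used above.  With epbs = dn_indblkshift -
 * SPA_BLKPTRSHIFT (e.g. a 128K indirect block holds 1 << 10 blkptrs,
 * so epbs == 10), a block's parent indirect is blkid >> epbs and its
 * blkptr slot within that parent is blkid & ((1 << epbs) - 1).
 */
static void __maybe_unused
dbuf_parent_math_example(dnode_t *dn, uint64_t blkid)
{
	int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t parent_blkid = blkid >> epbs;
	uint64_t slot = blkid & ((1ULL << epbs) - 1);

	dprintf("blkid %llu -> parent %llu slot %llu\n",
	    (u_longlong_t)blkid, (u_longlong_t)parent_blkid,
	    (u_longlong_t)slot);
}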
4096 
4097 static void
4098 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4099 {
4100 	dmu_buf_impl_t *db = dr->dr_dbuf;
4101 	void *data = dr->dt.dl.dr_data;
4102 
4103 	ASSERT0(db->db_level);
4104 	ASSERT(MUTEX_HELD(&db->db_mtx));
4105 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4106 	ASSERT(data != NULL);
4107 
4108 	dnode_t *dn = dr->dr_dnode;
4109 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4110 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4111 	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4112 
4113 	dbuf_sync_leaf_verify_bonus_dnode(dr);
4114 
4115 	dbuf_undirty_bonus(dr);
4116 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4117 }
4118 
4119 /*
4120  * When syncing out a block of dnodes, adjust the block to deal with
4121  * encryption.  Normally, we make sure the block is decrypted before writing
4122  * it.  If we have crypt params, then we are writing a raw (encrypted) block
4123  * from a raw receive.  In this case, set the ARC buf's crypt params so
4124  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4125  */
4126 static void
4127 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4128 {
4129 	int err;
4130 	dmu_buf_impl_t *db = dr->dr_dbuf;
4131 
4132 	ASSERT(MUTEX_HELD(&db->db_mtx));
4133 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4134 	ASSERT3U(db->db_level, ==, 0);
4135 
4136 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4137 		zbookmark_phys_t zb;
4138 
4139 		/*
4140 		 * Unfortunately, there is currently no mechanism for
4141 		 * syncing context to handle decryption errors. An error
4142 		 * here is only possible if an attacker maliciously
4143 		 * changed a dnode block and updated the associated
4144 		 * checksums going up the block tree.
4145 		 */
4146 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4147 		    db->db.db_object, db->db_level, db->db_blkid);
4148 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4149 		    &zb, B_TRUE);
4150 		if (err)
4151 			panic("Invalid dnode block MAC");
4152 	} else if (dr->dt.dl.dr_has_raw_params) {
4153 		(void) arc_release(dr->dt.dl.dr_data, db);
4154 		arc_convert_to_raw(dr->dt.dl.dr_data,
4155 		    dmu_objset_id(db->db_objset),
4156 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4157 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4158 	}
4159 }
4160 
4161 /*
4162  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4163  * is critical the we not allow the compiler to inline this function in to
4164  * is critical that we not allow the compiler to inline this function into
4165  */
4166 noinline static void
4167 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4168 {
4169 	dmu_buf_impl_t *db = dr->dr_dbuf;
4170 	dnode_t *dn = dr->dr_dnode;
4171 
4172 	ASSERT(dmu_tx_is_syncing(tx));
4173 
4174 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4175 
4176 	mutex_enter(&db->db_mtx);
4177 
4178 	ASSERT(db->db_level > 0);
4179 	DBUF_VERIFY(db);
4180 
4181 	/* Read the block if it hasn't been read yet. */
4182 	if (db->db_buf == NULL) {
4183 		mutex_exit(&db->db_mtx);
4184 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4185 		mutex_enter(&db->db_mtx);
4186 	}
4187 	ASSERT3U(db->db_state, ==, DB_CACHED);
4188 	ASSERT(db->db_buf != NULL);
4189 
4190 	/* Indirect block size must match what the dnode thinks it is. */
4191 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4192 	dbuf_check_blkptr(dn, db);
4193 
4194 	/* Provide the pending dirty record to child dbufs */
4195 	db->db_data_pending = dr;
4196 
4197 	mutex_exit(&db->db_mtx);
4198 
4199 	dbuf_write(dr, db->db_buf, tx);
4200 
4201 	zio_t *zio = dr->dr_zio;
4202 	mutex_enter(&dr->dt.di.dr_mtx);
4203 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4204 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4205 	mutex_exit(&dr->dt.di.dr_mtx);
4206 	zio_nowait(zio);
4207 }
4208 
4209 /*
4210  * Verify that the size of the data in our bonus buffer does not exceed
4211  * its recorded size.
4212  *
4213  * The purpose of this verification is to catch any cases in development
4214  * where the size of a phys structure (i.e space_map_phys_t) grows and,
4215  * where the size of a phys structure (e.g. space_map_phys_t) grows and,
4216  * data even though they didn't actually write it to begin with.
4217  *
4218  * For example, this would catch an error in the feature logic where we
4219  * open an older pool and we expect to write the space map histogram of
4220  * a space map with size SPACE_MAP_SIZE_V0.
4221  */
4222 static void
4223 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4224 {
4225 #ifdef ZFS_DEBUG
4226 	dnode_t *dn = dr->dr_dnode;
4227 
4228 	/*
4229 	 * Encrypted bonus buffers can have data past their bonuslen.
4230 	 * Skip the verification of these blocks.
4231 	 */
4232 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4233 		return;
4234 
4235 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4236 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4237 	ASSERT3U(bonuslen, <=, maxbonuslen);
4238 
4239 	arc_buf_t *datap = dr->dt.dl.dr_data;
4240 	char *datap_end = ((char *)datap) + bonuslen;
4241 	char *datap_max = ((char *)datap) + maxbonuslen;
4242 
4243 	/* ensure that everything is zero after our data */
4244 	for (; datap_end < datap_max; datap_end++)
4245 		ASSERT(*datap_end == 0);
4246 #endif
4247 }
4248 
4249 static blkptr_t *
4250 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4251 {
4252 	/* This must be a lightweight dirty record. */
4253 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4254 	dnode_t *dn = dr->dr_dnode;
4255 
4256 	if (dn->dn_phys->dn_nlevels == 1) {
4257 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4258 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4259 	} else {
4260 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4261 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4262 		VERIFY3U(parent_db->db_level, ==, 1);
4263 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4264 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4265 		blkptr_t *bp = parent_db->db.db_data;
4266 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4267 	}
4268 }
4269 
4270 static void
4271 dbuf_lightweight_ready(zio_t *zio)
4272 {
4273 	dbuf_dirty_record_t *dr = zio->io_private;
4274 	blkptr_t *bp = zio->io_bp;
4275 
4276 	if (zio->io_error != 0)
4277 		return;
4278 
4279 	dnode_t *dn = dr->dr_dnode;
4280 
4281 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4282 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4283 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4284 	    bp_get_dsize_sync(spa, bp_orig);
4285 	dnode_diduse_space(dn, delta);
4286 
4287 	uint64_t blkid = dr->dt.dll.dr_blkid;
4288 	mutex_enter(&dn->dn_mtx);
4289 	if (blkid > dn->dn_phys->dn_maxblkid) {
4290 		ASSERT0(dn->dn_objset->os_raw_receive);
4291 		dn->dn_phys->dn_maxblkid = blkid;
4292 	}
4293 	mutex_exit(&dn->dn_mtx);
4294 
4295 	if (!BP_IS_EMBEDDED(bp)) {
4296 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4297 		BP_SET_FILL(bp, fill);
4298 	}
4299 
4300 	dmu_buf_impl_t *parent_db;
4301 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4302 	if (dr->dr_parent == NULL) {
4303 		parent_db = dn->dn_dbuf;
4304 	} else {
4305 		parent_db = dr->dr_parent->dr_dbuf;
4306 	}
4307 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4308 	*bp_orig = *bp;
4309 	rw_exit(&parent_db->db_rwlock);
4310 }
4311 
4312 static void
4313 dbuf_lightweight_physdone(zio_t *zio)
4314 {
4315 	dbuf_dirty_record_t *dr = zio->io_private;
4316 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4317 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4318 
4319 	/*
4320 	 * The callback will be called io_phys_children times.  Retire one
4321 	 * portion of our dirty space each time we are called.  Any rounding
4322 	 * error will be cleaned up by dbuf_lightweight_done().
4323 	 */
4324 	int delta = dr->dr_accounted / zio->io_phys_children;
4325 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4326 }
4327 
4328 static void
4329 dbuf_lightweight_done(zio_t *zio)
4330 {
4331 	dbuf_dirty_record_t *dr = zio->io_private;
4332 
4333 	VERIFY0(zio->io_error);
4334 
4335 	objset_t *os = dr->dr_dnode->dn_objset;
4336 	dmu_tx_t *tx = os->os_synctx;
4337 
4338 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4339 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4340 	} else {
4341 		dsl_dataset_t *ds = os->os_dsl_dataset;
4342 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4343 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4344 	}
4345 
4346 	/*
4347 	 * See comment in dbuf_write_done().
4348 	 */
4349 	if (zio->io_phys_children == 0) {
4350 		dsl_pool_undirty_space(dmu_objset_pool(os),
4351 		    dr->dr_accounted, zio->io_txg);
4352 	} else {
4353 		dsl_pool_undirty_space(dmu_objset_pool(os),
4354 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4355 	}
4356 
4357 	abd_free(dr->dt.dll.dr_abd);
4358 	kmem_free(dr, sizeof (*dr));
4359 }
4360 
4361 noinline static void
4362 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4363 {
4364 	dnode_t *dn = dr->dr_dnode;
4365 	zio_t *pio;
4366 	if (dn->dn_phys->dn_nlevels == 1) {
4367 		pio = dn->dn_zio;
4368 	} else {
4369 		pio = dr->dr_parent->dr_zio;
4370 	}
4371 
4372 	zbookmark_phys_t zb = {
4373 		.zb_objset = dmu_objset_id(dn->dn_objset),
4374 		.zb_object = dn->dn_object,
4375 		.zb_level = 0,
4376 		.zb_blkid = dr->dt.dll.dr_blkid,
4377 	};
4378 
4379 	/*
4380 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4381 	 * will have the old BP in dbuf_lightweight_done().
4382 	 */
4383 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4384 
4385 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4386 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4387 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4388 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4389 	    dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4390 	    ZIO_PRIORITY_ASYNC_WRITE,
4391 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4392 
4393 	zio_nowait(dr->dr_zio);
4394 }
4395 
4396 /*
4397  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4398  * critical that we not allow the compiler to inline this function into
4399  * dbuf_sync_list() thereby drastically bloating the stack usage.
4400  */
4401 noinline static void
4402 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4403 {
4404 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4405 	dmu_buf_impl_t *db = dr->dr_dbuf;
4406 	dnode_t *dn = dr->dr_dnode;
4407 	objset_t *os;
4408 	uint64_t txg = tx->tx_txg;
4409 
4410 	ASSERT(dmu_tx_is_syncing(tx));
4411 
4412 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4413 
4414 	mutex_enter(&db->db_mtx);
4415 	/*
4416 	 * To be synced, we must be dirtied.  But we
4417 	 * might have been freed after being dirtied.
4418 	 */
4419 	if (db->db_state == DB_UNCACHED) {
4420 		/* This buffer has been freed since it was dirtied */
4421 		ASSERT(db->db.db_data == NULL);
4422 	} else if (db->db_state == DB_FILL) {
4423 		/* This buffer was freed and is now being re-filled */
4424 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4425 	} else {
4426 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4427 	}
4428 	DBUF_VERIFY(db);
4429 
4430 	if (db->db_blkid == DMU_SPILL_BLKID) {
4431 		mutex_enter(&dn->dn_mtx);
4432 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4433 			/*
4434 			 * In the previous transaction group, the bonus buffer
4435 			 * was entirely used to store the attributes for the
4436 			 * dnode which overrode the dn_spill field.  However,
4437 			 * when adding more attributes to the file a spill
4438 			 * block was required to hold the extra attributes.
4439 			 *
4440 			 * Make sure to clear the garbage left in the dn_spill
4441 			 * field from the previous attributes in the bonus
4442 			 * buffer.  Otherwise, after writing out the spill
4443 			 * block to the new allocated dva, it will free
4444 			 * the old block pointed to by the invalid dn_spill.
4445 			 */
4446 			db->db_blkptr = NULL;
4447 		}
4448 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4449 		mutex_exit(&dn->dn_mtx);
4450 	}
4451 
4452 	/*
4453 	 * If this is a bonus buffer, simply copy the bonus data into the
4454 	 * dnode.  It will be written out when the dnode is synced (and it
4455 	 * will be synced, since it must have been dirty for dbuf_sync to
4456 	 * be called).
4457 	 */
4458 	if (db->db_blkid == DMU_BONUS_BLKID) {
4459 		ASSERT(dr->dr_dbuf == db);
4460 		dbuf_sync_bonus(dr, tx);
4461 		return;
4462 	}
4463 
4464 	os = dn->dn_objset;
4465 
4466 	/*
4467 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4468 	 * operation to sneak in. As a result, we need to ensure that we
4469 	 * don't check the dr_override_state until we have returned from
4470 	 * dbuf_check_blkptr.
4471 	 */
4472 	dbuf_check_blkptr(dn, db);
4473 
4474 	/*
4475 	 * If this buffer is in the middle of an immediate write,
4476 	 * wait for the synchronous IO to complete.
4477 	 */
4478 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4479 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4480 		cv_wait(&db->db_changed, &db->db_mtx);
4481 	}
4482 
4483 	/*
4484 	 * If this is a dnode block, ensure it is appropriately encrypted
4485 	 * or decrypted, depending on what we are writing to it this txg.
4486 	 */
4487 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4488 		dbuf_prepare_encrypted_dnode_leaf(dr);
4489 
4490 	if (db->db_state != DB_NOFILL &&
4491 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4492 	    zfs_refcount_count(&db->db_holds) > 1 &&
4493 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4494 	    *datap == db->db_buf) {
4495 		/*
4496 		 * If this buffer is currently "in use" (i.e., there
4497 		 * are active holds and db_data still references it),
4498 		 * then make a copy before we start the write so that
4499 		 * any modifications from the open txg will not leak
4500 		 * into this write.
4501 		 *
4502 		 * NOTE: this copy does not need to be made for
4503 		 * objects only modified in the syncing context (e.g.
4504 		 * DNONE_DNODE blocks).
4505 		 * DMU_OT_DNODE blocks).
4506 		int psize = arc_buf_size(*datap);
4507 		int lsize = arc_buf_lsize(*datap);
4508 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4509 		enum zio_compress compress_type = arc_get_compression(*datap);
4510 		uint8_t complevel = arc_get_complevel(*datap);
4511 
4512 		if (arc_is_encrypted(*datap)) {
4513 			boolean_t byteorder;
4514 			uint8_t salt[ZIO_DATA_SALT_LEN];
4515 			uint8_t iv[ZIO_DATA_IV_LEN];
4516 			uint8_t mac[ZIO_DATA_MAC_LEN];
4517 
4518 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4519 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4520 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4521 			    dn->dn_type, psize, lsize, compress_type,
4522 			    complevel);
4523 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4524 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4525 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4526 			    psize, lsize, compress_type, complevel);
4527 		} else {
4528 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4529 		}
4530 		memcpy((*datap)->b_data, db->db.db_data, psize);
4531 	}
4532 	db->db_data_pending = dr;
4533 
4534 	mutex_exit(&db->db_mtx);
4535 
4536 	dbuf_write(dr, *datap, tx);
4537 
4538 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4539 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4540 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4541 	} else {
4542 		zio_nowait(dr->dr_zio);
4543 	}
4544 }
4545 
4546 void
4547 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4548 {
4549 	dbuf_dirty_record_t *dr;
4550 
4551 	while ((dr = list_head(list))) {
4552 		if (dr->dr_zio != NULL) {
4553 			/*
4554 			 * If we find an already initialized zio then we
4555 			 * are processing the meta-dnode, and we have finished.
4556 			 * The dbufs for all dnodes are put back on the list
4557 			 * during processing, so that we can zio_wait()
4558 			 * these IOs after initiating all child IOs.
4559 			 */
4560 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4561 			    DMU_META_DNODE_OBJECT);
4562 			break;
4563 		}
4564 		list_remove(list, dr);
4565 		if (dr->dr_dbuf == NULL) {
4566 			dbuf_sync_lightweight(dr, tx);
4567 		} else {
4568 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4569 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4570 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4571 			}
4572 			if (dr->dr_dbuf->db_level > 0)
4573 				dbuf_sync_indirect(dr, tx);
4574 			else
4575 				dbuf_sync_leaf(dr, tx);
4576 		}
4577 	}
4578 }
4579 
4580 static void
4581 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4582 {
4583 	(void) buf;
4584 	dmu_buf_impl_t *db = vdb;
4585 	dnode_t *dn;
4586 	blkptr_t *bp = zio->io_bp;
4587 	blkptr_t *bp_orig = &zio->io_bp_orig;
4588 	spa_t *spa = zio->io_spa;
4589 	int64_t delta;
4590 	uint64_t fill = 0;
4591 	int i;
4592 
4593 	ASSERT3P(db->db_blkptr, !=, NULL);
4594 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4595 
4596 	DB_DNODE_ENTER(db);
4597 	dn = DB_DNODE(db);
4598 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4599 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4600 	zio->io_prev_space_delta = delta;
4601 
4602 	if (bp->blk_birth != 0) {
4603 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4604 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4605 		    (db->db_blkid == DMU_SPILL_BLKID &&
4606 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4607 		    BP_IS_EMBEDDED(bp));
4608 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4609 	}
4610 
4611 	mutex_enter(&db->db_mtx);
4612 
4613 #ifdef ZFS_DEBUG
4614 	if (db->db_blkid == DMU_SPILL_BLKID) {
4615 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4616 		ASSERT(!(BP_IS_HOLE(bp)) &&
4617 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4618 	}
4619 #endif
4620 
4621 	if (db->db_level == 0) {
4622 		mutex_enter(&dn->dn_mtx);
4623 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4624 		    db->db_blkid != DMU_SPILL_BLKID) {
4625 			ASSERT0(db->db_objset->os_raw_receive);
4626 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4627 		}
4628 		mutex_exit(&dn->dn_mtx);
4629 
4630 		if (dn->dn_type == DMU_OT_DNODE) {
4631 			i = 0;
4632 			while (i < db->db.db_size) {
4633 				dnode_phys_t *dnp =
4634 				    (void *)(((char *)db->db.db_data) + i);
4635 
4636 				i += DNODE_MIN_SIZE;
4637 				if (dnp->dn_type != DMU_OT_NONE) {
4638 					fill++;
4639 					i += dnp->dn_extra_slots *
4640 					    DNODE_MIN_SIZE;
4641 				}
4642 			}
4643 		} else {
4644 			if (BP_IS_HOLE(bp)) {
4645 				fill = 0;
4646 			} else {
4647 				fill = 1;
4648 			}
4649 		}
4650 	} else {
4651 		blkptr_t *ibp = db->db.db_data;
4652 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4653 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4654 			if (BP_IS_HOLE(ibp))
4655 				continue;
4656 			fill += BP_GET_FILL(ibp);
4657 		}
4658 	}
4659 	DB_DNODE_EXIT(db);
4660 
4661 	if (!BP_IS_EMBEDDED(bp))
4662 		BP_SET_FILL(bp, fill);
4663 
4664 	mutex_exit(&db->db_mtx);
4665 
4666 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4667 	*db->db_blkptr = *bp;
4668 	dmu_buf_unlock_parent(db, dblt, FTAG);
4669 }
4670 
4671 /*
4672  * This function gets called just prior to running through the compression
4673  * stage of the zio pipeline. If we're an indirect block composed of only
4674  * holes, then we want this indirect to be compressed away to a hole. In
4675  * order to do that we must zero out any information about the holes that
4676  * this indirect points to before we try to compress it.
4677  */
4678 static void
4679 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4680 {
4681 	(void) zio, (void) buf;
4682 	dmu_buf_impl_t *db = vdb;
4683 	dnode_t *dn;
4684 	blkptr_t *bp;
4685 	unsigned int epbs, i;
4686 
4687 	ASSERT3U(db->db_level, >, 0);
4688 	DB_DNODE_ENTER(db);
4689 	dn = DB_DNODE(db);
4690 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4691 	ASSERT3U(epbs, <, 31);
4692 
4693 	/* Determine if all our children are holes */
4694 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4695 		if (!BP_IS_HOLE(bp))
4696 			break;
4697 	}
4698 
4699 	/*
4700 	 * If all the children are holes, then zero them all out so that
4701 	 * this indirect block may be compressed away.
4702 	 */
4703 	if (i == 1ULL << epbs) {
4704 		/*
4705 		 * We only found holes. Grab the rwlock to prevent
4706 		 * anybody from reading the blocks we're about to
4707 		 * zero out.
4708 		 */
4709 		rw_enter(&db->db_rwlock, RW_WRITER);
4710 		memset(db->db.db_data, 0, db->db.db_size);
4711 		rw_exit(&db->db_rwlock);
4712 	}
4713 	DB_DNODE_EXIT(db);
4714 }
4715 
4716 /*
4717  * The SPA will call this callback several times for each zio - once
4718  * for every physical child i/o (zio->io_phys_children times).  This
4719  * allows the DMU to monitor the progress of each logical i/o.  For example,
4720  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4721  * block.  There may be a long delay before all copies/fragments are completed,
4722  * so this callback allows us to retire dirty space gradually, as the physical
4723  * i/os complete.
4724  */
4725 static void
4726 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4727 {
4728 	(void) buf;
4729 	dmu_buf_impl_t *db = arg;
4730 	objset_t *os = db->db_objset;
4731 	dsl_pool_t *dp = dmu_objset_pool(os);
4732 	dbuf_dirty_record_t *dr;
4733 	int delta = 0;
4734 
4735 	dr = db->db_data_pending;
4736 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4737 
4738 	/*
4739 	 * The callback will be called io_phys_children times.  Retire one
4740 	 * portion of our dirty space each time we are called.  Any rounding
4741 	 * error will be cleaned up by dbuf_write_done().
4742 	 */
4743 	delta = dr->dr_accounted / zio->io_phys_children;
4744 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4745 }
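
/*
 * Worked example, not part of the original file: with dr_accounted ==
 * 10 units and io_phys_children == 3, each of the three physdone
 * callbacks above retires 10 / 3 == 3 units (9 in total), and
 * dbuf_write_done() retires the remaining 10 % 3 == 1 unit.
 */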
4746 
4747 static void
4748 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4749 {
4750 	(void) buf;
4751 	dmu_buf_impl_t *db = vdb;
4752 	blkptr_t *bp_orig = &zio->io_bp_orig;
4753 	blkptr_t *bp = db->db_blkptr;
4754 	objset_t *os = db->db_objset;
4755 	dmu_tx_t *tx = os->os_synctx;
4756 
4757 	ASSERT0(zio->io_error);
4758 	ASSERT(db->db_blkptr == bp);
4759 
4760 	/*
4761 	 * For nopwrites and rewrites we ensure that the bp matches our
4762 	 * original and bypass all the accounting.
4763 	 */
4764 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4765 		ASSERT(BP_EQUAL(bp, bp_orig));
4766 	} else {
4767 		dsl_dataset_t *ds = os->os_dsl_dataset;
4768 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4769 		dsl_dataset_block_born(ds, bp, tx);
4770 	}
4771 
4772 	mutex_enter(&db->db_mtx);
4773 
4774 	DBUF_VERIFY(db);
4775 
4776 	dbuf_dirty_record_t *dr = db->db_data_pending;
4777 	dnode_t *dn = dr->dr_dnode;
4778 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4779 	ASSERT(dr->dr_dbuf == db);
4780 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4781 	list_remove(&db->db_dirty_records, dr);
4782 
4783 #ifdef ZFS_DEBUG
4784 	if (db->db_blkid == DMU_SPILL_BLKID) {
4785 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4786 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4787 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4788 	}
4789 #endif
4790 
4791 	if (db->db_level == 0) {
4792 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4793 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4794 		if (db->db_state != DB_NOFILL) {
4795 			if (dr->dt.dl.dr_data != NULL &&
4796 			    dr->dt.dl.dr_data != db->db_buf) {
4797 				arc_buf_destroy(dr->dt.dl.dr_data, db);
4798 			}
4799 		}
4800 	} else {
4801 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4802 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4803 		if (!BP_IS_HOLE(db->db_blkptr)) {
4804 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4805 			    SPA_BLKPTRSHIFT;
4806 			ASSERT3U(db->db_blkid, <=,
4807 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4808 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4809 			    db->db.db_size);
4810 		}
4811 		mutex_destroy(&dr->dt.di.dr_mtx);
4812 		list_destroy(&dr->dt.di.dr_children);
4813 	}
4814 
4815 	cv_broadcast(&db->db_changed);
4816 	ASSERT(db->db_dirtycnt > 0);
4817 	db->db_dirtycnt -= 1;
4818 	db->db_data_pending = NULL;
4819 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4820 
4821 	/*
4822 	 * If we didn't do a physical write in this ZIO and we
4823 	 * still ended up here, it means that the space of the
4824 	 * dbuf that we just released (and undirtied) above hasn't
4825 	 * been marked as undirtied in the pool's accounting.
4826 	 *
4827 	 * Thus, we undirty that space in the pool's view of the
4828 	 * world here. For physical writes this type of update
4829 	 * happens in dbuf_write_physdone().
4830 	 *
4831 	 * If we did a physical write, cleanup any rounding errors
4832 	 * that came up due to writing multiple copies of a block
4833 	 * on disk [see dbuf_write_physdone()].
4834 	 */
4835 	if (zio->io_phys_children == 0) {
4836 		dsl_pool_undirty_space(dmu_objset_pool(os),
4837 		    dr->dr_accounted, zio->io_txg);
4838 	} else {
4839 		dsl_pool_undirty_space(dmu_objset_pool(os),
4840 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4841 	}
4842 
4843 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
4844 }
4845 
4846 static void
4847 dbuf_write_nofill_ready(zio_t *zio)
4848 {
4849 	dbuf_write_ready(zio, NULL, zio->io_private);
4850 }
4851 
4852 static void
4853 dbuf_write_nofill_done(zio_t *zio)
4854 {
4855 	dbuf_write_done(zio, NULL, zio->io_private);
4856 }
4857 
4858 static void
4859 dbuf_write_override_ready(zio_t *zio)
4860 {
4861 	dbuf_dirty_record_t *dr = zio->io_private;
4862 	dmu_buf_impl_t *db = dr->dr_dbuf;
4863 
4864 	dbuf_write_ready(zio, NULL, db);
4865 }
4866 
4867 static void
4868 dbuf_write_override_done(zio_t *zio)
4869 {
4870 	dbuf_dirty_record_t *dr = zio->io_private;
4871 	dmu_buf_impl_t *db = dr->dr_dbuf;
4872 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4873 
4874 	mutex_enter(&db->db_mtx);
4875 	if (!BP_EQUAL(zio->io_bp, obp)) {
4876 		if (!BP_IS_HOLE(obp))
4877 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4878 		arc_release(dr->dt.dl.dr_data, db);
4879 	}
4880 	mutex_exit(&db->db_mtx);
4881 
4882 	dbuf_write_done(zio, NULL, db);
4883 
4884 	if (zio->io_abd != NULL)
4885 		abd_free(zio->io_abd);
4886 }
4887 
4888 typedef struct dbuf_remap_impl_callback_arg {
4889 	objset_t	*drica_os;
4890 	uint64_t	drica_blk_birth;
4891 	dmu_tx_t	*drica_tx;
4892 } dbuf_remap_impl_callback_arg_t;
4893 
4894 static void
4895 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4896     void *arg)
4897 {
4898 	dbuf_remap_impl_callback_arg_t *drica = arg;
4899 	objset_t *os = drica->drica_os;
4900 	spa_t *spa = dmu_objset_spa(os);
4901 	dmu_tx_t *tx = drica->drica_tx;
4902 
4903 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4904 
4905 	if (os == spa_meta_objset(spa)) {
4906 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4907 	} else {
4908 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4909 		    size, drica->drica_blk_birth, tx);
4910 	}
4911 }
4912 
4913 static void
4914 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4915 {
4916 	blkptr_t bp_copy = *bp;
4917 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4918 	dbuf_remap_impl_callback_arg_t drica;
4919 
4920 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4921 
4922 	drica.drica_os = dn->dn_objset;
4923 	drica.drica_blk_birth = bp->blk_birth;
4924 	drica.drica_tx = tx;
4925 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4926 	    &drica)) {
4927 		/*
4928 		 * If the blkptr being remapped is tracked by a livelist,
4929 		 * then we need to make sure the livelist reflects the update.
4930 		 * First, cancel out the old blkptr by appending a 'FREE'
4931 		 * entry. Next, add an 'ALLOC' to track the new version. This
4932 		 * way we avoid trying to free an inaccurate blkptr at delete.
4933 		 * Note that embedded blkptrs are not tracked in livelists.
4934 		 */
4935 		if (dn->dn_objset != spa_meta_objset(spa)) {
4936 			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4937 			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4938 			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4939 				ASSERT(!BP_IS_EMBEDDED(bp));
4940 				ASSERT(dsl_dir_is_clone(ds->ds_dir));
4941 				ASSERT(spa_feature_is_enabled(spa,
4942 				    SPA_FEATURE_LIVELIST));
4943 				bplist_append(&ds->ds_dir->dd_pending_frees,
4944 				    bp);
4945 				bplist_append(&ds->ds_dir->dd_pending_allocs,
4946 				    &bp_copy);
4947 			}
4948 		}
4949 
4950 		/*
4951 		 * The db_rwlock prevents dbuf_read_impl() from
4952 		 * dereferencing the BP while we are changing it.  To
4953 		 * avoid lock contention, only grab it when we are actually
4954 		 * changing the BP.
4955 		 */
4956 		if (rw != NULL)
4957 			rw_enter(rw, RW_WRITER);
4958 		*bp = bp_copy;
4959 		if (rw != NULL)
4960 			rw_exit(rw);
4961 	}
4962 }
4963 
4964 /*
4965  * Remap any existing BP's to concrete vdevs, if possible.
4966  */
4967 static void
4968 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4969 {
4970 	spa_t *spa = dmu_objset_spa(db->db_objset);
4971 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4972 
4973 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4974 		return;
4975 
4976 	if (db->db_level > 0) {
4977 		blkptr_t *bp = db->db.db_data;
4978 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4979 			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4980 		}
4981 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4982 		dnode_phys_t *dnp = db->db.db_data;
4983 		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4984 		    DMU_OT_DNODE);
4985 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4986 		    i += dnp[i].dn_extra_slots + 1) {
4987 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4988 				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4989 				    &dn->dn_dbuf->db_rwlock);
4990 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4991 				    tx);
4992 			}
4993 		}
4994 	}
4995 }
4996 
4997 
4998 /* Issue I/O to commit a dirty buffer to disk. */
4999 static void
5000 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5001 {
5002 	dmu_buf_impl_t *db = dr->dr_dbuf;
5003 	dnode_t *dn = dr->dr_dnode;
5004 	objset_t *os;
5005 	dmu_buf_impl_t *parent = db->db_parent;
5006 	uint64_t txg = tx->tx_txg;
5007 	zbookmark_phys_t zb;
5008 	zio_prop_t zp;
5009 	zio_t *pio; /* parent I/O */
5010 	int wp_flag = 0;
5011 
5012 	ASSERT(dmu_tx_is_syncing(tx));
5013 
5014 	os = dn->dn_objset;
5015 
5016 	if (db->db_state != DB_NOFILL) {
5017 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5018 			/*
5019 			 * Private object buffers are released here rather
5020 			 * than in dbuf_dirty() since they are only modified
5021 			 * in the syncing context and we don't want the
5022 			 * overhead of making multiple copies of the data.
5023 			 */
5024 			if (BP_IS_HOLE(db->db_blkptr)) {
5025 				arc_buf_thaw(data);
5026 			} else {
5027 				dbuf_release_bp(db);
5028 			}
5029 			dbuf_remap(dn, db, tx);
5030 		}
5031 	}
5032 
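	/*
	 * Chain this write under its parent's zio (either the indirect
	 * block's pending write or the dnode's zio) so the parent,
	 * which embeds this block's BP, cannot complete before its
	 * children do.
	 */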
5033 	if (parent != dn->dn_dbuf) {
5034 		/* Our parent is an indirect block. */
5035 		/* We have a dirty parent that has been scheduled for write. */
5036 		ASSERT(parent && parent->db_data_pending);
5037 		/* Our parent's buffer is one level closer to the dnode. */
5038 		ASSERT(db->db_level == parent->db_level-1);
5039 		/*
5040 		 * We're about to modify our parent's db_data by modifying
5041 		 * our block pointer, so the parent must be released.
5042 		 */
5043 		ASSERT(arc_released(parent->db_buf));
5044 		pio = parent->db_data_pending->dr_zio;
5045 	} else {
5046 		/* Our parent is the dnode itself. */
5047 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5048 		    db->db_blkid != DMU_SPILL_BLKID) ||
5049 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5050 		if (db->db_blkid != DMU_SPILL_BLKID)
5051 			ASSERT3P(db->db_blkptr, ==,
5052 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
5053 		pio = dn->dn_zio;
5054 	}
5055 
5056 	ASSERT(db->db_level == 0 || data == db->db_buf);
5057 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
5058 	ASSERT(pio);
5059 
5060 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5061 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5062 	    db->db.db_object, db->db_level, db->db_blkid);
5063 
5064 	if (db->db_blkid == DMU_SPILL_BLKID)
5065 		wp_flag = WP_SPILL;
5066 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5067 
5068 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5069 
5070 	/*
5071 	 * We copy the blkptr now (rather than when we instantiate the dirty
5072 	 * record), because its value can change between open context and
5073 	 * syncing context. We do not need to hold dn_struct_rwlock to read
5074 	 * db_blkptr because we are in syncing context.
5075 	 */
5076 	dr->dr_bp_copy = *db->db_blkptr;
5077 
5078 	if (db->db_level == 0 &&
5079 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5080 		/*
5081 		 * The BP for this block has been provided by open context
5082 		 * (by dmu_sync() or dmu_buf_write_embedded()).
5083 		 */
5084 		abd_t *contents = (data != NULL) ?
5085 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5086 
5087 		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5088 		    contents, db->db.db_size, db->db.db_size, &zp,
5089 		    dbuf_write_override_ready, NULL, NULL,
5090 		    dbuf_write_override_done,
5091 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5092 		mutex_enter(&db->db_mtx);
5093 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
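		/*
		 * Point the zio at the BP that open context already
		 * produced (dr_overridden_by) so that no new block is
		 * allocated; the nopwrite and BRT (block-clone) state
		 * travels with it.
		 */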
5094 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5095 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5096 		    dr->dt.dl.dr_brtwrite);
5097 		mutex_exit(&db->db_mtx);
5098 	} else if (db->db_state == DB_NOFILL) {
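		/*
		 * NOFILL buffers carry no data; issue a NODATA write
		 * that only brings the block pointer up to date.
		 */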
5099 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5100 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5101 		dr->dr_zio = zio_write(pio, os->os_spa, txg,
5102 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5103 		    dbuf_write_nofill_ready, NULL, NULL,
5104 		    dbuf_write_nofill_done, db,
5105 		    ZIO_PRIORITY_ASYNC_WRITE,
5106 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5107 	} else {
5108 		ASSERT(arc_released(data));
5109 
5110 		/*
5111 		 * For indirect blocks, we want to set up the children
5112 		 * ready callback so that we can properly handle an indirect
5113 		 * block that only contains holes.
5114 		 */
5115 		arc_write_done_func_t *children_ready_cb = NULL;
5116 		if (db->db_level != 0)
5117 			children_ready_cb = dbuf_write_children_ready;
5118 
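		/*
		 * Common case: write the ARC buffer through the normal
		 * zio pipeline.  dbuf_write_ready copies the newly
		 * allocated BP into place and dbuf_write_done finishes
		 * the dirty record once the write completes.
		 */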
5119 		dr->dr_zio = arc_write(pio, os->os_spa, txg,
5120 		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5121 		    dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
5122 		    children_ready_cb, dbuf_write_physdone,
5123 		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
5124 		    ZIO_FLAG_MUSTSUCCEED, &zb);
5125 	}
5126 }
5127 
5128 EXPORT_SYMBOL(dbuf_find);
5129 EXPORT_SYMBOL(dbuf_is_metadata);
5130 EXPORT_SYMBOL(dbuf_destroy);
5131 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5132 EXPORT_SYMBOL(dbuf_whichblock);
5133 EXPORT_SYMBOL(dbuf_read);
5134 EXPORT_SYMBOL(dbuf_unoverride);
5135 EXPORT_SYMBOL(dbuf_free_range);
5136 EXPORT_SYMBOL(dbuf_new_size);
5137 EXPORT_SYMBOL(dbuf_release_bp);
5138 EXPORT_SYMBOL(dbuf_dirty);
5139 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5140 EXPORT_SYMBOL(dmu_buf_will_dirty);
5141 EXPORT_SYMBOL(dmu_buf_is_dirty);
5142 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5143 EXPORT_SYMBOL(dmu_buf_will_fill);
5144 EXPORT_SYMBOL(dmu_buf_fill_done);
5145 EXPORT_SYMBOL(dmu_buf_rele);
5146 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5147 EXPORT_SYMBOL(dbuf_prefetch);
5148 EXPORT_SYMBOL(dbuf_hold_impl);
5149 EXPORT_SYMBOL(dbuf_hold);
5150 EXPORT_SYMBOL(dbuf_hold_level);
5151 EXPORT_SYMBOL(dbuf_create_bonus);
5152 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5153 EXPORT_SYMBOL(dbuf_rm_spill);
5154 EXPORT_SYMBOL(dbuf_add_ref);
5155 EXPORT_SYMBOL(dbuf_rele);
5156 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5157 EXPORT_SYMBOL(dbuf_refcount);
5158 EXPORT_SYMBOL(dbuf_sync_list);
5159 EXPORT_SYMBOL(dmu_buf_set_user);
5160 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5161 EXPORT_SYMBOL(dmu_buf_get_user);
5162 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5163 
5164 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5165 	"Maximum size in bytes of the dbuf cache.");
5166 
5167 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5168 	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5169 
5170 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5171 	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5172 
5173 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5174 	"Maximum size in bytes of the dbuf metadata cache.");
5175 
5176 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5177 	"Set size of dbuf cache to log2 fraction of ARC size.");
5178 
5179 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5180 	"Set size of dbuf metadata cache to log2 fraction of ARC size.");
5181 
5182 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5183 	"Set size of dbuf cache mutex array as log2 shift.");
5184