xref: /freebsd/sys/contrib/openzfs/module/zfs/dbuf.c (revision 87b759f0fa1f7554d50ce640c40138512bbded44)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>
#include <sys/vdev_impl.h>

static kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. Keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times a dbuf_create() discovers that a dbuf was
	 * already created and in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Number of entries in the hash table dbuf and mutex arrays.
	 */
	kstat_named_t hash_table_count;
	kstat_named_t hash_mutex_count;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "hash_table_count",			KSTAT_DATA_UINT64 },
	{ "hash_mutex_count",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val)
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val))
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1)
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1)
#define	DBUF_STAT_MAX(stat, v) {					\
	uint64_t _m;							\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;						\
}
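
/*
 * DBUF_STAT_MAX() is a lock-free "raise to maximum" update: it retries
 * the compare-and-swap until the published value is at least v.  For
 * illustration (hypothetical values), after
 *
 *	DBUF_STAT_MAX(hash_chain_max, 7);
 *	DBUF_STAT_MAX(hash_chain_max, 3);
 *
 * dbuf_stats.hash_chain_max.value.ui64 is 7; the second call observes
 * 3 <= 7 and makes no update, and this holds under concurrent callers.
 */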

static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

/* Size limits for the caches */
static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
static uint_t dbuf_cache_shift = 5;
static uint_t dbuf_metadata_cache_shift = 6;

/* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
static uint_t dbuf_mutex_cache_shift = 0;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                      evicting     eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */

/*
 * The percentage above and below the maximum cache size.
 */
static uint_t dbuf_cache_hiwater_pct = 10;
static uint_t dbuf_cache_lowater_pct = 10;

static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	(void) unused, (void) kmflag;
	dmu_buf_impl_t *db = vdb;
	memset(db, 0, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
	rw_init(&db->db_rwlock, NULL, RW_NOLOCKDEP, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

static void
dbuf_dest(void *vdb, void *unused)
{
	(void) unused;
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
    uint64_t *hash_out)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	if (hash_out != NULL)
		*hash_out = hv;
	return (NULL);
}
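
/*
 * On success dbuf_find() returns with db_mtx held, so every caller must
 * eventually drop it.  A minimal (hypothetical) usage sketch:
 *
 *	dmu_buf_impl_t *db = dbuf_find(os, obj, 0, blkid, NULL);
 *	if (db != NULL) {
 *		... inspect the dbuf under db->db_mtx ...
 *		mutex_exit(&db->db_mtx);
 *	}
 */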

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table.  It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t idx;
	dmu_buf_impl_t *dbf, **dbp;

	ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
	    db->db_blkid), ==, db->db_hash);
	idx = db->db_hash & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data().  However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	if (db->db_caching_status != DB_NO_CACHE) {
		/*
		 * This is a cached dbuf, so the size of the user data is
		 * included in its cached amount. We adjust it here because the
		 * user data has already been detached from the dbuf, and the
		 * sync functions are not supposed to touch it (the dbuf might
		 * not exist anymore by the time the sync functions run).
		 */
		uint64_t size = dbu->dbu_size;
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[db->db_caching_status].size, size, dbu);
		if (db->db_caching_status == DB_DBUF_CACHE)
			DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size);
	}

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq.  The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu.  In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

/*
 * We want to exclude buffers that are on a special allocation class from
 * L2ARC.
 */
boolean_t
dbuf_is_l2cacheable(dmu_buf_impl_t *db, blkptr_t *bp)
{
	if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (db->db_objset->os_secondary_cache ==
	    ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		/*
		 * bp must be checked in the event it was passed from
		 * dbuf_read_impl() as the result of the BP being set from
		 * a Direct I/O write in dbuf_read(). See comments in
		 * dbuf_read().
		 */
		blkptr_t *db_bp = bp == NULL ? db->db_blkptr : bp;

		if (db_bp == NULL || BP_IS_HOLE(db_bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(db_bp->blk_dva);
		vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}

static inline boolean_t
dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
{
	if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
	    (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
	    (level > 0 ||
	    DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
		if (l2arc_exclude_special == 0)
			return (B_TRUE);

		if (bp == NULL || BP_IS_HOLE(bp))
			return (B_FALSE);
		uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
		vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
		vdev_t *vd = NULL;

		if (vdev < rvd->vdev_children)
			vd = rvd->vdev_child[vdev];

		if (vd == NULL)
			return (B_TRUE);

		if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
		    vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
			return (B_TRUE);
	}
	return (B_FALSE);
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed. In this context full 64bit
	 * division would be a waste of time, so limit it to 32 bits.
	 */
	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}
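
/*
 * For illustration (hypothetical numbers): with an ARC target of 8 GiB
 * and the default dbuf_cache_shift of 5, the dbuf cache target is
 * 8 GiB >> 5 = 256 MiB; dbuf_cache_max_bytes caps it only if set lower.
 */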

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
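
/*
 * Worked example (hypothetical numbers): with a 256 MiB target and the
 * default 10% watermarks, dbuf_cache_lowater_bytes() is ~230 MiB and
 * dbuf_cache_hiwater_bytes() is ~282 MiB.  The evict thread is signaled
 * once the cache exceeds 256 MiB, evicts down to ~230 MiB, and callers
 * start evicting inline only above ~282 MiB.
 */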

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock_idx(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		uint64_t size = db->db.db_size;
		uint64_t usize = dmu_buf_user_size(&db->db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, size, db);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, usize, db->db_user);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level], size + usize);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the low water mark. Once the dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
static __attribute__((noreturn)) void
dbuf_evict_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}
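
/*
 * A sketch of the intended calling pattern (mirroring the dbuf release
 * path later in this file): the caller adds the dbuf's size to the cache
 * refcount and passes the returned running total to dbuf_evict_notify(),
 * e.g.
 *
 *	uint64_t size = zfs_refcount_add_many(
 *	    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
 *	dbuf_evict_notify(size);
 *
 * so the comparison against the target and high water marks is made
 * against a total that already includes the newly cached dbuf.
 */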

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
	ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hmsize, hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;

	/*
	 * The hash table is big enough to fill one eighth of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 64K (128KB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
		hsize <<= 1;

	h->hash_table = NULL;
	while (h->hash_table == NULL) {
		h->hash_table_mask = hsize - 1;

		h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
		if (h->hash_table == NULL)
			hsize >>= 1;

		ASSERT3U(hsize, >=, 1ULL << 10);
	}
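
	/*
	 * For illustration (hypothetical machine): with 16 GiB of memory
	 * and the default 8K average block size, the sizing loop above
	 * settles on hsize = 2^18 buckets (2^18 * 8K = 2 GiB, one eighth
	 * of memory), so the bucket array costs 2^18 * 8 bytes = 2 MiB.
	 */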

	/*
	 * The hash table buckets are protected by an array of mutexes where
	 * each mutex is responsible for protecting 128 buckets.  A minimum
	 * array size of 8192 is targeted to avoid contention.
	 */
	if (dbuf_mutex_cache_shift == 0)
		hmsize = MAX(hsize >> 7, 1ULL << 13);
	else
		hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);

	h->hash_mutexes = NULL;
	while (h->hash_mutexes == NULL) {
		h->hash_mutex_mask = hmsize - 1;

		h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
		    KM_SLEEP);
		if (h->hash_mutexes == NULL)
			hmsize >>= 1;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (int i = 0; i < hmsize; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_NOLOCKDEP, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (int i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;

	dbuf_stats_destroy();

	for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
		mutex_destroy(&h->hash_mutexes[i]);

	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
	    sizeof (kmutex_t));

	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		memcpy(abuf->b_data, db->db.db_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
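
/*
 * Worked example (hypothetical shifts): with 128K data blocks
 * (datablkshift = 17) and 128K indirect blocks (indblkshift = 17), an
 * indirect block holds 2^(17 - SPA_BLKPTRSHIFT) = 2^10 = 1024 block
 * pointers, so the level 1 block covering byte offset 1 GiB is
 * dbuf_whichblock(dn, 1, 1ULL << 30) = 2^30 >> (17 + 1 * 10) = 8.
 */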

/*
 * This function is used to lock the parent of the provided dbuf. This should be
 * used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
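
/*
 * The two functions above are used as a pair; a minimal sketch of the
 * pattern (as in the read path):
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	... read or modify db->db_blkptr ...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 *
 * Returning the lock type and passing it back ensures the matching lock
 * (parent rwlock, objset rrwlock, or none) is the one dropped.
 */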

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	(void) zb, (void) bp;
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);

	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "i/o error");
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		memset(buf->b_data, 0, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "freed in flight");
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "successful read");
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * Shortcut for performing reads on bonus dbufs.  Returns
 * an error if we fail to verify the dnode associated with
 * a decrypted block. Otherwise success.
 */
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn)
{
	int bonuslen, max_bonuslen;

	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(DB_DNODE_HELD(db));
	ASSERT3U(bonuslen, <=, db->db.db_size);
	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
	if (bonuslen < max_bonuslen)
		memset(db->db.db_data, 0, max_bonuslen);
	if (bonuslen)
		memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
	db->db_state = DB_CACHED;
	DTRACE_SET_STATE(db, "bonus buffer filled");
	return (0);
}

static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
{
	blkptr_t *bps = db->db.db_data;
	uint32_t indbs = 1ULL << dn->dn_indblkshift;
	int n_bps = indbs >> SPA_BLKPTRSHIFT;

	for (int i = 0; i < n_bps; i++) {
		blkptr_t *bp = &bps[i];

		ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
		BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
		    dn->dn_datablksz : BP_GET_LSIZE(dbbp));
		BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
		BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
		BP_SET_BIRTH(bp, BP_GET_LOGICAL_BIRTH(dbbp), 0);
	}
}

/*
 * Handle reads on dbufs that are holes, if necessary.  This function
 * requires that the dbuf's mutex is held. Returns success (0) if action
 * was taken, ENOENT if no action was taken.
 */
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	int is_hole = bp == NULL || BP_IS_HOLE(bp);
	/*
	 * For level 0 blocks only, if the above check fails:
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (!is_hole && db->db_level == 0)
		is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);

	if (is_hole) {
		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
		memset(db->db.db_data, 0, db->db.db_size);

		if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
		    BP_GET_LOGICAL_BIRTH(bp) != 0) {
			dbuf_handle_indirect_hole(db, dn, bp);
		}
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "hole read satisfied");
		return (0);
	}
	return (ENOENT);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	objset_t *os = db->db_objset;
	dmu_buf_impl_t *dndb;
	arc_buf_t *dnbuf;
	zbookmark_phys_t zb;
	int err;

	if ((flags & DB_RF_NO_DECRYPT) != 0 ||
	    !os->os_encrypted || os->os_raw_receive ||
	    (dndb = dn->dn_dbuf) == NULL)
		return (0);

	dnbuf = dndb->db_buf;
	if (!arc_is_encrypted(dnbuf))
		return (0);

	mutex_enter(&dndb->db_mtx);

	/*
	 * Since the dnode buffer is modified by the sync process, there can
	 * be only one copy of it.  It means we cannot modify (decrypt) it
	 * while it is being written.  I don't see how this may happen now,
	 * since encrypted dnode writes by receive should be completed before
	 * any plain-text reads due to txg wait, but better be safe than
	 * sorry.
	 */
	while (1) {
		if (!arc_is_encrypted(dnbuf)) {
			mutex_exit(&dndb->db_mtx);
			return (0);
		}
		dbuf_dirty_record_t *dr = dndb->db_data_pending;
		if (dr == NULL || dr->dt.dl.dr_data != dnbuf)
			break;
		cv_wait(&dndb->db_changed, &dndb->db_mtx);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dndb->db_blkid);
	err = arc_untransform(dnbuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	mutex_exit(&dndb->db_mtx);

	return (err);
}
1574 
1575 /*
1576  * Drops db_mtx and the parent lock specified by dblt and tag before
1577  * returning.
1578  */
1579 static int
1580 dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, uint32_t flags,
1581     db_lock_type_t dblt, blkptr_t *bp, const void *tag)
1582 {
1583 	zbookmark_phys_t zb;
1584 	uint32_t aflags = ARC_FLAG_NOWAIT;
1585 	int err, zio_flags;
1586 
1587 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1588 	ASSERT(MUTEX_HELD(&db->db_mtx));
1589 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1590 	ASSERT(db->db_buf == NULL);
1591 	ASSERT(db->db_parent == NULL ||
1592 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1593 
1594 	if (db->db_blkid == DMU_BONUS_BLKID) {
1595 		err = dbuf_read_bonus(db, dn);
1596 		goto early_unlock;
1597 	}
1598 
1599 	err = dbuf_read_hole(db, dn, bp);
1600 	if (err == 0)
1601 		goto early_unlock;
1602 
1603 	ASSERT(bp != NULL);
1604 
1605 	/*
1606 	 * Any attempt to read a redacted block should result in an error. This
1607 	 * will never happen under normal conditions, but can be useful for
1608 	 * debugging purposes.
1609 	 */
1610 	if (BP_IS_REDACTED(bp)) {
1611 		ASSERT(dsl_dataset_feature_is_active(
1612 		    db->db_objset->os_dsl_dataset,
1613 		    SPA_FEATURE_REDACTED_DATASETS));
1614 		err = SET_ERROR(EIO);
1615 		goto early_unlock;
1616 	}
1617 
1618 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1619 	    db->db.db_object, db->db_level, db->db_blkid);
1620 
1621 	/*
1622 	 * All bps of an encrypted os should have the encryption bit set.
1623 	 * If this is not true it indicates tampering and we report an error.
1624 	 */
1625 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bp)) {
1626 		spa_log_error(db->db_objset->os_spa, &zb,
1627 		    BP_GET_LOGICAL_BIRTH(bp));
1628 		err = SET_ERROR(EIO);
1629 		goto early_unlock;
1630 	}
1631 
1632 	db->db_state = DB_READ;
1633 	DTRACE_SET_STATE(db, "read issued");
1634 	mutex_exit(&db->db_mtx);
1635 
1636 	if (!DBUF_IS_CACHEABLE(db))
1637 		aflags |= ARC_FLAG_UNCACHED;
1638 	else if (dbuf_is_l2cacheable(db, bp))
1639 		aflags |= ARC_FLAG_L2CACHE;
1640 
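	/*
	 * Take a hold on the dbuf for the duration of the read; it is
	 * dropped once the read completes (see dbuf_read_done()).
	 */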
1641 	dbuf_add_ref(db, NULL);
1642 
1643 	zio_flags = (flags & DB_RF_CANFAIL) ?
1644 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1645 
1646 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(bp))
1647 		zio_flags |= ZIO_FLAG_RAW;
1648 
1649 	/*
1650 	 * The zio layer will copy the provided blkptr later, but we need to
1651 	 * do this now so that we can release the parent's rwlock. We have to
1652 	 * do that now so that if dbuf_read_done is called synchronously (on
1653 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1654 	 * parent's rwlock, which would be a lock ordering violation.
1655 	 */
1656 	blkptr_t copy = *bp;
1657 	dmu_buf_unlock_parent(db, dblt, tag);
1658 	return (arc_read(zio, db->db_objset->os_spa, &copy,
1659 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1660 	    &aflags, &zb));
1661 
1662 early_unlock:
1663 	mutex_exit(&db->db_mtx);
1664 	dmu_buf_unlock_parent(db, dblt, tag);
1665 	return (err);
1666 }
1667 
1668 /*
1669  * This is our just-in-time copy function.  It makes a copy of buffers that
1670  * have been modified in a previous transaction group before we access them in
1671  * the current active group.
1672  *
1673  * This function is used in three places: when we are dirtying a buffer for the
1674  * first time in a txg, when we are freeing a range in a dnode that includes
1675  * this buffer, and when we are accessing a buffer which was received compressed
1676  * and later referenced in a WRITE_BYREF record.
1677  *
1678  * Note that when we are called from dbuf_free_range() we do not put a hold on
1679  * the buffer, we just traverse the active dbuf list for the dnode.
1680  */
1681 static void
1682 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1683 {
1684 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1685 
1686 	ASSERT(MUTEX_HELD(&db->db_mtx));
1687 	ASSERT(db->db.db_data != NULL);
1688 	ASSERT(db->db_level == 0);
1689 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1690 
1691 	if (dr == NULL ||
1692 	    (dr->dt.dl.dr_data !=
1693 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1694 		return;
1695 
1696 	/*
1697 	 * If the last dirty record for this dbuf has not yet synced
1698 	 * and its referencing the dbuf data, either:
1699 	 *	reset the reference to point to a new copy,
1700 	 * or (if there a no active holders)
1701 	 *	just null out the current db_data pointer.
1702 	 */
1703 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1704 	if (db->db_blkid == DMU_BONUS_BLKID) {
1705 		dnode_t *dn = DB_DNODE(db);
1706 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1707 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1708 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1709 		memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1710 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1711 		dnode_t *dn = DB_DNODE(db);
1712 		int size = arc_buf_size(db->db_buf);
1713 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1714 		spa_t *spa = db->db_objset->os_spa;
1715 		enum zio_compress compress_type =
1716 		    arc_get_compression(db->db_buf);
1717 		uint8_t complevel = arc_get_complevel(db->db_buf);
1718 
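		/*
		 * Allocate the copy in the same form as the existing buffer
		 * (raw/encrypted, compressed, or plain) so that the previous
		 * contents are preserved exactly for the pending sync.
		 */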
1719 		if (arc_is_encrypted(db->db_buf)) {
1720 			boolean_t byteorder;
1721 			uint8_t salt[ZIO_DATA_SALT_LEN];
1722 			uint8_t iv[ZIO_DATA_IV_LEN];
1723 			uint8_t mac[ZIO_DATA_MAC_LEN];
1724 
1725 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1726 			    iv, mac);
1727 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1728 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1729 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1730 			    compress_type, complevel);
1731 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1732 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1733 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1734 			    size, arc_buf_lsize(db->db_buf), compress_type,
1735 			    complevel);
1736 		} else {
1737 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1738 		}
1739 		memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1740 	} else {
1741 		db->db_buf = NULL;
1742 		dbuf_clear_data(db);
1743 	}
1744 }
1745 
1746 int
1747 dbuf_read(dmu_buf_impl_t *db, zio_t *pio, uint32_t flags)
1748 {
1749 	dnode_t *dn;
1750 	boolean_t miss = B_TRUE, need_wait = B_FALSE, prefetch;
1751 	int err;
1752 
1753 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1754 
1755 	DB_DNODE_ENTER(db);
1756 	dn = DB_DNODE(db);
1757 
1758 	/*
1759 	 * Ensure that this block's dnode has been decrypted if the caller
1760 	 * has requested decrypted data.
1761 	 */
1762 	err = dbuf_read_verify_dnode_crypt(db, dn, flags);
1763 	if (err != 0)
1764 		goto done;
1765 
1766 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1767 	    (flags & DB_RF_NOPREFETCH) == 0;
1768 
1769 	mutex_enter(&db->db_mtx);
1770 	if (flags & DB_RF_PARTIAL_FIRST)
1771 		db->db_partial_read = B_TRUE;
1772 	else if (!(flags & DB_RF_PARTIAL_MORE))
1773 		db->db_partial_read = B_FALSE;
1774 	miss = (db->db_state != DB_CACHED);
1775 
1776 	if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1777 		/*
1778 		 * Another reader came in while the dbuf was in flight between
1779 		 * UNCACHED and CACHED.  Either a writer will finish filling
1780 		 * the buffer, sending the dbuf to CACHED, or the first reader's
1781 		 * request will reach the read_done callback and send the dbuf
1782 		 * to CACHED.  Otherwise, a failure occurred and the dbuf will
1783 		 * be sent to UNCACHED.
1784 		 */
1785 		if (flags & DB_RF_NEVERWAIT) {
1786 			mutex_exit(&db->db_mtx);
1787 			DB_DNODE_EXIT(db);
1788 			goto done;
1789 		}
1790 		do {
1791 			ASSERT(db->db_state == DB_READ ||
1792 			    (flags & DB_RF_HAVESTRUCT) == 0);
1793 			DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *, db,
1794 			    zio_t *, pio);
1795 			cv_wait(&db->db_changed, &db->db_mtx);
1796 		} while (db->db_state == DB_READ || db->db_state == DB_FILL);
1797 		if (db->db_state == DB_UNCACHED) {
1798 			err = SET_ERROR(EIO);
1799 			mutex_exit(&db->db_mtx);
1800 			DB_DNODE_EXIT(db);
1801 			goto done;
1802 		}
1803 	}
1804 
1805 	if (db->db_state == DB_CACHED) {
1806 		/*
1807 		 * If the arc buf is compressed or encrypted and the caller
1808 		 * requested uncompressed data, we need to untransform it
1809 		 * before returning. We also call arc_untransform() on any
1810 		 * unauthenticated blocks, which will verify their MAC if
1811 		 * the key is now available.
1812 		 */
1813 		if ((flags & DB_RF_NO_DECRYPT) == 0 && db->db_buf != NULL &&
1814 		    (arc_is_encrypted(db->db_buf) ||
1815 		    arc_is_unauthenticated(db->db_buf) ||
1816 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1817 			spa_t *spa = dn->dn_objset->os_spa;
1818 			zbookmark_phys_t zb;
1819 
1820 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1821 			    db->db.db_object, db->db_level, db->db_blkid);
1822 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1823 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1824 			dbuf_set_data(db, db->db_buf);
1825 		}
1826 		mutex_exit(&db->db_mtx);
1827 	} else {
1828 		ASSERT(db->db_state == DB_UNCACHED ||
1829 		    db->db_state == DB_NOFILL);
1830 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1831 		blkptr_t *bp;
1832 
1833 		/*
1834 		 * If a block clone or Direct I/O write has occurred we will
1835 		 * get the dirty records overridden BP so we get the most
1836 		 * recent data.
1837 		 */
1838 		err = dmu_buf_get_bp_from_dbuf(db, &bp);
1839 
1840 		if (!err) {
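			/*
			 * If the caller did not supply a parent zio, create
			 * a root zio here so the read can be waited on below.
			 */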
1841 			if (pio == NULL && (db->db_state == DB_NOFILL ||
1842 			    (bp != NULL && !BP_IS_HOLE(bp)))) {
1843 				spa_t *spa = dn->dn_objset->os_spa;
1844 				pio =
1845 				    zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1846 				need_wait = B_TRUE;
1847 			}
1848 
1849 			err =
1850 			    dbuf_read_impl(db, dn, pio, flags, dblt, bp, FTAG);
1851 		} else {
1852 			mutex_exit(&db->db_mtx);
1853 			dmu_buf_unlock_parent(db, dblt, FTAG);
1854 		}
1855 		/* dbuf_read_impl drops db_mtx and parent's rwlock. */
1856 		miss = (db->db_state != DB_CACHED);
1857 	}
1858 
1859 	if (err == 0 && prefetch) {
1860 		dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE, miss,
1861 		    flags & DB_RF_HAVESTRUCT);
1862 	}
1863 	DB_DNODE_EXIT(db);
1864 
1865 	/*
1866 	 * If we created a zio we must execute it to avoid leaking it, even if
1867 	 * it isn't attached to any work due to an error in dbuf_read_impl().
1868 	 */
1869 	if (need_wait) {
1870 		if (err == 0)
1871 			err = zio_wait(pio);
1872 		else
1873 			(void) zio_wait(pio);
1874 		pio = NULL;
1875 	}
1876 
1877 done:
1878 	if (miss)
1879 		DBUF_STAT_BUMP(hash_misses);
1880 	else
1881 		DBUF_STAT_BUMP(hash_hits);
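	/*
	 * If the caller supplied a parent zio, propagate the error through
	 * it by attaching a null child zio carrying the error code.
	 */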
1882 	if (pio && err != 0) {
1883 		zio_t *zio = zio_null(pio, pio->io_spa, NULL, NULL, NULL,
1884 		    ZIO_FLAG_CANFAIL);
1885 		zio->io_error = err;
1886 		zio_nowait(zio);
1887 	}
1888 
1889 	return (err);
1890 }
1891 
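/*
 * Prepare a dbuf to be overwritten without reading its current contents:
 * wait out any in-flight read or fill, then attach a fresh buffer if the
 * dbuf is uncached.
 */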
1892 static void
1893 dbuf_noread(dmu_buf_impl_t *db)
1894 {
1895 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1896 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1897 	mutex_enter(&db->db_mtx);
1898 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1899 		cv_wait(&db->db_changed, &db->db_mtx);
1900 	if (db->db_state == DB_UNCACHED) {
1901 		ASSERT(db->db_buf == NULL);
1902 		ASSERT(db->db.db_data == NULL);
1903 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1904 		db->db_state = DB_FILL;
1905 		DTRACE_SET_STATE(db, "assigning filled buffer");
1906 	} else if (db->db_state == DB_NOFILL) {
1907 		dbuf_clear_data(db);
1908 	} else {
1909 		ASSERT3U(db->db_state, ==, DB_CACHED);
1910 	}
1911 	mutex_exit(&db->db_mtx);
1912 }
1913 
1914 void
1915 dbuf_unoverride(dbuf_dirty_record_t *dr)
1916 {
1917 	dmu_buf_impl_t *db = dr->dr_dbuf;
1918 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1919 	uint64_t txg = dr->dr_txg;
1920 
1921 	ASSERT(MUTEX_HELD(&db->db_mtx));
1922 
1923 	/*
1924 	 * This assert is valid because dmu_sync() expects to be called by
1925 	 * a zilog's get_data while holding a range lock.  This call only
1926 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1927 	 */
1928 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1929 	ASSERT(db->db_level == 0);
1930 
1931 	if (db->db_blkid == DMU_BONUS_BLKID ||
1932 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1933 		return;
1934 
1935 	ASSERT(db->db_data_pending != dr);
1936 
1937 	/* free this block */
1938 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1939 		zio_free(db->db_objset->os_spa, txg, bp);
1940 
1941 	if (dr->dt.dl.dr_brtwrite || dr->dt.dl.dr_diowrite) {
1942 		ASSERT0P(dr->dt.dl.dr_data);
1943 		dr->dt.dl.dr_data = db->db_buf;
1944 	}
1945 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1946 	dr->dt.dl.dr_nopwrite = B_FALSE;
1947 	dr->dt.dl.dr_brtwrite = B_FALSE;
1948 	dr->dt.dl.dr_diowrite = B_FALSE;
1949 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1950 
1951 	/*
1952 	 * In the event that Direct I/O was used, we do not
1953 	 * need to release the buffer from the ARC.
1954 	 *
1955 	 * Release the already-written buffer, so we leave it in
1956 	 * a consistent dirty state.  Note that all callers are
1957 	 * modifying the buffer, so they will immediately do
1958 	 * another (redundant) arc_release().  Therefore, leave
1959 	 * the buf thawed to save the effort of freezing &
1960 	 * immediately re-thawing it.
1961 	 */
1962 	if (dr->dt.dl.dr_data)
1963 		arc_release(dr->dt.dl.dr_data, db);
1964 }
1965 
1966 /*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1968  * data blocks in the free range, so that any future readers will find
1969  * empty blocks.
1970  */
1971 void
1972 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1973     dmu_tx_t *tx)
1974 {
1975 	dmu_buf_impl_t *db_search;
1976 	dmu_buf_impl_t *db, *db_next;
1977 	uint64_t txg = tx->tx_txg;
1978 	avl_index_t where;
1979 	dbuf_dirty_record_t *dr;
1980 
1981 	if (end_blkid > dn->dn_maxblkid &&
1982 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1983 		end_blkid = dn->dn_maxblkid;
1984 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1985 	    (u_longlong_t)end_blkid);
1986 
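	/*
	 * Build a search key so avl_find()/avl_nearest() can locate the
	 * first level-0 dbuf at or after start_blkid.
	 */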
1987 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1988 	db_search->db_level = 0;
1989 	db_search->db_blkid = start_blkid;
1990 	db_search->db_state = DB_SEARCH;
1991 
1992 	mutex_enter(&dn->dn_dbufs_mtx);
1993 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1994 	ASSERT3P(db, ==, NULL);
1995 
1996 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1997 
1998 	for (; db != NULL; db = db_next) {
1999 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
2000 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2001 
2002 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
2003 			break;
2004 		}
2005 		ASSERT3U(db->db_blkid, >=, start_blkid);
2006 
2007 		/* found a level 0 buffer in the range */
2008 		mutex_enter(&db->db_mtx);
2009 		if (dbuf_undirty(db, tx)) {
2010 			/* mutex has been dropped and dbuf destroyed */
2011 			continue;
2012 		}
2013 
2014 		if (db->db_state == DB_UNCACHED ||
2015 		    db->db_state == DB_NOFILL ||
2016 		    db->db_state == DB_EVICTING) {
2017 			ASSERT(db->db.db_data == NULL);
2018 			mutex_exit(&db->db_mtx);
2019 			continue;
2020 		}
2021 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2022 			/* will be handled in dbuf_read_done or dbuf_rele */
2023 			db->db_freed_in_flight = TRUE;
2024 			mutex_exit(&db->db_mtx);
2025 			continue;
2026 		}
2027 		if (zfs_refcount_count(&db->db_holds) == 0) {
2028 			ASSERT(db->db_buf);
2029 			dbuf_destroy(db);
2030 			continue;
2031 		}
2032 		/* The dbuf is referenced */
2033 
2034 		dr = list_head(&db->db_dirty_records);
2035 		if (dr != NULL) {
2036 			if (dr->dr_txg == txg) {
2037 				/*
2038 				 * This buffer is "in-use", re-adjust the file
2039 				 * size to reflect that this buffer may
2040 				 * contain new data when we sync.
2041 				 */
2042 				if (db->db_blkid != DMU_SPILL_BLKID &&
2043 				    db->db_blkid > dn->dn_maxblkid)
2044 					dn->dn_maxblkid = db->db_blkid;
2045 				dbuf_unoverride(dr);
2046 			} else {
2047 				/*
2048 				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
2050 				 * the open context) or reset its contents to
2051 				 * empty.
2052 				 */
2053 				dbuf_fix_old_data(db, txg);
2054 			}
2055 		}
		/* clear the contents if it's cached */
2057 		if (db->db_state == DB_CACHED) {
2058 			ASSERT(db->db.db_data != NULL);
2059 			arc_release(db->db_buf, db);
2060 			rw_enter(&db->db_rwlock, RW_WRITER);
2061 			memset(db->db.db_data, 0, db->db.db_size);
2062 			rw_exit(&db->db_rwlock);
2063 			arc_buf_freeze(db->db_buf);
2064 		}
2065 
2066 		mutex_exit(&db->db_mtx);
2067 	}
2068 
2069 	mutex_exit(&dn->dn_dbufs_mtx);
2070 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
2071 }
2072 
2073 void
2074 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2075 {
2076 	arc_buf_t *buf, *old_buf;
2077 	dbuf_dirty_record_t *dr;
2078 	int osize = db->db.db_size;
2079 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2080 	dnode_t *dn;
2081 
2082 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2083 
2084 	DB_DNODE_ENTER(db);
2085 	dn = DB_DNODE(db);
2086 
2087 	/*
2088 	 * XXX we should be doing a dbuf_read, checking the return
2089 	 * value and returning that up to our callers
2090 	 */
2091 	dmu_buf_will_dirty(&db->db, tx);
2092 
2093 	VERIFY3P(db->db_buf, !=, NULL);
2094 
2095 	/* create the data buffer for the new block */
2096 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2097 
2098 	/* copy old block data to the new block */
2099 	old_buf = db->db_buf;
2100 	memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2101 	/* zero the remainder */
2102 	if (size > osize)
2103 		memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2104 
2105 	mutex_enter(&db->db_mtx);
2106 	dbuf_set_data(db, buf);
2107 	arc_buf_destroy(old_buf, db);
2108 	db->db.db_size = size;
2109 
2110 	dr = list_head(&db->db_dirty_records);
2111 	/* dirty record added by dmu_buf_will_dirty() */
2112 	VERIFY(dr != NULL);
2113 	if (db->db_level == 0)
2114 		dr->dt.dl.dr_data = buf;
2115 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2116 	ASSERT3U(dr->dr_accounted, ==, osize);
2117 	dr->dr_accounted = size;
2118 	mutex_exit(&db->db_mtx);
2119 
2120 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2121 	DB_DNODE_EXIT(db);
2122 }
2123 
2124 void
2125 dbuf_release_bp(dmu_buf_impl_t *db)
2126 {
2127 	objset_t *os __maybe_unused = db->db_objset;
2128 
2129 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2130 	ASSERT(arc_released(os->os_phys_buf) ||
2131 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
2132 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2133 
2134 	(void) arc_release(db->db_buf, db);
2135 }
2136 
2137 /*
2138  * We already have a dirty record for this TXG, and we are being
2139  * dirtied again.
2140  */
2141 static void
2142 dbuf_redirty(dbuf_dirty_record_t *dr)
2143 {
2144 	dmu_buf_impl_t *db = dr->dr_dbuf;
2145 
2146 	ASSERT(MUTEX_HELD(&db->db_mtx));
2147 
2148 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2149 		/*
2150 		 * If this buffer has already been written out,
2151 		 * we now need to reset its state.
2152 		 */
2153 		dbuf_unoverride(dr);
2154 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2155 		    db->db_state != DB_NOFILL) {
2156 			/* Already released on initial dirty, so just thaw. */
2157 			ASSERT(arc_released(db->db_buf));
2158 			arc_buf_thaw(db->db_buf);
2159 		}
2160 	}
2161 }
2162 
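/*
 * Dirty a block without holding a dbuf for it.  The resulting lightweight
 * dirty record is linked under the parent indirect block's dirty record,
 * or directly onto the dnode's dirty list when the object has one level.
 */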
2163 dbuf_dirty_record_t *
2164 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2165 {
2166 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2167 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2168 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2169 	ASSERT(dn->dn_maxblkid >= blkid);
2170 
2171 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2172 	list_link_init(&dr->dr_dirty_node);
2173 	list_link_init(&dr->dr_dbuf_node);
2174 	dr->dr_dnode = dn;
2175 	dr->dr_txg = tx->tx_txg;
2176 	dr->dt.dll.dr_blkid = blkid;
2177 	dr->dr_accounted = dn->dn_datablksz;
2178 
2179 	/*
2180 	 * There should not be any dbuf for the block that we're dirtying.
2181 	 * Otherwise the buffer contents could be inconsistent between the
2182 	 * dbuf and the lightweight dirty record.
2183 	 */
2184 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2185 	    NULL));
2186 
2187 	mutex_enter(&dn->dn_mtx);
2188 	int txgoff = tx->tx_txg & TXG_MASK;
2189 	if (dn->dn_free_ranges[txgoff] != NULL) {
2190 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2191 	}
2192 
2193 	if (dn->dn_nlevels == 1) {
2194 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2195 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2196 		mutex_exit(&dn->dn_mtx);
2197 		rw_exit(&dn->dn_struct_rwlock);
2198 		dnode_setdirty(dn, tx);
2199 	} else {
2200 		mutex_exit(&dn->dn_mtx);
2201 
2202 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2203 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2204 		    1, blkid >> epbs, FTAG);
2205 		rw_exit(&dn->dn_struct_rwlock);
2206 		if (parent_db == NULL) {
2207 			kmem_free(dr, sizeof (*dr));
2208 			return (NULL);
2209 		}
2210 		int err = dbuf_read(parent_db, NULL,
2211 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2212 		if (err != 0) {
2213 			dbuf_rele(parent_db, FTAG);
2214 			kmem_free(dr, sizeof (*dr));
2215 			return (NULL);
2216 		}
2217 
2218 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2219 		dbuf_rele(parent_db, FTAG);
2220 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2221 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2222 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2223 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2224 		dr->dr_parent = parent_dr;
2225 	}
2226 
2227 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2228 
2229 	return (dr);
2230 }
2231 
2232 dbuf_dirty_record_t *
2233 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2234 {
2235 	dnode_t *dn;
2236 	objset_t *os;
2237 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2238 	int txgoff = tx->tx_txg & TXG_MASK;
2239 	boolean_t drop_struct_rwlock = B_FALSE;
2240 
2241 	ASSERT(tx->tx_txg != 0);
2242 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2243 	DMU_TX_DIRTY_BUF(tx, db);
2244 
2245 	DB_DNODE_ENTER(db);
2246 	dn = DB_DNODE(db);
2247 	/*
2248 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2249 	 * objects may be dirtied in syncing context, but only if they
2250 	 * were already pre-dirtied in open context.
2251 	 */
2252 #ifdef ZFS_DEBUG
2253 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2254 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2255 		    RW_READER, FTAG);
2256 	}
2257 	ASSERT(!dmu_tx_is_syncing(tx) ||
2258 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2259 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2260 	    dn->dn_objset->os_dsl_dataset == NULL);
2261 	if (dn->dn_objset->os_dsl_dataset != NULL)
2262 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2263 #endif
2264 	/*
2265 	 * We make this assert for private objects as well, but after we
2266 	 * check if we're already dirty.  They are allowed to re-dirty
2267 	 * in syncing context.
2268 	 */
2269 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2270 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2271 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2272 
2273 	mutex_enter(&db->db_mtx);
2274 	/*
2275 	 * XXX make this true for indirects too?  The problem is that
2276 	 * transactions created with dmu_tx_create_assigned() from
2277 	 * syncing context don't bother holding ahead.
2278 	 */
2279 	ASSERT(db->db_level != 0 ||
2280 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2281 	    db->db_state == DB_NOFILL);
2282 
2283 	mutex_enter(&dn->dn_mtx);
2284 	dnode_set_dirtyctx(dn, tx, db);
2285 	if (tx->tx_txg > dn->dn_dirty_txg)
2286 		dn->dn_dirty_txg = tx->tx_txg;
2287 	mutex_exit(&dn->dn_mtx);
2288 
2289 	if (db->db_blkid == DMU_SPILL_BLKID)
2290 		dn->dn_have_spill = B_TRUE;
2291 
2292 	/*
2293 	 * If this buffer is already dirty, we're done.
2294 	 */
2295 	dr_head = list_head(&db->db_dirty_records);
2296 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2297 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2298 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2299 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2300 		DB_DNODE_EXIT(db);
2301 
2302 		dbuf_redirty(dr_next);
2303 		mutex_exit(&db->db_mtx);
2304 		return (dr_next);
2305 	}
2306 
2307 	/*
2308 	 * Only valid if not already dirty.
2309 	 */
2310 	ASSERT(dn->dn_object == 0 ||
2311 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2312 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2313 
2314 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2315 
2316 	/*
2317 	 * We should only be dirtying in syncing context if it's the
2318 	 * mos or we're initializing the os or it's a special object.
2319 	 * However, we are allowed to dirty in syncing context provided
2320 	 * we already dirtied it in open context.  Hence we must make
2321 	 * this assertion only if we're not already dirty.
2322 	 */
2323 	os = dn->dn_objset;
2324 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2325 #ifdef ZFS_DEBUG
2326 	if (dn->dn_objset->os_dsl_dataset != NULL)
2327 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2328 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2329 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2330 	if (dn->dn_objset->os_dsl_dataset != NULL)
2331 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2332 #endif
2333 	ASSERT(db->db.db_size != 0);
2334 
2335 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2336 
2337 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2338 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2339 	}
2340 
2341 	/*
2342 	 * If this buffer is dirty in an old transaction group we need
2343 	 * to make a copy of it so that the changes we make in this
2344 	 * transaction group won't leak out when we sync the older txg.
2345 	 */
2346 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2347 	list_link_init(&dr->dr_dirty_node);
2348 	list_link_init(&dr->dr_dbuf_node);
2349 	dr->dr_dnode = dn;
2350 	if (db->db_level == 0) {
2351 		void *data_old = db->db_buf;
2352 
2353 		if (db->db_state != DB_NOFILL) {
2354 			if (db->db_blkid == DMU_BONUS_BLKID) {
2355 				dbuf_fix_old_data(db, tx->tx_txg);
2356 				data_old = db->db.db_data;
2357 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2358 				/*
2359 				 * Release the data buffer from the cache so
2360 				 * that we can modify it without impacting
2361 				 * possible other users of this cached data
2362 				 * block.  Note that indirect blocks and
2363 				 * private objects are not released until the
2364 				 * syncing state (since they are only modified
2365 				 * then).
2366 				 */
2367 				arc_release(db->db_buf, db);
2368 				dbuf_fix_old_data(db, tx->tx_txg);
2369 				data_old = db->db_buf;
2370 			}
2371 			ASSERT(data_old != NULL);
2372 		}
2373 		dr->dt.dl.dr_data = data_old;
2374 	} else {
2375 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2376 		list_create(&dr->dt.di.dr_children,
2377 		    sizeof (dbuf_dirty_record_t),
2378 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2379 	}
2380 	if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2381 		dr->dr_accounted = db->db.db_size;
2382 	}
2383 	dr->dr_dbuf = db;
2384 	dr->dr_txg = tx->tx_txg;
2385 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2386 
2387 	/*
2388 	 * We could have been freed_in_flight between the dbuf_noread
2389 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2390 	 * happened after the free.
2391 	 */
2392 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2393 	    db->db_blkid != DMU_SPILL_BLKID) {
2394 		mutex_enter(&dn->dn_mtx);
2395 		if (dn->dn_free_ranges[txgoff] != NULL) {
2396 			range_tree_clear(dn->dn_free_ranges[txgoff],
2397 			    db->db_blkid, 1);
2398 		}
2399 		mutex_exit(&dn->dn_mtx);
2400 		db->db_freed_in_flight = FALSE;
2401 	}
2402 
2403 	/*
2404 	 * This buffer is now part of this txg
2405 	 */
2406 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2407 	db->db_dirtycnt += 1;
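	/*
	 * A dbuf can be dirty in no more than three txgs at once: the
	 * open, quiescing, and syncing txgs.
	 */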
2408 	ASSERT3U(db->db_dirtycnt, <=, 3);
2409 
2410 	mutex_exit(&db->db_mtx);
2411 
2412 	if (db->db_blkid == DMU_BONUS_BLKID ||
2413 	    db->db_blkid == DMU_SPILL_BLKID) {
2414 		mutex_enter(&dn->dn_mtx);
2415 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2416 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2417 		mutex_exit(&dn->dn_mtx);
2418 		dnode_setdirty(dn, tx);
2419 		DB_DNODE_EXIT(db);
2420 		return (dr);
2421 	}
2422 
2423 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2424 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2425 		drop_struct_rwlock = B_TRUE;
2426 	}
2427 
2428 	/*
2429 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2430 	 * when we get to syncing context we will need to decrement its
2431 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2432 	 * syncing context won't have to wait for the i/o.
2433 	 */
2434 	if (db->db_blkptr != NULL) {
2435 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2436 		ddt_prefetch(os->os_spa, db->db_blkptr);
2437 		dmu_buf_unlock_parent(db, dblt, FTAG);
2438 	}
2439 
2440 	/*
2441 	 * We need to hold the dn_struct_rwlock to make this assertion,
2442 	 * because it protects dn_phys / dn_next_nlevels from changing.
2443 	 */
2444 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2445 	    dn->dn_phys->dn_nlevels > db->db_level ||
2446 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2447 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

2451 	if (db->db_level == 0) {
2452 		ASSERT(!db->db_objset->os_raw_receive ||
2453 		    dn->dn_maxblkid >= db->db_blkid);
2454 		dnode_new_blkid(dn, db->db_blkid, tx,
2455 		    drop_struct_rwlock, B_FALSE);
2456 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2457 	}
2458 
2459 	if (db->db_level+1 < dn->dn_nlevels) {
2460 		dmu_buf_impl_t *parent = db->db_parent;
2461 		dbuf_dirty_record_t *di;
2462 		int parent_held = FALSE;
2463 
2464 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2465 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2466 			parent = dbuf_hold_level(dn, db->db_level + 1,
2467 			    db->db_blkid >> epbs, FTAG);
2468 			ASSERT(parent != NULL);
2469 			parent_held = TRUE;
2470 		}
2471 		if (drop_struct_rwlock)
2472 			rw_exit(&dn->dn_struct_rwlock);
2473 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2474 		di = dbuf_dirty(parent, tx);
2475 		if (parent_held)
2476 			dbuf_rele(parent, FTAG);
2477 
2478 		mutex_enter(&db->db_mtx);
2479 		/*
2480 		 * Since we've dropped the mutex, it's possible that
2481 		 * dbuf_undirty() might have changed this out from under us.
2482 		 */
2483 		if (list_head(&db->db_dirty_records) == dr ||
2484 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2485 			mutex_enter(&di->dt.di.dr_mtx);
2486 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2487 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2488 			list_insert_tail(&di->dt.di.dr_children, dr);
2489 			mutex_exit(&di->dt.di.dr_mtx);
2490 			dr->dr_parent = di;
2491 		}
2492 		mutex_exit(&db->db_mtx);
2493 	} else {
2494 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2495 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2496 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2497 		mutex_enter(&dn->dn_mtx);
2498 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2499 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2500 		mutex_exit(&dn->dn_mtx);
2501 		if (drop_struct_rwlock)
2502 			rw_exit(&dn->dn_struct_rwlock);
2503 	}
2504 
2505 	dnode_setdirty(dn, tx);
2506 	DB_DNODE_EXIT(db);
2507 	return (dr);
2508 }
2509 
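/*
 * Tear down a bonus buffer's dirty record: free its private data copy if
 * it is not the live db_data, unlink the record, and drop the dbuf's
 * dirty count.
 */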
2510 static void
2511 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2512 {
2513 	dmu_buf_impl_t *db = dr->dr_dbuf;
2514 
2515 	if (dr->dt.dl.dr_data != db->db.db_data) {
2516 		struct dnode *dn = dr->dr_dnode;
2517 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2518 
2519 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2520 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2521 	}
2522 	db->db_data_pending = NULL;
2523 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2524 	list_remove(&db->db_dirty_records, dr);
2525 	if (dr->dr_dbuf->db_level != 0) {
2526 		mutex_destroy(&dr->dt.di.dr_mtx);
2527 		list_destroy(&dr->dt.di.dr_children);
2528 	}
2529 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2530 	ASSERT3U(db->db_dirtycnt, >, 0);
2531 	db->db_dirtycnt -= 1;
2532 }
2533 
2534 /*
2535  * Undirty a buffer in the transaction group referenced by the given
2536  * transaction.  Return whether this evicted the dbuf.
2537  */
2538 boolean_t
2539 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2540 {
2541 	uint64_t txg = tx->tx_txg;
2542 	boolean_t brtwrite;
2543 	boolean_t diowrite;
2544 
2545 	ASSERT(txg != 0);
2546 
2547 	/*
2548 	 * Due to our use of dn_nlevels below, this can only be called
2549 	 * in open context, unless we are operating on the MOS.
2550 	 * From syncing context, dn_nlevels may be different from the
2551 	 * dn_nlevels used when dbuf was dirtied.
2552 	 */
2553 	ASSERT(db->db_objset ==
2554 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2555 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2556 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2557 	ASSERT0(db->db_level);
2558 	ASSERT(MUTEX_HELD(&db->db_mtx));
2559 
2560 	/*
2561 	 * If this buffer is not dirty, we're done.
2562 	 */
2563 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2564 	if (dr == NULL)
2565 		return (B_FALSE);
2566 	ASSERT(dr->dr_dbuf == db);
2567 
2568 	brtwrite = dr->dt.dl.dr_brtwrite;
2569 	diowrite = dr->dt.dl.dr_diowrite;
2570 	if (brtwrite) {
2571 		ASSERT3B(diowrite, ==, B_FALSE);
2572 		/*
2573 		 * We are freeing a block that we cloned in the same
2574 		 * transaction group.
2575 		 */
2576 		brt_pending_remove(dmu_objset_spa(db->db_objset),
2577 		    &dr->dt.dl.dr_overridden_by, tx);
2578 	}
2579 
2580 	dnode_t *dn = dr->dr_dnode;
2581 
2582 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2583 
2584 	ASSERT(db->db.db_size != 0);
2585 
2586 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2587 	    dr->dr_accounted, txg);
2588 
2589 	list_remove(&db->db_dirty_records, dr);
2590 
2591 	/*
2592 	 * Note that there are three places in dbuf_dirty()
2593 	 * where this dirty record may be put on a list.
2594 	 * Make sure to do a list_remove corresponding to
2595 	 * every one of those list_insert calls.
2596 	 */
2597 	if (dr->dr_parent) {
2598 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2599 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2600 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2601 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2602 	    db->db_level + 1 == dn->dn_nlevels) {
2603 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2604 		mutex_enter(&dn->dn_mtx);
2605 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2606 		mutex_exit(&dn->dn_mtx);
2607 	}
2608 
2609 	if (db->db_state != DB_NOFILL && !brtwrite) {
2610 		dbuf_unoverride(dr);
2611 
2612 		if (dr->dt.dl.dr_data != db->db_buf) {
2613 			ASSERT(db->db_buf != NULL);
2614 			ASSERT(dr->dt.dl.dr_data != NULL);
2615 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2616 		}
2617 	}
2618 
2619 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2620 
2621 	ASSERT(db->db_dirtycnt > 0);
2622 	db->db_dirtycnt -= 1;
2623 
2624 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2625 		ASSERT(db->db_state == DB_NOFILL || brtwrite || diowrite ||
2626 		    arc_released(db->db_buf));
2627 		dbuf_destroy(db);
2628 		return (B_TRUE);
2629 	}
2630 
2631 	return (B_FALSE);
2632 }
2633 
2634 static void
2635 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2636 {
2637 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2638 	boolean_t undirty = B_FALSE;
2639 
2640 	ASSERT(tx->tx_txg != 0);
2641 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2642 
2643 	/*
2644 	 * Quick check for dirtiness to improve performance for some workloads
2645 	 * (e.g. file deletion with indirect blocks cached).
2646 	 */
2647 	mutex_enter(&db->db_mtx);
2648 	if (db->db_state == DB_CACHED || db->db_state == DB_NOFILL) {
2649 		/*
2650 		 * It's possible that the dbuf is already dirty but not cached,
2651 		 * because there are some calls to dbuf_dirty() that don't
2652 		 * go through dmu_buf_will_dirty().
2653 		 */
2654 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2655 		if (dr != NULL) {
2656 			if (db->db_level == 0 &&
2657 			    dr->dt.dl.dr_brtwrite) {
2658 				/*
2659 				 * Block cloning: If we are dirtying a cloned
2660 				 * level 0 block, we cannot simply redirty it,
2661 				 * because this dr has no associated data.
2662 				 * We will go through a full undirtying below,
2663 				 * before dirtying it again.
2664 				 */
2665 				undirty = B_TRUE;
2666 			} else {
2667 				/* This dbuf is already dirty and cached. */
2668 				dbuf_redirty(dr);
2669 				mutex_exit(&db->db_mtx);
2670 				return;
2671 			}
2672 		}
2673 	}
2674 	mutex_exit(&db->db_mtx);
2675 
2676 	DB_DNODE_ENTER(db);
2677 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2678 		flags |= DB_RF_HAVESTRUCT;
2679 	DB_DNODE_EXIT(db);
2680 
2681 	/*
2682 	 * Block cloning: Do the dbuf_read() before undirtying the dbuf, as we
2683 	 * want to make sure dbuf_read() will read the pending cloned block and
	 * not the underlying block that is being replaced. dbuf_undirty() will
2685 	 * do brt_pending_remove() before removing the dirty record.
2686 	 */
2687 	(void) dbuf_read(db, NULL, flags);
2688 	if (undirty) {
2689 		mutex_enter(&db->db_mtx);
2690 		VERIFY(!dbuf_undirty(db, tx));
2691 		mutex_exit(&db->db_mtx);
2692 	}
2693 	(void) dbuf_dirty(db, tx);
2694 }
2695 
2696 void
2697 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2698 {
2699 	dmu_buf_will_dirty_impl(db_fake,
2700 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2701 }
2702 
2703 boolean_t
2704 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2705 {
2706 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2707 	dbuf_dirty_record_t *dr;
2708 
2709 	mutex_enter(&db->db_mtx);
2710 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2711 	mutex_exit(&db->db_mtx);
2712 	return (dr != NULL);
2713 }
2714 
2715 /*
2716  * Normally the db_blkptr points to the most recent on-disk content for the
2717  * dbuf (and anything newer will be cached in the dbuf). However, a pending
2718  * block clone or not yet synced Direct I/O write will have a dirty record BP
2719  * pointing to the most recent data.
2720  */
2721 int
2722 dmu_buf_get_bp_from_dbuf(dmu_buf_impl_t *db, blkptr_t **bp)
2723 {
2724 	ASSERT(MUTEX_HELD(&db->db_mtx));
2725 	int error = 0;
2726 
2727 	if (db->db_level != 0) {
2728 		*bp = db->db_blkptr;
2729 		return (0);
2730 	}
2731 
2732 	*bp = db->db_blkptr;
2733 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2734 	if (dr && db->db_state == DB_NOFILL) {
2735 		/* Block clone */
2736 		if (!dr->dt.dl.dr_brtwrite)
2737 			error = EIO;
2738 		else
2739 			*bp = &dr->dt.dl.dr_overridden_by;
2740 	} else if (dr && db->db_state == DB_UNCACHED) {
2741 		/* Direct I/O write */
2742 		if (dr->dt.dl.dr_diowrite)
2743 			*bp = &dr->dt.dl.dr_overridden_by;
2744 	}
2745 
2746 	return (error);
2747 }
2748 
2749 /*
2750  * Direct I/O reads can read directly from the ARC, but the data has
2751  * to be untransformed in order to copy it over into user pages.
2752  */
2753 int
2754 dmu_buf_untransform_direct(dmu_buf_impl_t *db, spa_t *spa)
2755 {
2756 	int err = 0;
2757 	DB_DNODE_ENTER(db);
2758 	dnode_t *dn = DB_DNODE(db);
2759 
2760 	ASSERT3S(db->db_state, ==, DB_CACHED);
2761 	ASSERT(MUTEX_HELD(&db->db_mtx));
2762 
2763 	/*
2764 	 * Ensure that this block's dnode has been decrypted if
2765 	 * the caller has requested decrypted data.
2766 	 */
2767 	err = dbuf_read_verify_dnode_crypt(db, dn, 0);
2768 
2769 	/*
2770 	 * If the arc buf is compressed or encrypted and the caller
2771 	 * requested uncompressed data, we need to untransform it
2772 	 * before returning. We also call arc_untransform() on any
2773 	 * unauthenticated blocks, which will verify their MAC if
2774 	 * the key is now available.
2775 	 */
2776 	if (err == 0 && db->db_buf != NULL &&
2777 	    (arc_is_encrypted(db->db_buf) ||
2778 	    arc_is_unauthenticated(db->db_buf) ||
2779 	    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
2780 		zbookmark_phys_t zb;
2781 
2782 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
2783 		    db->db.db_object, db->db_level, db->db_blkid);
2784 		dbuf_fix_old_data(db, spa_syncing_txg(spa));
2785 		err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
2786 		dbuf_set_data(db, db->db_buf);
2787 	}
2788 	DB_DNODE_EXIT(db);
2789 	DBUF_STAT_BUMP(hash_hits);
2790 
2791 	return (err);
2792 }
2793 
2794 void
2795 dmu_buf_will_clone_or_dio(dmu_buf_t *db_fake, dmu_tx_t *tx)
2796 {
2797 	/*
2798 	 * Block clones and Direct I/O writes always happen in open-context.
2799 	 */
2800 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT0(db->db_level);
	ASSERT(!dmu_tx_is_syncing(tx));
2804 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2805 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
2806 
2807 	mutex_enter(&db->db_mtx);
2808 	DBUF_VERIFY(db);
2809 
2810 	/*
2811 	 * We are going to clone or issue a Direct I/O write on this block, so
2812 	 * undirty modifications done to this block so far in this txg. This
2813 	 * includes writes and clones into this block.
2814 	 *
	 * If there is a dirty record associated with this txg from a previous
	 * Direct I/O write, then space accounting cleanup takes place. It is
	 * important to go ahead and free up the space accounting through
	 * dbuf_undirty() -> dbuf_unoverride() -> zio_free(). Space accounting
	 * for determining if a write can occur in zfs_write() happens through
	 * dmu_tx_assign(). This can cause an issue with Direct I/O writes in
	 * the case of overwriting the same block, because all DVA allocations
	 * are done in open-context. Constantly allowing Direct I/O overwrites
	 * to the same block can exhaust the pool's available space, leading to
	 * ENOSPC errors at the DVA allocation part of the ZIO pipeline, which
	 * will eventually suspend the pool. By cleaning up space accounting
	 * now, the ENOSPC error can be avoided.
2827 	 *
2828 	 * Since we are undirtying the record in open-context, we must have a
2829 	 * hold on the db, so it should never be evicted after calling
2830 	 * dbuf_undirty().
2831 	 */
2832 	VERIFY3B(dbuf_undirty(db, tx), ==, B_FALSE);
2833 	ASSERT0P(dbuf_find_dirty_eq(db, tx->tx_txg));
2834 
2835 	if (db->db_buf != NULL) {
2836 		/*
2837 		 * If there is an associated ARC buffer with this dbuf we can
2838 		 * only destroy it if the previous dirty record does not
2839 		 * reference it.
2840 		 */
2841 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2842 		if (dr == NULL || dr->dt.dl.dr_data != db->db_buf)
2843 			arc_buf_destroy(db->db_buf, db);
2844 
2845 		/*
2846 		 * Setting the dbuf's data pointers to NULL will force all
2847 		 * future reads down to the devices to get the most up to date
2848 		 * version of the data after a Direct I/O write has completed.
2849 		 */
2850 		db->db_buf = NULL;
2851 		dbuf_clear_data(db);
2852 	}
2853 
2854 	ASSERT3P(db->db_buf, ==, NULL);
2855 	ASSERT3P(db->db.db_data, ==, NULL);
2856 
2857 	db->db_state = DB_NOFILL;
2858 	DTRACE_SET_STATE(db,
2859 	    "allocating NOFILL buffer for clone or direct I/O write");
2860 
2861 	DBUF_VERIFY(db);
2862 	mutex_exit(&db->db_mtx);
2863 
2864 	dbuf_noread(db);
2865 	(void) dbuf_dirty(db, tx);
2866 }
2867 
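/*
 * Mark a dbuf as DB_NOFILL: its contents will not be filled through the
 * normal buffer path, e.g. because they will be supplied by an override
 * BP (embedded or redacted).
 */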
2868 void
2869 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2870 {
2871 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2872 
2873 	mutex_enter(&db->db_mtx);
2874 	db->db_state = DB_NOFILL;
2875 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2876 	mutex_exit(&db->db_mtx);
2877 
2878 	dbuf_noread(db);
2879 	(void) dbuf_dirty(db, tx);
2880 }
2881 
2882 void
2883 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
2884 {
2885 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2886 
2887 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2888 	ASSERT(tx->tx_txg != 0);
2889 	ASSERT(db->db_level == 0);
2890 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2891 
2892 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2893 	    dmu_tx_private_ok(tx));
2894 
2895 	mutex_enter(&db->db_mtx);
2896 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2897 	if (db->db_state == DB_NOFILL ||
2898 	    (db->db_state == DB_UNCACHED && dr && dr->dt.dl.dr_diowrite)) {
2899 		/*
		 * If the fill can fail, we should have a way to return to
		 * the cloned or Direct I/O write data.
2902 		 */
2903 		if (canfail && dr) {
2904 			mutex_exit(&db->db_mtx);
2905 			dmu_buf_will_dirty(db_fake, tx);
2906 			return;
2907 		}
2908 		/*
2909 		 * Block cloning: We will be completely overwriting a block
2910 		 * cloned in this transaction group, so let's undirty the
2911 		 * pending clone and mark the block as uncached. This will be
2912 		 * as if the clone was never done.
2913 		 */
2914 		if (dr && dr->dt.dl.dr_brtwrite) {
2915 			VERIFY(!dbuf_undirty(db, tx));
2916 			db->db_state = DB_UNCACHED;
2917 		}
2918 	}
2919 	mutex_exit(&db->db_mtx);
2920 
2921 	dbuf_noread(db);
2922 	(void) dbuf_dirty(db, tx);
2923 }
2924 
2925 /*
2926  * This function is effectively the same as dmu_buf_will_dirty(), but
2927  * indicates the caller expects raw encrypted data in the db, and provides
2928  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2929  * blkptr_t when this dbuf is written.  This is only used for blocks of
2930  * dnodes, during raw receive.
2931  */
2932 void
2933 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2934     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2935 {
2936 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2937 	dbuf_dirty_record_t *dr;
2938 
2939 	/*
2940 	 * dr_has_raw_params is only processed for blocks of dnodes
2941 	 * (see dbuf_sync_dnode_leaf_crypt()).
2942 	 */
2943 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2944 	ASSERT3U(db->db_level, ==, 0);
2945 	ASSERT(db->db_objset->os_raw_receive);
2946 
2947 	dmu_buf_will_dirty_impl(db_fake,
2948 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2949 
2950 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2951 
2952 	ASSERT3P(dr, !=, NULL);
2953 
2954 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2955 	dr->dt.dl.dr_byteorder = byteorder;
2956 	memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2957 	memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2958 	memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2959 }
2960 
2961 static void
2962 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2963 {
2964 	struct dirty_leaf *dl;
2965 	dbuf_dirty_record_t *dr;
2966 
2967 	dr = list_head(&db->db_dirty_records);
2968 	ASSERT3P(dr, !=, NULL);
2969 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2970 	dl = &dr->dt.dl;
2971 	dl->dr_overridden_by = *bp;
2972 	dl->dr_override_state = DR_OVERRIDDEN;
2973 	BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
2974 }
2975 
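/*
 * Complete a fill started by dmu_buf_will_fill().  On failure the dbuf's
 * contents are discarded; the return value indicates whether the caller
 * should treat the fill as failed.
 */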
2976 boolean_t
2977 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
2978 {
2979 	(void) tx;
2980 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2981 	mutex_enter(&db->db_mtx);
2982 	DBUF_VERIFY(db);
2983 
2984 	if (db->db_state == DB_FILL) {
2985 		if (db->db_level == 0 && db->db_freed_in_flight) {
2986 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2987 			/* we were freed while filling */
2988 			/* XXX dbuf_undirty? */
2989 			memset(db->db.db_data, 0, db->db.db_size);
2990 			db->db_freed_in_flight = FALSE;
2991 			db->db_state = DB_CACHED;
2992 			DTRACE_SET_STATE(db,
2993 			    "fill done handling freed in flight");
2994 			failed = B_FALSE;
2995 		} else if (failed) {
2996 			VERIFY(!dbuf_undirty(db, tx));
2997 			arc_buf_destroy(db->db_buf, db);
2998 			db->db_buf = NULL;
2999 			dbuf_clear_data(db);
3000 			DTRACE_SET_STATE(db, "fill failed");
3001 		} else {
3002 			db->db_state = DB_CACHED;
3003 			DTRACE_SET_STATE(db, "fill done");
3004 		}
3005 		cv_broadcast(&db->db_changed);
3006 	} else {
3007 		db->db_state = DB_CACHED;
3008 		failed = B_FALSE;
3009 	}
3010 	mutex_exit(&db->db_mtx);
3011 	return (failed);
3012 }
3013 
3014 void
3015 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
3016     bp_embedded_type_t etype, enum zio_compress comp,
3017     int uncompressed_size, int compressed_size, int byteorder,
3018     dmu_tx_t *tx)
3019 {
3020 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
3021 	struct dirty_leaf *dl;
3022 	dmu_object_type_t type;
3023 	dbuf_dirty_record_t *dr;
3024 
3025 	if (etype == BP_EMBEDDED_TYPE_DATA) {
3026 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
3027 		    SPA_FEATURE_EMBEDDED_DATA));
3028 	}
3029 
3030 	DB_DNODE_ENTER(db);
3031 	type = DB_DNODE(db)->dn_type;
3032 	DB_DNODE_EXIT(db);
3033 
3034 	ASSERT0(db->db_level);
3035 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3036 
3037 	dmu_buf_will_not_fill(dbuf, tx);
3038 
3039 	dr = list_head(&db->db_dirty_records);
3040 	ASSERT3P(dr, !=, NULL);
3041 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
3042 	dl = &dr->dt.dl;
3043 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
3044 	    data, comp, uncompressed_size, compressed_size);
3045 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
3046 	BP_SET_TYPE(&dl->dr_overridden_by, type);
3047 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
3048 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
3049 
3050 	dl->dr_override_state = DR_OVERRIDDEN;
3051 	BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by, dr->dr_txg);
3052 }
3053 
3054 void
3055 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
3056 {
3057 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
3058 	dmu_object_type_t type;
3059 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
3060 	    SPA_FEATURE_REDACTED_DATASETS));
3061 
3062 	DB_DNODE_ENTER(db);
3063 	type = DB_DNODE(db)->dn_type;
3064 	DB_DNODE_EXIT(db);
3065 
3066 	ASSERT0(db->db_level);
3067 	dmu_buf_will_not_fill(dbuf, tx);
3068 
3069 	blkptr_t bp = { { { {0} } } };
3070 	BP_SET_TYPE(&bp, type);
3071 	BP_SET_LEVEL(&bp, 0);
3072 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
3073 	BP_SET_REDACTED(&bp);
3074 	BPE_SET_LSIZE(&bp, dbuf->db_size);
3075 
3076 	dbuf_override_impl(db, &bp, tx);
3077 }
3078 
3079 /*
3080  * Directly assign a provided arc buf to a given dbuf if it's not referenced
3081  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
3082  */
3083 void
3084 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
3085 {
3086 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
3087 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
3088 	ASSERT(db->db_level == 0);
3089 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
3090 	ASSERT(buf != NULL);
3091 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
3092 	ASSERT(tx->tx_txg != 0);
3093 
3094 	arc_return_buf(buf, db);
3095 	ASSERT(arc_released(buf));
3096 
3097 	mutex_enter(&db->db_mtx);
3098 
3099 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
3100 		cv_wait(&db->db_changed, &db->db_mtx);
3101 
3102 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
3103 	    db->db_state == DB_NOFILL);
3104 
3105 	if (db->db_state == DB_CACHED &&
3106 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
3107 		/*
3108 		 * In practice, we will never have a case where we have an
3109 		 * encrypted arc buffer while additional holds exist on the
		 * dbuf.  We don't handle this here, so we simply assert that
3111 		 * fact instead.
3112 		 */
3113 		ASSERT(!arc_is_encrypted(buf));
3114 		mutex_exit(&db->db_mtx);
3115 		(void) dbuf_dirty(db, tx);
3116 		memcpy(db->db.db_data, buf->b_data, db->db.db_size);
3117 		arc_buf_destroy(buf, db);
3118 		return;
3119 	}
3120 
3121 	if (db->db_state == DB_CACHED) {
3122 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
3123 
3124 		ASSERT(db->db_buf != NULL);
3125 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
3126 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
3127 
3128 			if (!arc_released(db->db_buf)) {
3129 				ASSERT(dr->dt.dl.dr_override_state ==
3130 				    DR_OVERRIDDEN);
3131 				arc_release(db->db_buf, db);
3132 			}
3133 			dr->dt.dl.dr_data = buf;
3134 			arc_buf_destroy(db->db_buf, db);
3135 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
3136 			arc_release(db->db_buf, db);
3137 			arc_buf_destroy(db->db_buf, db);
3138 		}
3139 		db->db_buf = NULL;
3140 	} else if (db->db_state == DB_NOFILL) {
3141 		/*
3142 		 * We will be completely replacing the cloned block.  In case
3143 		 * it was cloned in this transaction group, let's undirty the
3144 		 * pending clone and mark the block as uncached. This will be
3145 		 * as if the clone was never done.
3146 		 */
3147 		VERIFY(!dbuf_undirty(db, tx));
3148 		db->db_state = DB_UNCACHED;
3149 	}
3150 	ASSERT(db->db_buf == NULL);
3151 	dbuf_set_data(db, buf);
3152 	db->db_state = DB_FILL;
3153 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
3154 	mutex_exit(&db->db_mtx);
3155 	(void) dbuf_dirty(db, tx);
3156 	dmu_buf_fill_done(&db->db, tx, B_FALSE);
3157 }
3158 
3159 void
3160 dbuf_destroy(dmu_buf_impl_t *db)
3161 {
3162 	dnode_t *dn;
3163 	dmu_buf_impl_t *parent = db->db_parent;
3164 	dmu_buf_impl_t *dndb;
3165 
3166 	ASSERT(MUTEX_HELD(&db->db_mtx));
3167 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3168 
3169 	if (db->db_buf != NULL) {
3170 		arc_buf_destroy(db->db_buf, db);
3171 		db->db_buf = NULL;
3172 	}
3173 
3174 	if (db->db_blkid == DMU_BONUS_BLKID) {
3175 		int slots = DB_DNODE(db)->dn_num_slots;
3176 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
3177 		if (db->db.db_data != NULL) {
3178 			kmem_free(db->db.db_data, bonuslen);
3179 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
3180 			db->db_state = DB_UNCACHED;
3181 			DTRACE_SET_STATE(db, "buffer cleared");
3182 		}
3183 	}
3184 
3185 	dbuf_clear_data(db);
3186 
3187 	if (multilist_link_active(&db->db_cache_link)) {
3188 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3189 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3190 
3191 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3192 
3193 		ASSERT0(dmu_buf_user_size(&db->db));
3194 		(void) zfs_refcount_remove_many(
3195 		    &dbuf_caches[db->db_caching_status].size,
3196 		    db->db.db_size, db);
3197 
3198 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3199 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3200 		} else {
3201 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3202 			DBUF_STAT_BUMPDOWN(cache_count);
3203 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3204 			    db->db.db_size);
3205 		}
3206 		db->db_caching_status = DB_NO_CACHE;
3207 	}
3208 
3209 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
3210 	ASSERT(db->db_data_pending == NULL);
3211 	ASSERT(list_is_empty(&db->db_dirty_records));
3212 
3213 	db->db_state = DB_EVICTING;
3214 	DTRACE_SET_STATE(db, "buffer eviction started");
3215 	db->db_blkptr = NULL;
3216 
3217 	/*
3218 	 * Now that db_state is DB_EVICTING, nobody else can find this via
3219 	 * the hash table.  We can now drop db_mtx, which allows us to
3220 	 * acquire the dn_dbufs_mtx.
3221 	 */
3222 	mutex_exit(&db->db_mtx);
3223 
3224 	DB_DNODE_ENTER(db);
3225 	dn = DB_DNODE(db);
3226 	dndb = dn->dn_dbuf;
3227 	if (db->db_blkid != DMU_BONUS_BLKID) {
3228 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
3229 		if (needlock)
3230 			mutex_enter_nested(&dn->dn_dbufs_mtx,
3231 			    NESTED_SINGLE);
3232 		avl_remove(&dn->dn_dbufs, db);
3233 		membar_producer();
3234 		DB_DNODE_EXIT(db);
3235 		if (needlock)
3236 			mutex_exit(&dn->dn_dbufs_mtx);
3237 		/*
3238 		 * Decrementing the dbuf count means that the hold corresponding
3239 		 * to the removed dbuf is no longer discounted in dnode_move(),
3240 		 * so the dnode cannot be moved until after we release the hold.
3241 		 * The membar_producer() ensures visibility of the decremented
3242 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
3243 		 * release any lock.
3244 		 */
3245 		mutex_enter(&dn->dn_mtx);
3246 		dnode_rele_and_unlock(dn, db, B_TRUE);
3247 #ifdef USE_DNODE_HANDLE
3248 		db->db_dnode_handle = NULL;
3249 #else
3250 		db->db_dnode = NULL;
3251 #endif
3252 
3253 		dbuf_hash_remove(db);
3254 	} else {
3255 		DB_DNODE_EXIT(db);
3256 	}
3257 
3258 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
3259 
3260 	db->db_parent = NULL;
3261 
3262 	ASSERT(db->db_buf == NULL);
3263 	ASSERT(db->db.db_data == NULL);
3264 	ASSERT(db->db_hash_next == NULL);
3265 	ASSERT(db->db_blkptr == NULL);
3266 	ASSERT(db->db_data_pending == NULL);
3267 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3268 	ASSERT(!multilist_link_active(&db->db_cache_link));
3269 
3270 	/*
3271 	 * If this dbuf is referenced from an indirect dbuf,
3272 	 * decrement the ref count on the indirect dbuf.
3273 	 */
3274 	if (parent && parent != dndb) {
3275 		mutex_enter(&parent->db_mtx);
3276 		dbuf_rele_and_unlock(parent, db, B_TRUE);
3277 	}
3278 
3279 	kmem_cache_free(dbuf_kmem_cache, db);
3280 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3281 }
3282 
3283 /*
3284  * Note: While bpp will always be updated if the function returns success,
3285  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3286  * this happens when the dnode is the meta-dnode, or {user|group|project}used
3287  * object.
3288  */
3289 __attribute__((always_inline))
3290 static inline int
3291 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3292     dmu_buf_impl_t **parentp, blkptr_t **bpp)
3293 {
3294 	*parentp = NULL;
3295 	*bpp = NULL;
3296 
3297 	ASSERT(blkid != DMU_BONUS_BLKID);
3298 
3299 	if (blkid == DMU_SPILL_BLKID) {
3300 		mutex_enter(&dn->dn_mtx);
3301 		if (dn->dn_have_spill &&
3302 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3303 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3304 		else
3305 			*bpp = NULL;
3306 		dbuf_add_ref(dn->dn_dbuf, NULL);
3307 		*parentp = dn->dn_dbuf;
3308 		mutex_exit(&dn->dn_mtx);
3309 		return (0);
3310 	}
3311 
3312 	int nlevels =
3313 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3314 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3315 
3316 	ASSERT3U(level * epbs, <, 64);
3317 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3318 	/*
3319 	 * This assertion shouldn't trip as long as the max indirect block size
3320 	 * is less than 1M.  The reason for this is that up to that point,
3321 	 * the number of levels required to address an entire object with blocks
3322 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
3323 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3324 	 * (i.e. we can address the entire object), objects will all use at most
3325 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
3326 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
3327 	 * enough to address an entire object, so objects will have 5 levels,
3328 	 * but then this assertion will overflow.
3329 	 *
3330 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3331 	 * need to redo this logic to handle overflows.
3332 	 */
3333 	ASSERT(level >= nlevels ||
3334 	    ((nlevels - level - 1) * epbs) +
3335 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
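	/*
	 * Illustrative numbers (not from the code): with 128K indirect
	 * blocks, epbs = 17 - SPA_BLKPTRSHIFT = 10 and highbit64(3) = 2,
	 * so even a 7-level tree satisfies (7 - 1) * 10 + 2 = 62 <= 64.
	 */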
3336 	if (level >= nlevels ||
3337 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3338 	    ((nlevels - level - 1) * epbs)) ||
3339 	    (fail_sparse &&
3340 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3341 		/* the buffer has no parent yet */
3342 		return (SET_ERROR(ENOENT));
3343 	} else if (level < nlevels-1) {
3344 		/* this block is referenced from an indirect block */
3345 		int err;
3346 
3347 		err = dbuf_hold_impl(dn, level + 1,
3348 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3349 
3350 		if (err)
3351 			return (err);
3352 		err = dbuf_read(*parentp, NULL,
3353 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3354 		if (err) {
3355 			dbuf_rele(*parentp, NULL);
3356 			*parentp = NULL;
3357 			return (err);
3358 		}
3359 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
3360 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3361 		    (blkid & ((1ULL << epbs) - 1));
3362 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3363 			ASSERT(BP_IS_HOLE(*bpp));
3364 		rw_exit(&(*parentp)->db_rwlock);
3365 		return (0);
3366 	} else {
3367 		/* the block is referenced from the dnode */
3368 		ASSERT3U(level, ==, nlevels-1);
3369 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3370 		    blkid < dn->dn_phys->dn_nblkptr);
3371 		if (dn->dn_dbuf) {
3372 			dbuf_add_ref(dn->dn_dbuf, NULL);
3373 			*parentp = dn->dn_dbuf;
3374 		}
3375 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
3376 		return (0);
3377 	}
3378 }
3379 
3380 static dmu_buf_impl_t *
3381 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3382     dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3383 {
3384 	objset_t *os = dn->dn_objset;
3385 	dmu_buf_impl_t *db, *odb;
3386 
3387 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3388 	ASSERT(dn->dn_type != DMU_OT_NONE);
3389 
3390 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3391 
3392 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3393 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3394 
3395 	db->db_objset = os;
3396 	db->db.db_object = dn->dn_object;
3397 	db->db_level = level;
3398 	db->db_blkid = blkid;
3399 	db->db_dirtycnt = 0;
3400 #ifdef USE_DNODE_HANDLE
3401 	db->db_dnode_handle = dn->dn_handle;
3402 #else
3403 	db->db_dnode = dn;
3404 #endif
3405 	db->db_parent = parent;
3406 	db->db_blkptr = blkptr;
3407 	db->db_hash = hash;
3408 
3409 	db->db_user = NULL;
3410 	db->db_user_immediate_evict = FALSE;
3411 	db->db_freed_in_flight = FALSE;
3412 	db->db_pending_evict = FALSE;
3413 
3414 	if (blkid == DMU_BONUS_BLKID) {
3415 		ASSERT3P(parent, ==, dn->dn_dbuf);
3416 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3417 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3418 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3419 		db->db.db_offset = DMU_BONUS_BLKID;
3420 		db->db_state = DB_UNCACHED;
3421 		DTRACE_SET_STATE(db, "bonus buffer created");
3422 		db->db_caching_status = DB_NO_CACHE;
3423 		/* the bonus dbuf is not placed in the hash table */
3424 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3425 		return (db);
3426 	} else if (blkid == DMU_SPILL_BLKID) {
3427 		db->db.db_size = (blkptr != NULL) ?
3428 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3429 		db->db.db_offset = 0;
3430 	} else {
3431 		int blocksize =
3432 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3433 		db->db.db_size = blocksize;
3434 		db->db.db_offset = db->db_blkid * blocksize;
3435 	}
3436 
3437 	/*
3438 	 * Hold the dn_dbufs_mtx while we get the new dbuf
3439 	 * in the hash table *and* added to the dbufs list.
3440 	 * This prevents a possible deadlock with someone
3441 	 * trying to look up this dbuf before it's added to the
3442 	 * dn_dbufs list.
3443 	 */
3444 	mutex_enter(&dn->dn_dbufs_mtx);
3445 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3446 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3447 		/* someone else inserted it first */
3448 		mutex_exit(&dn->dn_dbufs_mtx);
3449 		kmem_cache_free(dbuf_kmem_cache, db);
3450 		DBUF_STAT_BUMP(hash_insert_race);
3451 		return (odb);
3452 	}
3453 	avl_add(&dn->dn_dbufs, db);
3454 
3455 	db->db_state = DB_UNCACHED;
3456 	DTRACE_SET_STATE(db, "regular buffer created");
3457 	db->db_caching_status = DB_NO_CACHE;
3458 	mutex_exit(&dn->dn_dbufs_mtx);
3459 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3460 
3461 	if (parent && parent != dn->dn_dbuf)
3462 		dbuf_add_ref(parent, db);
3463 
3464 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3465 	    zfs_refcount_count(&dn->dn_holds) > 0);
3466 	(void) zfs_refcount_add(&dn->dn_holds, db);
3467 
3468 	dprintf_dbuf(db, "db=%p\n", db);
3469 
3470 	return (db);
3471 }
3472 
3473 /*
3474  * This function returns a block pointer and information about the object,
3475  * given a dnode and a block.  This is a publicly accessible version of
3476  * dbuf_findbp that only returns some information, rather than the
3477  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3478  * should be locked as (at least) a reader.
3479  */
3480 int
3481 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3482     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3483 {
3484 	dmu_buf_impl_t *dbp = NULL;
3485 	blkptr_t *bp2;
3486 	int err = 0;
3487 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3488 
3489 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3490 	if (err == 0) {
3491 		ASSERT3P(bp2, !=, NULL);
3492 		*bp = *bp2;
3493 		if (dbp != NULL)
3494 			dbuf_rele(dbp, NULL);
3495 		if (datablkszsec != NULL)
3496 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3497 		if (indblkshift != NULL)
3498 			*indblkshift = dn->dn_phys->dn_indblkshift;
3499 	}
3500 
3501 	return (err);
3502 }
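
/*
 * Illustrative call sequence (a sketch, not code from this file): with a
 * held dnode, a caller might look up the L0 block pointer for 'blkid' as
 *
 *	blkptr_t bp;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	error = dbuf_dnode_findbp(dn, 0, blkid, &bp, NULL, NULL);
 *	rw_exit(&dn->dn_struct_rwlock);
 */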
3503 
3504 typedef struct dbuf_prefetch_arg {
3505 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3506 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3507 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3508 	int dpa_curlevel; /* The current level that we're reading */
3509 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3510 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3511 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3512 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3513 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3514 	void *dpa_arg; /* prefetch completion arg */
3515 } dbuf_prefetch_arg_t;
3516 
3517 static void
3518 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3519 {
3520 	if (dpa->dpa_cb != NULL) {
3521 		dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3522 		    dpa->dpa_zb.zb_blkid, io_done);
3523 	}
3524 	kmem_free(dpa, sizeof (*dpa));
3525 }
3526 
3527 static void
3528 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3529     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3530 {
3531 	(void) zio, (void) zb, (void) iobp;
3532 	dbuf_prefetch_arg_t *dpa = private;
3533 
3534 	if (abuf != NULL)
3535 		arc_buf_destroy(abuf, private);
3536 
3537 	dbuf_prefetch_fini(dpa, B_TRUE);
3538 }
3539 
3540 /*
3541  * Actually issue the prefetch read for the block given.
3542  */
3543 static void
3544 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3545 {
3546 	ASSERT(!BP_IS_REDACTED(bp) ||
3547 	    dsl_dataset_feature_is_active(
3548 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3549 	    SPA_FEATURE_REDACTED_DATASETS));
3550 
3551 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3552 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3553 
3554 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3555 	arc_flags_t aflags =
3556 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3557 	    ARC_FLAG_NO_BUF;
3558 
3559 	/* dnodes are always read as raw and then converted later */
3560 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3561 	    dpa->dpa_curlevel == 0)
3562 		zio_flags |= ZIO_FLAG_RAW;
3563 
3564 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3565 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3566 	ASSERT(dpa->dpa_zio != NULL);
3567 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3568 	    dbuf_issue_final_prefetch_done, dpa,
3569 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3570 }
3571 
3572 /*
3573  * Called when an indirect block above our prefetch target is read in.  This
3574  * will either read in the next indirect block down the tree or issue the actual
3575  * prefetch if the next block down is our target.
3576  */
3577 static void
3578 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3579     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3580 {
3581 	(void) zb, (void) iobp;
3582 	dbuf_prefetch_arg_t *dpa = private;
3583 
3584 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3585 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3586 
3587 	if (abuf == NULL) {
3588 		ASSERT(zio == NULL || zio->io_error != 0);
3589 		dbuf_prefetch_fini(dpa, B_TRUE);
3590 		return;
3591 	}
3592 	ASSERT(zio == NULL || zio->io_error == 0);
3593 
3594 	/*
3595 	 * The dpa_dnode is only valid if we are called with a NULL
3596 	 * zio. This indicates that the arc_read() returned without
3597 	 * first calling zio_read() to issue a physical read. Once
3598 	 * a physical read is made the dpa_dnode must be invalidated
3599 	 * as the locks guarding it may have been dropped. If the
3600 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3601 	 * cache. To do so, we must hold the dbuf associated with the block
3602 	 * we just prefetched, read its contents so that we associate it
3603 	 * with an arc_buf_t, and then release it.
3604 	 */
3605 	if (zio != NULL) {
3606 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3607 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3608 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3609 		} else {
3610 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3611 		}
3612 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3613 
3614 		dpa->dpa_dnode = NULL;
3615 	} else if (dpa->dpa_dnode != NULL) {
3616 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3617 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3618 		    dpa->dpa_zb.zb_level));
3619 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3620 		    dpa->dpa_curlevel, curblkid, FTAG);
3621 		if (db == NULL) {
3622 			arc_buf_destroy(abuf, private);
3623 			dbuf_prefetch_fini(dpa, B_TRUE);
3624 			return;
3625 		}
3626 		(void) dbuf_read(db, NULL,
3627 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3628 		dbuf_rele(db, FTAG);
3629 	}
3630 
3631 	dpa->dpa_curlevel--;
3632 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3633 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3634 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3635 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3636 
3637 	ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3638 	    dsl_dataset_feature_is_active(
3639 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3640 	    SPA_FEATURE_REDACTED_DATASETS)));
3641 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3642 		arc_buf_destroy(abuf, private);
3643 		dbuf_prefetch_fini(dpa, B_TRUE);
3644 		return;
3645 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3646 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3647 		dbuf_issue_final_prefetch(dpa, bp);
3648 	} else {
3649 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3650 		zbookmark_phys_t zb;
3651 
3652 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3653 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3654 			iter_aflags |= ARC_FLAG_L2CACHE;
3655 
3656 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3657 
3658 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3659 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3660 
3661 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3662 		    bp, dbuf_prefetch_indirect_done, dpa,
3663 		    ZIO_PRIORITY_SYNC_READ,
3664 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3665 		    &iter_aflags, &zb);
3666 	}
3667 
3668 	arc_buf_destroy(abuf, private);
3669 }
3670 
3671 /*
3672  * Issue prefetch reads for the given block on the given level.  If the indirect
3673  * blocks above that block are not in memory, we will read them in
3674  * asynchronously.  As a result, this call never blocks waiting for a read to
3675  * complete. Note that the prefetch might fail if the dataset is encrypted and
3676  * the encryption key is unmapped before the IO completes.
3677  */
3678 int
3679 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3680     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3681     void *arg)
3682 {
3683 	blkptr_t bp;
3684 	int epbs, nlevels, curlevel;
3685 	uint64_t curblkid;
3686 
3687 	ASSERT(blkid != DMU_BONUS_BLKID);
3688 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3689 
3690 	if (blkid > dn->dn_maxblkid)
3691 		goto no_issue;
3692 
3693 	if (level == 0 && dnode_block_freed(dn, blkid))
3694 		goto no_issue;
3695 
3696 	/*
3697 	 * This dnode hasn't been written to disk yet, so there's nothing to
3698 	 * prefetch.
3699 	 */
3700 	nlevels = dn->dn_phys->dn_nlevels;
3701 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3702 		goto no_issue;
3703 
3704 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3705 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3706 		goto no_issue;
3707 
3708 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3709 	    level, blkid, NULL);
3710 	if (db != NULL) {
3711 		mutex_exit(&db->db_mtx);
3712 		/*
3713 		 * This dbuf already exists.  It is either CACHED, or
3714 		 * (we assume) about to be read or filled.
3715 		 */
3716 		goto no_issue;
3717 	}
3718 
3719 	/*
3720 	 * Find the closest ancestor (indirect block) of the target block
3721 	 * that is present in the cache.  In this indirect block, we will
3722 	 * find the bp that is at curlevel, curblkid.
3723 	 */
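	/*
	 * For example (illustrative only): with epbs = 10, an L0 target at
	 * blkid 12345 is referenced from entry P2PHASE(12345, 1024) = 57 of
	 * the L1 indirect block at blkid 12345 >> 10 = 12.
	 */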
3724 	curlevel = level;
3725 	curblkid = blkid;
3726 	while (curlevel < nlevels - 1) {
3727 		int parent_level = curlevel + 1;
3728 		uint64_t parent_blkid = curblkid >> epbs;
3729 		dmu_buf_impl_t *db;
3730 
3731 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3732 		    FALSE, TRUE, FTAG, &db) == 0) {
3733 			blkptr_t *bpp = db->db_buf->b_data;
3734 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3735 			dbuf_rele(db, FTAG);
3736 			break;
3737 		}
3738 
3739 		curlevel = parent_level;
3740 		curblkid = parent_blkid;
3741 	}
3742 
3743 	if (curlevel == nlevels - 1) {
3744 		/* No cached indirect blocks found. */
3745 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3746 		bp = dn->dn_phys->dn_blkptr[curblkid];
3747 	}
3748 	ASSERT(!BP_IS_REDACTED(&bp) ||
3749 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3750 	    SPA_FEATURE_REDACTED_DATASETS));
3751 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3752 		goto no_issue;
3753 
3754 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3755 
3756 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3757 	    ZIO_FLAG_CANFAIL);
3758 
3759 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3760 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3761 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3762 	    dn->dn_object, level, blkid);
3763 	dpa->dpa_curlevel = curlevel;
3764 	dpa->dpa_prio = prio;
3765 	dpa->dpa_aflags = aflags;
3766 	dpa->dpa_spa = dn->dn_objset->os_spa;
3767 	dpa->dpa_dnode = dn;
3768 	dpa->dpa_epbs = epbs;
3769 	dpa->dpa_zio = pio;
3770 	dpa->dpa_cb = cb;
3771 	dpa->dpa_arg = arg;
3772 
3773 	if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3774 		dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3775 	else if (dnode_level_is_l2cacheable(&bp, dn, level))
3776 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3777 
3778 	/*
3779 	 * If we have the indirect just above us, no need to do the asynchronous
3780 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3781 	 * a higher level, though, we want to issue the prefetches for all the
3782 	 * indirect blocks asynchronously, so we can go on with whatever we were
3783 	 * doing.
3784 	 */
3785 	if (curlevel == level) {
3786 		ASSERT3U(curblkid, ==, blkid);
3787 		dbuf_issue_final_prefetch(dpa, &bp);
3788 	} else {
3789 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3790 		zbookmark_phys_t zb;
3791 
3792 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3793 		if (dnode_level_is_l2cacheable(&bp, dn, level))
3794 			iter_aflags |= ARC_FLAG_L2CACHE;
3795 
3796 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3797 		    dn->dn_object, curlevel, curblkid);
3798 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3799 		    &bp, dbuf_prefetch_indirect_done, dpa,
3800 		    ZIO_PRIORITY_SYNC_READ,
3801 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3802 		    &iter_aflags, &zb);
3803 	}
3804 	/*
3805 	 * We use pio here instead of dpa_zio since it's possible that
3806 	 * dpa may have already been freed.
3807 	 */
3808 	zio_nowait(pio);
3809 	return (1);
3810 no_issue:
3811 	if (cb != NULL)
3812 		cb(arg, level, blkid, B_FALSE);
3813 	return (0);
3814 }
3815 
3816 int
3817 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3818     arc_flags_t aflags)
3819 {
3820 
3821 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3822 }
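
/*
 * Illustrative use (a sketch, not code from this file): prefetch the first
 * 16 L0 blocks of a held dnode:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t i = 0; i < 16; i++)
 *		(void) dbuf_prefetch(dn, 0, i, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 */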
3823 
3824 /*
3825  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3826  * the case of encrypted, compressed and uncompressed buffers by
3827  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3828  * arc_alloc_compressed_buf() or arc_alloc_buf().
3829  *
3830  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3831  */
3832 noinline static void
3833 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3834 {
3835 	dbuf_dirty_record_t *dr = db->db_data_pending;
3836 	arc_buf_t *data = dr->dt.dl.dr_data;
3837 	enum zio_compress compress_type = arc_get_compression(data);
3838 	uint8_t complevel = arc_get_complevel(data);
3839 
3840 	if (arc_is_encrypted(data)) {
3841 		boolean_t byteorder;
3842 		uint8_t salt[ZIO_DATA_SALT_LEN];
3843 		uint8_t iv[ZIO_DATA_IV_LEN];
3844 		uint8_t mac[ZIO_DATA_MAC_LEN];
3845 
3846 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3847 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3848 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3849 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3850 		    compress_type, complevel));
3851 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3852 		dbuf_set_data(db, arc_alloc_compressed_buf(
3853 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3854 		    arc_buf_lsize(data), compress_type, complevel));
3855 	} else {
3856 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3857 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3858 	}
3859 
3860 	rw_enter(&db->db_rwlock, RW_WRITER);
3861 	memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3862 	rw_exit(&db->db_rwlock);
3863 }
3864 
3865 /*
3866  * Returns with db_holds incremented, and db_mtx not held.
3867  * Note: dn_struct_rwlock must be held.
3868  */
3869 int
3870 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3871     boolean_t fail_sparse, boolean_t fail_uncached,
3872     const void *tag, dmu_buf_impl_t **dbp)
3873 {
3874 	dmu_buf_impl_t *db, *parent = NULL;
3875 	uint64_t hv;
3876 
3877 	/* If the pool has been created, verify the tx_sync_lock is not held */
3878 	spa_t *spa = dn->dn_objset->os_spa;
3879 	dsl_pool_t *dp = spa->spa_dsl_pool;
3880 	if (dp != NULL) {
3881 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3882 	}
3883 
3884 	ASSERT(blkid != DMU_BONUS_BLKID);
3885 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3886 	ASSERT3U(dn->dn_nlevels, >, level);
3887 
3888 	*dbp = NULL;
3889 
3890 	/* dbuf_find() returns with db_mtx held */
3891 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3892 
3893 	if (db == NULL) {
3894 		blkptr_t *bp = NULL;
3895 		int err;
3896 
3897 		if (fail_uncached)
3898 			return (SET_ERROR(ENOENT));
3899 
3900 		ASSERT3P(parent, ==, NULL);
3901 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3902 		if (fail_sparse) {
3903 			if (err == 0 && bp && BP_IS_HOLE(bp))
3904 				err = SET_ERROR(ENOENT);
3905 			if (err) {
3906 				if (parent)
3907 					dbuf_rele(parent, NULL);
3908 				return (err);
3909 			}
3910 		}
3911 		if (err && err != ENOENT)
3912 			return (err);
3913 		db = dbuf_create(dn, level, blkid, parent, bp, hv);
3914 	}
3915 
3916 	if (fail_uncached && db->db_state != DB_CACHED) {
3917 		mutex_exit(&db->db_mtx);
3918 		return (SET_ERROR(ENOENT));
3919 	}
3920 
3921 	if (db->db_buf != NULL) {
3922 		arc_buf_access(db->db_buf);
3923 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3924 	}
3925 
3926 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3927 
3928 	/*
3929 	 * If this buffer is currently syncing out, and we are
3930 	 * still referencing it from db_data, we need to make a copy
3931 	 * of it in case we decide we want to dirty it again in this txg.
3932 	 */
3933 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3934 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3935 	    db->db_state == DB_CACHED && db->db_data_pending) {
3936 		dbuf_dirty_record_t *dr = db->db_data_pending;
3937 		if (dr->dt.dl.dr_data == db->db_buf) {
3938 			ASSERT3P(db->db_buf, !=, NULL);
3939 			dbuf_hold_copy(dn, db);
3940 		}
3941 	}
3942 
3943 	if (multilist_link_active(&db->db_cache_link)) {
3944 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3945 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3946 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3947 
3948 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3949 
3950 		uint64_t size = db->db.db_size;
3951 		uint64_t usize = dmu_buf_user_size(&db->db);
3952 		(void) zfs_refcount_remove_many(
3953 		    &dbuf_caches[db->db_caching_status].size, size, db);
3954 		(void) zfs_refcount_remove_many(
3955 		    &dbuf_caches[db->db_caching_status].size, usize,
3956 		    db->db_user);
3957 
3958 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3959 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3960 		} else {
3961 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3962 			DBUF_STAT_BUMPDOWN(cache_count);
3963 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3964 			    size + usize);
3965 		}
3966 		db->db_caching_status = DB_NO_CACHE;
3967 	}
3968 	(void) zfs_refcount_add(&db->db_holds, tag);
3969 	DBUF_VERIFY(db);
3970 	mutex_exit(&db->db_mtx);
3971 
3972 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3973 	if (parent)
3974 		dbuf_rele(parent, NULL);
3975 
3976 	ASSERT3P(DB_DNODE(db), ==, dn);
3977 	ASSERT3U(db->db_blkid, ==, blkid);
3978 	ASSERT3U(db->db_level, ==, level);
3979 	*dbp = db;
3980 
3981 	return (0);
3982 }
3983 
3984 dmu_buf_impl_t *
3985 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3986 {
3987 	return (dbuf_hold_level(dn, 0, blkid, tag));
3988 }
3989 
3990 dmu_buf_impl_t *
3991 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3992 {
3993 	dmu_buf_impl_t *db;
3994 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3995 	return (err ? NULL : db);
3996 }
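
/*
 * Illustrative pairing (a sketch): every successful dbuf_hold() must be
 * balanced by a dbuf_rele() with the same tag:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		...
 *		dbuf_rele(db, FTAG);
 *	}
 */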
3997 
3998 void
3999 dbuf_create_bonus(dnode_t *dn)
4000 {
4001 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
4002 
4003 	ASSERT(dn->dn_bonus == NULL);
4004 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
4005 	    dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
4006 }
4007 
4008 int
4009 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
4010 {
4011 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4012 
4013 	if (db->db_blkid != DMU_SPILL_BLKID)
4014 		return (SET_ERROR(ENOTSUP));
4015 	if (blksz == 0)
4016 		blksz = SPA_MINBLOCKSIZE;
4017 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
4018 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
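	/* e.g., a 1000-byte request rounds up to 1024 (2 * SPA_MINBLOCKSIZE) */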
4019 
4020 	dbuf_new_size(db, blksz, tx);
4021 
4022 	return (0);
4023 }
4024 
4025 void
4026 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
4027 {
4028 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
4029 }
4030 
4031 #pragma weak dmu_buf_add_ref = dbuf_add_ref
4032 void
4033 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
4034 {
4035 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
4036 	VERIFY3S(holds, >, 1);
4037 }
4038 
4039 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
4040 boolean_t
4041 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
4042     const void *tag)
4043 {
4044 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4045 	dmu_buf_impl_t *found_db;
4046 	boolean_t result = B_FALSE;
4047 
4048 	if (blkid == DMU_BONUS_BLKID)
4049 		found_db = dbuf_find_bonus(os, obj);
4050 	else
4051 		found_db = dbuf_find(os, obj, 0, blkid, NULL);
4052 
4053 	if (found_db != NULL) {
4054 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
4055 			(void) zfs_refcount_add(&db->db_holds, tag);
4056 			result = B_TRUE;
4057 		}
4058 		mutex_exit(&found_db->db_mtx);
4059 	}
4060 	return (result);
4061 }
4062 
4063 /*
4064  * If you call dbuf_rele() you had better not be referencing the dnode handle
4065  * unless you have some other direct or indirect hold on the dnode. (An indirect
4066  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
4067  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
4068  * dnode's parent dbuf evicting its dnode handles.
4069  */
4070 void
4071 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
4072 {
4073 	mutex_enter(&db->db_mtx);
4074 	dbuf_rele_and_unlock(db, tag, B_FALSE);
4075 }
4076 
4077 void
4078 dmu_buf_rele(dmu_buf_t *db, const void *tag)
4079 {
4080 	dbuf_rele((dmu_buf_impl_t *)db, tag);
4081 }
4082 
4083 /*
4084  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
4085  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
4086  * argument should be set if we are already in the dbuf-evicting code
4087  * path, in which case we don't want to recursively evict.  This allows us to
4088  * avoid deeply nested stacks that would have a call flow similar to this:
4089  *
4090  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
4091  *	^						|
4092  *	|						|
4093  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
4094  *
4095  */
4096 void
4097 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
4098 {
4099 	int64_t holds;
4100 	uint64_t size;
4101 
4102 	ASSERT(MUTEX_HELD(&db->db_mtx));
4103 	DBUF_VERIFY(db);
4104 
4105 	/*
4106 	 * Remove the reference to the dbuf before removing its hold on the
4107 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
4108 	 * buffer has a corresponding dnode hold.
4109 	 */
4110 	holds = zfs_refcount_remove(&db->db_holds, tag);
4111 	ASSERT(holds >= 0);
4112 
4113 	/*
4114 	 * We can't freeze indirects if there is a possibility that they
4115 	 * may be modified in the current syncing context.
4116 	 */
4117 	if (db->db_buf != NULL &&
4118 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
4119 		arc_buf_freeze(db->db_buf);
4120 	}
4121 
4122 	if (holds == db->db_dirtycnt &&
4123 	    db->db_level == 0 && db->db_user_immediate_evict)
4124 		dbuf_evict_user(db);
4125 
4126 	if (holds == 0) {
4127 		if (db->db_blkid == DMU_BONUS_BLKID) {
4128 			dnode_t *dn;
4129 			boolean_t evict_dbuf = db->db_pending_evict;
4130 
4131 			/*
4132 			 * If the dnode moves here, we cannot cross this
4133 			 * barrier until the move completes.
4134 			 */
4135 			DB_DNODE_ENTER(db);
4136 
4137 			dn = DB_DNODE(db);
4138 			atomic_dec_32(&dn->dn_dbufs_count);
4139 
4140 			/*
4141 			 * Decrementing the dbuf count means that the bonus
4142 			 * buffer's dnode hold is no longer discounted in
4143 			 * dnode_move(). The dnode cannot move until after
4144 			 * the dnode_rele() below.
4145 			 */
4146 			DB_DNODE_EXIT(db);
4147 
4148 			/*
4149 			 * Do not reference db after its lock is dropped.
4150 			 * Another thread may evict it.
4151 			 */
4152 			mutex_exit(&db->db_mtx);
4153 
4154 			if (evict_dbuf)
4155 				dnode_evict_bonus(dn);
4156 
4157 			dnode_rele(dn, db);
4158 		} else if (db->db_buf == NULL) {
4159 			/*
4160 			 * This is a special case: we never associated this
4161 			 * dbuf with any data allocated from the ARC.
4162 			 */
4163 			ASSERT(db->db_state == DB_UNCACHED ||
4164 			    db->db_state == DB_NOFILL);
4165 			dbuf_destroy(db);
4166 		} else if (arc_released(db->db_buf)) {
4167 			/*
4168 			 * This dbuf has anonymous data associated with it.
4169 			 */
4170 			dbuf_destroy(db);
4171 		} else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
4172 		    db->db_pending_evict) {
4173 			dbuf_destroy(db);
4174 		} else if (!multilist_link_active(&db->db_cache_link)) {
4175 			ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4176 
4177 			dbuf_cached_state_t dcs =
4178 			    dbuf_include_in_metadata_cache(db) ?
4179 			    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
4180 			db->db_caching_status = dcs;
4181 
4182 			multilist_insert(&dbuf_caches[dcs].cache, db);
4183 			uint64_t db_size = db->db.db_size;
4184 			uint64_t dbu_size = dmu_buf_user_size(&db->db);
4185 			(void) zfs_refcount_add_many(
4186 			    &dbuf_caches[dcs].size, db_size, db);
4187 			size = zfs_refcount_add_many(
4188 			    &dbuf_caches[dcs].size, dbu_size, db->db_user);
4189 			uint8_t db_level = db->db_level;
4190 			mutex_exit(&db->db_mtx);
4191 
4192 			if (dcs == DB_DBUF_METADATA_CACHE) {
4193 				DBUF_STAT_BUMP(metadata_cache_count);
4194 				DBUF_STAT_MAX(metadata_cache_size_bytes_max,
4195 				    size);
4196 			} else {
4197 				DBUF_STAT_BUMP(cache_count);
4198 				DBUF_STAT_MAX(cache_size_bytes_max, size);
4199 				DBUF_STAT_BUMP(cache_levels[db_level]);
4200 				DBUF_STAT_INCR(cache_levels_bytes[db_level],
4201 				    db_size + dbu_size);
4202 			}
4203 
4204 			if (dcs == DB_DBUF_CACHE && !evicting)
4205 				dbuf_evict_notify(size);
4206 		}
4207 	} else {
4208 		mutex_exit(&db->db_mtx);
4209 	}
4210 }
4211 
4212 #pragma weak dmu_buf_refcount = dbuf_refcount
4213 uint64_t
4214 dbuf_refcount(dmu_buf_impl_t *db)
4215 {
4216 	return (zfs_refcount_count(&db->db_holds));
4217 }
4218 
4219 uint64_t
4220 dmu_buf_user_refcount(dmu_buf_t *db_fake)
4221 {
4222 	uint64_t holds;
4223 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4224 
4225 	mutex_enter(&db->db_mtx);
4226 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
4227 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
4228 	mutex_exit(&db->db_mtx);
4229 
4230 	return (holds);
4231 }
4232 
4233 void *
4234 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
4235     dmu_buf_user_t *new_user)
4236 {
4237 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4238 
4239 	mutex_enter(&db->db_mtx);
4240 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4241 	if (db->db_user == old_user)
4242 		db->db_user = new_user;
4243 	else
4244 		old_user = db->db_user;
4245 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4246 	mutex_exit(&db->db_mtx);
4247 
4248 	return (old_user);
4249 }
4250 
4251 void *
4252 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4253 {
4254 	return (dmu_buf_replace_user(db_fake, NULL, user));
4255 }
4256 
4257 void *
4258 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4259 {
4260 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4261 
4262 	db->db_user_immediate_evict = TRUE;
4263 	return (dmu_buf_set_user(db_fake, user));
4264 }
4265 
4266 void *
4267 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4268 {
4269 	return (dmu_buf_replace_user(db_fake, user, NULL));
4270 }
4271 
4272 void *
4273 dmu_buf_get_user(dmu_buf_t *db_fake)
4274 {
4275 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4276 
4277 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
4278 	return (db->db_user);
4279 }
4280 
4281 uint64_t
4282 dmu_buf_user_size(dmu_buf_t *db_fake)
4283 {
4284 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4285 	if (db->db_user == NULL)
4286 		return (0);
4287 	return (atomic_load_64(&db->db_user->dbu_size));
4288 }
4289 
4290 void
4291 dmu_buf_add_user_size(dmu_buf_t *db_fake, uint64_t nadd)
4292 {
4293 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4294 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4295 	ASSERT3P(db->db_user, !=, NULL);
4296 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), <, UINT64_MAX - nadd);
4297 	atomic_add_64(&db->db_user->dbu_size, nadd);
4298 }
4299 
4300 void
4301 dmu_buf_sub_user_size(dmu_buf_t *db_fake, uint64_t nsub)
4302 {
4303 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4304 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
4305 	ASSERT3P(db->db_user, !=, NULL);
4306 	ASSERT3U(atomic_load_64(&db->db_user->dbu_size), >=, nsub);
4307 	atomic_sub_64(&db->db_user->dbu_size, nsub);
4308 }
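
/*
 * Illustrative pairing (a sketch, not code from this file): a dbuf user
 * that grows its private state by 'delta' bytes while holding the dbuf
 * would call dmu_buf_add_user_size(db, delta) and balance it with
 * dmu_buf_sub_user_size(db, delta) before shrinking that state.
 */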
4309 
4310 void
4311 dmu_buf_user_evict_wait(void)
4312 {
4313 	taskq_wait(dbu_evict_taskq);
4314 }
4315 
4316 blkptr_t *
4317 dmu_buf_get_blkptr(dmu_buf_t *db)
4318 {
4319 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4320 	return (dbi->db_blkptr);
4321 }
4322 
4323 objset_t *
4324 dmu_buf_get_objset(dmu_buf_t *db)
4325 {
4326 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4327 	return (dbi->db_objset);
4328 }
4329 
4330 static void
4331 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4332 {
4333 	/* ASSERT(dmu_tx_is_syncing(tx) */
4334 	ASSERT(MUTEX_HELD(&db->db_mtx));
4335 
4336 	if (db->db_blkptr != NULL)
4337 		return;
4338 
4339 	if (db->db_blkid == DMU_SPILL_BLKID) {
4340 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4341 		BP_ZERO(db->db_blkptr);
4342 		return;
4343 	}
4344 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4345 		/*
4346 		 * This buffer was allocated at a time when there were
4347 		 * no available blkptrs from the dnode, or it was
4348 		 * inappropriate to hook it in (i.e., nlevels mismatch).
4349 		 */
4350 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4351 		ASSERT(db->db_parent == NULL);
4352 		db->db_parent = dn->dn_dbuf;
4353 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4354 		DBUF_VERIFY(db);
4355 	} else {
4356 		dmu_buf_impl_t *parent = db->db_parent;
4357 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4358 
4359 		ASSERT(dn->dn_phys->dn_nlevels > 1);
4360 		if (parent == NULL) {
4361 			mutex_exit(&db->db_mtx);
4362 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
4363 			parent = dbuf_hold_level(dn, db->db_level + 1,
4364 			    db->db_blkid >> epbs, db);
4365 			rw_exit(&dn->dn_struct_rwlock);
4366 			mutex_enter(&db->db_mtx);
4367 			db->db_parent = parent;
4368 		}
4369 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
4370 		    (db->db_blkid & ((1ULL << epbs) - 1));
4371 		DBUF_VERIFY(db);
4372 	}
4373 }
4374 
4375 static void
4376 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4377 {
4378 	dmu_buf_impl_t *db = dr->dr_dbuf;
4379 	void *data = dr->dt.dl.dr_data;
4380 
4381 	ASSERT0(db->db_level);
4382 	ASSERT(MUTEX_HELD(&db->db_mtx));
4383 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4384 	ASSERT(data != NULL);
4385 
4386 	dnode_t *dn = dr->dr_dnode;
4387 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4388 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4389 	memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4390 
4391 	dbuf_sync_leaf_verify_bonus_dnode(dr);
4392 
4393 	dbuf_undirty_bonus(dr);
4394 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4395 }
4396 
4397 /*
4398  * When syncing out a block of dnodes, adjust the block to deal with
4399  * encryption.  Normally, we make sure the block is decrypted before writing
4400  * it.  If we have crypt params, then we are writing a raw (encrypted) block
4401  * from a raw receive.  In this case, set the ARC buf's crypt params so
4402  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4403  */
4404 static void
4405 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4406 {
4407 	int err;
4408 	dmu_buf_impl_t *db = dr->dr_dbuf;
4409 
4410 	ASSERT(MUTEX_HELD(&db->db_mtx));
4411 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4412 	ASSERT3U(db->db_level, ==, 0);
4413 
4414 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4415 		zbookmark_phys_t zb;
4416 
4417 		/*
4418 		 * Unfortunately, there is currently no mechanism for
4419 		 * syncing context to handle decryption errors. An error
4420 		 * here is only possible if an attacker maliciously
4421 		 * changed a dnode block and updated the associated
4422 		 * checksums going up the block tree.
4423 		 */
4424 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4425 		    db->db.db_object, db->db_level, db->db_blkid);
4426 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4427 		    &zb, B_TRUE);
4428 		if (err)
4429 			panic("Invalid dnode block MAC");
4430 	} else if (dr->dt.dl.dr_has_raw_params) {
4431 		(void) arc_release(dr->dt.dl.dr_data, db);
4432 		arc_convert_to_raw(dr->dt.dl.dr_data,
4433 		    dmu_objset_id(db->db_objset),
4434 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4435 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4436 	}
4437 }
4438 
4439 /*
4440  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4441  * is critical the we not allow the compiler to inline this function in to
4442  * is critical that we not allow the compiler to inline this function into
4443  * dbuf_sync_list(), which would drastically bloat the stack usage.
4444 noinline static void
4445 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4446 {
4447 	dmu_buf_impl_t *db = dr->dr_dbuf;
4448 	dnode_t *dn = dr->dr_dnode;
4449 
4450 	ASSERT(dmu_tx_is_syncing(tx));
4451 
4452 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4453 
4454 	mutex_enter(&db->db_mtx);
4455 
4456 	ASSERT(db->db_level > 0);
4457 	DBUF_VERIFY(db);
4458 
4459 	/* Read the block if it hasn't been read yet. */
4460 	if (db->db_buf == NULL) {
4461 		mutex_exit(&db->db_mtx);
4462 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4463 		mutex_enter(&db->db_mtx);
4464 	}
4465 	ASSERT3U(db->db_state, ==, DB_CACHED);
4466 	ASSERT(db->db_buf != NULL);
4467 
4468 	/* Indirect block size must match what the dnode thinks it is. */
4469 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4470 	dbuf_check_blkptr(dn, db);
4471 
4472 	/* Provide the pending dirty record to child dbufs */
4473 	db->db_data_pending = dr;
4474 
4475 	mutex_exit(&db->db_mtx);
4476 
4477 	dbuf_write(dr, db->db_buf, tx);
4478 
4479 	zio_t *zio = dr->dr_zio;
4480 	mutex_enter(&dr->dt.di.dr_mtx);
4481 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4482 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4483 	mutex_exit(&dr->dt.di.dr_mtx);
4484 	zio_nowait(zio);
4485 }
4486 
4487 /*
4488  * Verify that the size of the data in our bonus buffer does not exceed
4489  * its recorded size.
4490  *
4491  * The purpose of this verification is to catch any cases in development
4492  * where the size of a phys structure (e.g., space_map_phys_t) grows and,
4493  * due to incorrect feature management, older pools expect to read more
4494  * data even though they didn't actually write it to begin with.
4495  *
4496  * For example, this would catch an error in the feature logic where we
4497  * open an older pool and we expect to write the space map histogram of
4498  * a space map with size SPACE_MAP_SIZE_V0.
4499  */
4500 static void
4501 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4502 {
4503 #ifdef ZFS_DEBUG
4504 	dnode_t *dn = dr->dr_dnode;
4505 
4506 	/*
4507 	 * Encrypted bonus buffers can have data past their bonuslen.
4508 	 * Skip the verification of these blocks.
4509 	 */
4510 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4511 		return;
4512 
4513 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4514 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4515 	ASSERT3U(bonuslen, <=, maxbonuslen);
4516 
4517 	arc_buf_t *datap = dr->dt.dl.dr_data;
4518 	char *datap_end = ((char *)datap) + bonuslen;
4519 	char *datap_max = ((char *)datap) + maxbonuslen;
4520 
4521 	/* ensure that everything is zero after our data */
4522 	for (; datap_end < datap_max; datap_end++)
4523 		ASSERT(*datap_end == 0);
4524 #endif
4525 }
4526 
4527 static blkptr_t *
4528 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4529 {
4530 	/* This must be a lightweight dirty record. */
4531 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4532 	dnode_t *dn = dr->dr_dnode;
4533 
4534 	if (dn->dn_phys->dn_nlevels == 1) {
4535 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4536 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4537 	} else {
4538 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4539 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4540 		VERIFY3U(parent_db->db_level, ==, 1);
4541 		VERIFY3P(DB_DNODE(parent_db), ==, dn);
4542 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4543 		blkptr_t *bp = parent_db->db.db_data;
4544 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4545 	}
4546 }
4547 
4548 static void
4549 dbuf_lightweight_ready(zio_t *zio)
4550 {
4551 	dbuf_dirty_record_t *dr = zio->io_private;
4552 	blkptr_t *bp = zio->io_bp;
4553 
4554 	if (zio->io_error != 0)
4555 		return;
4556 
4557 	dnode_t *dn = dr->dr_dnode;
4558 
4559 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4560 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4561 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4562 	    bp_get_dsize_sync(spa, bp_orig);
4563 	dnode_diduse_space(dn, delta);
4564 
4565 	uint64_t blkid = dr->dt.dll.dr_blkid;
4566 	mutex_enter(&dn->dn_mtx);
4567 	if (blkid > dn->dn_phys->dn_maxblkid) {
4568 		ASSERT0(dn->dn_objset->os_raw_receive);
4569 		dn->dn_phys->dn_maxblkid = blkid;
4570 	}
4571 	mutex_exit(&dn->dn_mtx);
4572 
4573 	if (!BP_IS_EMBEDDED(bp)) {
4574 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4575 		BP_SET_FILL(bp, fill);
4576 	}
4577 
4578 	dmu_buf_impl_t *parent_db;
4579 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4580 	if (dr->dr_parent == NULL) {
4581 		parent_db = dn->dn_dbuf;
4582 	} else {
4583 		parent_db = dr->dr_parent->dr_dbuf;
4584 	}
4585 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4586 	*bp_orig = *bp;
4587 	rw_exit(&parent_db->db_rwlock);
4588 }
4589 
4590 static void
4591 dbuf_lightweight_done(zio_t *zio)
4592 {
4593 	dbuf_dirty_record_t *dr = zio->io_private;
4594 
4595 	VERIFY0(zio->io_error);
4596 
4597 	objset_t *os = dr->dr_dnode->dn_objset;
4598 	dmu_tx_t *tx = os->os_synctx;
4599 
4600 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4601 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4602 	} else {
4603 		dsl_dataset_t *ds = os->os_dsl_dataset;
4604 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4605 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4606 	}
4607 
4608 	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
4609 	    zio->io_txg);
4610 
4611 	abd_free(dr->dt.dll.dr_abd);
4612 	kmem_free(dr, sizeof (*dr));
4613 }
4614 
4615 noinline static void
4616 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4617 {
4618 	dnode_t *dn = dr->dr_dnode;
4619 	zio_t *pio;
4620 	if (dn->dn_phys->dn_nlevels == 1) {
4621 		pio = dn->dn_zio;
4622 	} else {
4623 		pio = dr->dr_parent->dr_zio;
4624 	}
4625 
4626 	zbookmark_phys_t zb = {
4627 		.zb_objset = dmu_objset_id(dn->dn_objset),
4628 		.zb_object = dn->dn_object,
4629 		.zb_level = 0,
4630 		.zb_blkid = dr->dt.dll.dr_blkid,
4631 	};
4632 
4633 	/*
4634 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4635 	 * will have the old BP in dbuf_lightweight_done().
4636 	 */
4637 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4638 
4639 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4640 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4641 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4642 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4643 	    dbuf_lightweight_done, dr, ZIO_PRIORITY_ASYNC_WRITE,
4644 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4645 
4646 	zio_nowait(dr->dr_zio);
4647 }
4648 
4649 /*
4650  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4651  * critical that we not allow the compiler to inline this function into
4652  * dbuf_sync_list(), which would drastically bloat the stack usage.
4653  */
4654 noinline static void
4655 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4656 {
4657 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4658 	dmu_buf_impl_t *db = dr->dr_dbuf;
4659 	dnode_t *dn = dr->dr_dnode;
4660 	objset_t *os;
4661 	uint64_t txg = tx->tx_txg;
4662 
4663 	ASSERT(dmu_tx_is_syncing(tx));
4664 
4665 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4666 
4667 	mutex_enter(&db->db_mtx);
4668 	/*
4669 	 * To be synced, we must be dirtied.  But we might have been freed
4670 	 * after the dirty.
4671 	 */
4672 	if (db->db_state == DB_UNCACHED) {
4673 		/* This buffer has been freed since it was dirtied */
4674 		ASSERT3P(db->db.db_data, ==, NULL);
4675 	} else if (db->db_state == DB_FILL) {
4676 		/* This buffer was freed and is now being re-filled */
4677 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4678 	} else if (db->db_state == DB_READ) {
4679 		/*
4680 		 * This buffer was either cloned or had a Direct I/O write
4681 		 * occur and has an in-flight read on the BP. It is safe to
4682 		 * issue the write here, because the read has already been
4683 		 * issued and the contents won't change.
4684 		 *
4685 		 * We can verify the case of both the clone and Direct I/O
4686 		 * write by making sure the first dirty record for the dbuf
4687 		 * has no ARC buffer associated with it.
4688 		 */
4689 		dbuf_dirty_record_t *dr_head =
4690 		    list_head(&db->db_dirty_records);
4691 		ASSERT3P(db->db_buf, ==, NULL);
4692 		ASSERT3P(db->db.db_data, ==, NULL);
4693 		ASSERT3P(dr_head->dt.dl.dr_data, ==, NULL);
4694 		ASSERT3U(dr_head->dt.dl.dr_override_state, ==, DR_OVERRIDDEN);
4695 	} else {
4696 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4697 	}
4698 	DBUF_VERIFY(db);
4699 
4700 	if (db->db_blkid == DMU_SPILL_BLKID) {
4701 		mutex_enter(&dn->dn_mtx);
4702 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4703 			/*
4704 			 * In the previous transaction group, the bonus buffer
4705 			 * was entirely used to store the attributes for the
4706 			 * dnode which overrode the dn_spill field.  However,
4707 			 * when adding more attributes to the file a spill
4708 			 * block was required to hold the extra attributes.
4709 			 *
4710 			 * Make sure to clear the garbage left in the dn_spill
4711 			 * field from the previous attributes in the bonus
4712 			 * buffer.  Otherwise, after writing out the spill
4713 			 * block to the newly allocated dva, it will free
4714 			 * the old block pointed to by the invalid dn_spill.
4715 			 */
4716 			db->db_blkptr = NULL;
4717 		}
4718 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4719 		mutex_exit(&dn->dn_mtx);
4720 	}
4721 
4722 	/*
4723 	 * If this is a bonus buffer, simply copy the bonus data into the
4724 	 * dnode.  It will be written out when the dnode is synced (and it
4725 	 * will be synced, since it must have been dirty for dbuf_sync to
4726 	 * be called).
4727 	 */
4728 	if (db->db_blkid == DMU_BONUS_BLKID) {
4729 		ASSERT(dr->dr_dbuf == db);
4730 		dbuf_sync_bonus(dr, tx);
4731 		return;
4732 	}
4733 
4734 	os = dn->dn_objset;
4735 
4736 	/*
4737 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4738 	 * operation to sneak in. As a result, we need to ensure that we
4739 	 * don't check the dr_override_state until we have returned from
4740 	 * dbuf_check_blkptr.
4741 	 */
4742 	dbuf_check_blkptr(dn, db);
4743 
4744 	/*
4745 	 * If this buffer is in the middle of an immediate write, wait for the
4746 	 * synchronous IO to complete.
4747 	 *
4748 	 * This is also valid even with Direct I/O writes setting a dirty
4749 	 * records override state into DR_IN_DMU_SYNC, because all
4750 	 * record's override state to DR_IN_DMU_SYNC, because all
4751 	 */
4752 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4753 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4754 		cv_wait(&db->db_changed, &db->db_mtx);
4755 	}
4756 
4757 	/*
4758 	 * If this is a dnode block, ensure it is appropriately encrypted
4759 	 * or decrypted, depending on what we are writing to it this txg.
4760 	 */
4761 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4762 		dbuf_prepare_encrypted_dnode_leaf(dr);
4763 
4764 	if (*datap != NULL && *datap == db->db_buf &&
4765 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4766 	    zfs_refcount_count(&db->db_holds) > 1 &&
4767 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN) {
4768 		/*
4769 		 * If this buffer is currently "in use" (i.e., there
4770 		 * are active holds and db_data still references it),
4771 		 * then make a copy before we start the write so that
4772 		 * any modifications from the open txg will not leak
4773 		 * into this write.
4774 		 *
4775 		 * NOTE: this copy does not need to be made for
4776 		 * objects only modified in the syncing context (e.g.
4777 		 * DMU_OT_DNODE blocks).
4778 		 */
4779 		int psize = arc_buf_size(*datap);
4780 		int lsize = arc_buf_lsize(*datap);
4781 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4782 		enum zio_compress compress_type = arc_get_compression(*datap);
4783 		uint8_t complevel = arc_get_complevel(*datap);
4784 
4785 		if (arc_is_encrypted(*datap)) {
4786 			boolean_t byteorder;
4787 			uint8_t salt[ZIO_DATA_SALT_LEN];
4788 			uint8_t iv[ZIO_DATA_IV_LEN];
4789 			uint8_t mac[ZIO_DATA_MAC_LEN];
4790 
4791 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4792 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4793 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4794 			    dn->dn_type, psize, lsize, compress_type,
4795 			    complevel);
4796 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4797 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4798 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4799 			    psize, lsize, compress_type, complevel);
4800 		} else {
4801 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4802 		}
4803 		memcpy((*datap)->b_data, db->db.db_data, psize);
4804 	}
4805 	db->db_data_pending = dr;
4806 
4807 	mutex_exit(&db->db_mtx);
4808 
4809 	dbuf_write(dr, *datap, tx);
4810 
4811 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4812 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4813 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4814 	} else {
4815 		zio_nowait(dr->dr_zio);
4816 	}
4817 }
4818 
4819 /*
4820  * Syncs out a range of dirty records for indirect or leaf dbufs.  May be
4821  * called recursively from dbuf_sync_indirect().
4822  */
4823 void
4824 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4825 {
4826 	dbuf_dirty_record_t *dr;
4827 
4828 	while ((dr = list_head(list))) {
4829 		if (dr->dr_zio != NULL) {
4830 			/*
4831 			 * If we find an already initialized zio then we
4832 			 * are processing the meta-dnode, and we have finished.
4833 			 * The dbufs for all dnodes are put back on the list
4834 			 * during processing, so that we can zio_wait()
4835 			 * these IOs after initiating all child IOs.
4836 			 */
4837 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4838 			    DMU_META_DNODE_OBJECT);
4839 			break;
4840 		}
4841 		list_remove(list, dr);
4842 		if (dr->dr_dbuf == NULL) {
4843 			dbuf_sync_lightweight(dr, tx);
4844 		} else {
4845 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4846 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4847 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4848 			}
4849 			if (dr->dr_dbuf->db_level > 0)
4850 				dbuf_sync_indirect(dr, tx);
4851 			else
4852 				dbuf_sync_leaf(dr, tx);
4853 		}
4854 	}
4855 }
4856 
4857 static void
4858 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4859 {
4860 	(void) buf;
4861 	dmu_buf_impl_t *db = vdb;
4862 	dnode_t *dn;
4863 	blkptr_t *bp = zio->io_bp;
4864 	blkptr_t *bp_orig = &zio->io_bp_orig;
4865 	spa_t *spa = zio->io_spa;
4866 	int64_t delta;
4867 	uint64_t fill = 0;
4868 	int i;
4869 
4870 	ASSERT3P(db->db_blkptr, !=, NULL);
4871 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4872 
4873 	DB_DNODE_ENTER(db);
4874 	dn = DB_DNODE(db);
4875 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4876 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4877 	zio->io_prev_space_delta = delta;
4878 
4879 	if (BP_GET_LOGICAL_BIRTH(bp) != 0) {
4880 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4881 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4882 		    (db->db_blkid == DMU_SPILL_BLKID &&
4883 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4884 		    BP_IS_EMBEDDED(bp));
4885 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4886 	}
4887 
4888 	mutex_enter(&db->db_mtx);
4889 
4890 #ifdef ZFS_DEBUG
4891 	if (db->db_blkid == DMU_SPILL_BLKID) {
4892 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4893 		ASSERT(!(BP_IS_HOLE(bp)) &&
4894 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4895 	}
4896 #endif
4897 
4898 	if (db->db_level == 0) {
4899 		mutex_enter(&dn->dn_mtx);
4900 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4901 		    db->db_blkid != DMU_SPILL_BLKID) {
4902 			ASSERT0(db->db_objset->os_raw_receive);
4903 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4904 		}
4905 		mutex_exit(&dn->dn_mtx);
4906 
4907 		if (dn->dn_type == DMU_OT_DNODE) {
4908 			i = 0;
4909 			while (i < db->db.db_size) {
4910 				dnode_phys_t *dnp =
4911 				    (void *)(((char *)db->db.db_data) + i);
4912 
4913 				i += DNODE_MIN_SIZE;
4914 				if (dnp->dn_type != DMU_OT_NONE) {
4915 					fill++;
4916 					for (int j = 0; j < dnp->dn_nblkptr;
4917 					    j++) {
4918 						(void) zfs_blkptr_verify(spa,
4919 						    &dnp->dn_blkptr[j],
4920 						    BLK_CONFIG_SKIP,
4921 						    BLK_VERIFY_HALT);
4922 					}
4923 					if (dnp->dn_flags &
4924 					    DNODE_FLAG_SPILL_BLKPTR) {
4925 						(void) zfs_blkptr_verify(spa,
4926 						    DN_SPILL_BLKPTR(dnp),
4927 						    BLK_CONFIG_SKIP,
4928 						    BLK_VERIFY_HALT);
4929 					}
4930 					i += dnp->dn_extra_slots *
4931 					    DNODE_MIN_SIZE;
4932 				}
4933 			}
4934 		} else {
4935 			if (BP_IS_HOLE(bp)) {
4936 				fill = 0;
4937 			} else {
4938 				fill = 1;
4939 			}
4940 		}
4941 	} else {
4942 		blkptr_t *ibp = db->db.db_data;
4943 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4944 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4945 			if (BP_IS_HOLE(ibp))
4946 				continue;
4947 			(void) zfs_blkptr_verify(spa, ibp,
4948 			    BLK_CONFIG_SKIP, BLK_VERIFY_HALT);
4949 			fill += BP_GET_FILL(ibp);
4950 		}
4951 	}
4952 	DB_DNODE_EXIT(db);
4953 
4954 	if (!BP_IS_EMBEDDED(bp))
4955 		BP_SET_FILL(bp, fill);
4956 
4957 	mutex_exit(&db->db_mtx);
4958 
4959 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4960 	*db->db_blkptr = *bp;
4961 	dmu_buf_unlock_parent(db, dblt, FTAG);
4962 }
4963 
4964 /*
4965  * This function gets called just prior to running through the compression
4966  * stage of the zio pipeline. If we're an indirect block composed of only
4967  * holes, then we want this indirect to be compressed away to a hole. In
4968  * order to do that we must zero out any information about the holes that
4969  * this indirect points to before we try to compress it.
4970  */
4971 static void
4972 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4973 {
4974 	(void) zio, (void) buf;
4975 	dmu_buf_impl_t *db = vdb;
4976 	blkptr_t *bp;
4977 	unsigned int epbs, i;
4978 
4979 	ASSERT3U(db->db_level, >, 0);
4980 	DB_DNODE_ENTER(db);
4981 	epbs = DB_DNODE(db)->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4982 	DB_DNODE_EXIT(db);
4983 	ASSERT3U(epbs, <, 31);
4984 
4985 	/* Determine if all our children are holes */
4986 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4987 		if (!BP_IS_HOLE(bp))
4988 			break;
4989 	}
4990 
4991 	/*
4992 	 * If all the children are holes, then zero out the entire block
4993 	 * so that it may be compressed away to a hole.
4994 	 */
4995 	if (i == 1ULL << epbs) {
4996 		/*
4997 		 * We only found holes. Grab the rwlock to prevent
4998 		 * anybody from reading the blocks we're about to
4999 		 * zero out.
5000 		 */
5001 		rw_enter(&db->db_rwlock, RW_WRITER);
5002 		memset(db->db.db_data, 0, db->db.db_size);
5003 		rw_exit(&db->db_rwlock);
5004 	}
5005 }
5006 
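/*
 * The "done" callback for a dbuf's write zio, called once the write has
 * been committed to disk.  Unless the write was a nopwrite or rewrite,
 * kill the old BP and record the new one in the dataset's block
 * accounting; then tear down the dirty record and wake any threads
 * waiting on db_changed.
 */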
5007 static void
5008 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
5009 {
5010 	(void) buf;
5011 	dmu_buf_impl_t *db = vdb;
5012 	blkptr_t *bp_orig = &zio->io_bp_orig;
5013 	blkptr_t *bp = db->db_blkptr;
5014 	objset_t *os = db->db_objset;
5015 	dmu_tx_t *tx = os->os_synctx;
5016 
5017 	ASSERT0(zio->io_error);
5018 	ASSERT(db->db_blkptr == bp);
5019 
5020 	/*
5021 	 * For nopwrites and rewrites we ensure that the bp matches our
5022 	 * original and bypass all the accounting.
5023 	 */
5024 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
5025 		ASSERT(BP_EQUAL(bp, bp_orig));
5026 	} else {
5027 		dsl_dataset_t *ds = os->os_dsl_dataset;
5028 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
5029 		dsl_dataset_block_born(ds, bp, tx);
5030 	}
5031 
5032 	mutex_enter(&db->db_mtx);
5033 
5034 	DBUF_VERIFY(db);
5035 
5036 	dbuf_dirty_record_t *dr = db->db_data_pending;
5037 	dnode_t *dn = dr->dr_dnode;
5038 	ASSERT(!list_link_active(&dr->dr_dirty_node));
5039 	ASSERT(dr->dr_dbuf == db);
5040 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
5041 	list_remove(&db->db_dirty_records, dr);
5042 
5043 #ifdef ZFS_DEBUG
5044 	if (db->db_blkid == DMU_SPILL_BLKID) {
5045 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
5046 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
5047 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
5048 	}
5049 #endif
5050 
5051 	if (db->db_level == 0) {
5052 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
5053 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
5054 
5055 		/* no dr_data if this is a NO_FILL or Direct I/O */
5056 		if (dr->dt.dl.dr_data != NULL &&
5057 		    dr->dt.dl.dr_data != db->db_buf) {
5058 			ASSERT3B(dr->dt.dl.dr_brtwrite, ==, B_FALSE);
5059 			ASSERT3B(dr->dt.dl.dr_diowrite, ==, B_FALSE);
5060 			arc_buf_destroy(dr->dt.dl.dr_data, db);
5061 		}
5062 	} else {
5063 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
5064 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
5065 		if (!BP_IS_HOLE(db->db_blkptr)) {
5066 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
5067 			    SPA_BLKPTRSHIFT;
5068 			ASSERT3U(db->db_blkid, <=,
5069 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
5070 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
5071 			    db->db.db_size);
5072 		}
5073 		mutex_destroy(&dr->dt.di.dr_mtx);
5074 		list_destroy(&dr->dt.di.dr_children);
5075 	}
5076 
5077 	cv_broadcast(&db->db_changed);
5078 	ASSERT(db->db_dirtycnt > 0);
5079 	db->db_dirtycnt -= 1;
5080 	db->db_data_pending = NULL;
5081 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
5082 
5083 	dsl_pool_undirty_space(dmu_objset_pool(os), dr->dr_accounted,
5084 	    zio->io_txg);
5085 
5086 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
5087 }
5088 
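/*
 * Adapters that let dbuf_write_ready()/dbuf_write_done(), which use the
 * arc_write() callback signature, be driven by plain zio_write() I/Os,
 * which supply only the zio.
 */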
5089 static void
5090 dbuf_write_nofill_ready(zio_t *zio)
5091 {
5092 	dbuf_write_ready(zio, NULL, zio->io_private);
5093 }
5094 
5095 static void
5096 dbuf_write_nofill_done(zio_t *zio)
5097 {
5098 	dbuf_write_done(zio, NULL, zio->io_private);
5099 }
5100 
5101 static void
5102 dbuf_write_override_ready(zio_t *zio)
5103 {
5104 	dbuf_dirty_record_t *dr = zio->io_private;
5105 	dmu_buf_impl_t *db = dr->dr_dbuf;
5106 
5107 	dbuf_write_ready(zio, NULL, db);
5108 }
5109 
5110 static void
5111 dbuf_write_override_done(zio_t *zio)
5112 {
5113 	dbuf_dirty_record_t *dr = zio->io_private;
5114 	dmu_buf_impl_t *db = dr->dr_dbuf;
5115 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
5116 
5117 	mutex_enter(&db->db_mtx);
5118 	if (!BP_EQUAL(zio->io_bp, obp)) {
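	/*
	 * If the zio's final BP differs from the override BP supplied by
	 * open context (e.g. because a nopwrite took effect), the block
	 * the override BP points to is unreferenced: free it, and release
	 * the ARC buffer back to an anonymous state.
	 */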
5119 		if (!BP_IS_HOLE(obp))
5120 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
5121 		arc_release(dr->dt.dl.dr_data, db);
5122 	}
5123 	mutex_exit(&db->db_mtx);
5124 
5125 	dbuf_write_done(zio, NULL, db);
5126 
5127 	if (zio->io_abd != NULL)
5128 		abd_free(zio->io_abd);
5129 }
5130 
5131 typedef struct dbuf_remap_impl_callback_arg {
5132 	objset_t	*drica_os;
5133 	uint64_t	drica_blk_birth;
5134 	dmu_tx_t	*drica_tx;
5135 } dbuf_remap_impl_callback_arg_t;
5136 
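/*
 * Invoked by spa_remap_blkptr() with the old (indirect vdev) location of
 * each segment being remapped: mark that location obsolete directly for
 * MOS blocks, or record the remap against the owning dataset otherwise.
 */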
5137 static void
5138 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
5139     void *arg)
5140 {
5141 	dbuf_remap_impl_callback_arg_t *drica = arg;
5142 	objset_t *os = drica->drica_os;
5143 	spa_t *spa = dmu_objset_spa(os);
5144 	dmu_tx_t *tx = drica->drica_tx;
5145 
5146 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5147 
5148 	if (os == spa_meta_objset(spa)) {
5149 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
5150 	} else {
5151 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
5152 		    size, drica->drica_blk_birth, tx);
5153 	}
5154 }
5155 
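/*
 * Remap a single BP to its new concrete location, if it currently refers
 * to an indirect (removed) vdev, keeping any open livelist consistent
 * with the change.
 */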
5156 static void
5157 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
5158 {
5159 	blkptr_t bp_copy = *bp;
5160 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
5161 	dbuf_remap_impl_callback_arg_t drica;
5162 
5163 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5164 
5165 	drica.drica_os = dn->dn_objset;
5166 	drica.drica_blk_birth = BP_GET_LOGICAL_BIRTH(bp);
5167 	drica.drica_tx = tx;
5168 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
5169 	    &drica)) {
5170 		/*
5171 		 * If the blkptr being remapped is tracked by a livelist,
5172 		 * then we need to make sure the livelist reflects the update.
5173 		 * First, cancel out the old blkptr by appending a 'FREE'
5174 		 * entry. Next, add an 'ALLOC' to track the new version. This
5175 		 * way we avoid trying to free an inaccurate blkptr at delete.
5176 		 * Note that embedded blkptrs are not tracked in livelists.
5177 		 */
5178 		if (dn->dn_objset != spa_meta_objset(spa)) {
5179 			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
5180 			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
5181 			    BP_GET_LOGICAL_BIRTH(bp) >
5182 			    ds->ds_dir->dd_origin_txg) {
5183 				ASSERT(!BP_IS_EMBEDDED(bp));
5184 				ASSERT(dsl_dir_is_clone(ds->ds_dir));
5185 				ASSERT(spa_feature_is_enabled(spa,
5186 				    SPA_FEATURE_LIVELIST));
5187 				bplist_append(&ds->ds_dir->dd_pending_frees,
5188 				    bp);
5189 				bplist_append(&ds->ds_dir->dd_pending_allocs,
5190 				    &bp_copy);
5191 			}
5192 		}
5193 
5194 		/*
5195 		 * The db_rwlock prevents dbuf_read_impl() from
5196 		 * dereferencing the BP while we are changing it.  To
5197 		 * avoid lock contention, only grab it when we are actually
5198 		 * changing the BP.
5199 		 */
5200 		if (rw != NULL)
5201 			rw_enter(rw, RW_WRITER);
5202 		*bp = bp_copy;
5203 		if (rw != NULL)
5204 			rw_exit(rw);
5205 	}
5206 }
5207 
5208 /*
5209  * Remap any existing BPs to concrete vdevs, if possible.
5210  */
5211 static void
5212 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
5213 {
5214 	spa_t *spa = dmu_objset_spa(db->db_objset);
5215 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
5216 
5217 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
5218 		return;
5219 
5220 	if (db->db_level > 0) {
5221 		blkptr_t *bp = db->db.db_data;
5222 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
5223 			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
5224 		}
5225 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
5226 		dnode_phys_t *dnp = db->db.db_data;
5227 		ASSERT3U(dn->dn_type, ==, DMU_OT_DNODE);
5228 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
5229 		    i += dnp[i].dn_extra_slots + 1) {
5230 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
5231 				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
5232 				    &dn->dn_dbuf->db_rwlock);
5233 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
5234 				    tx);
5235 			}
5236 		}
5237 	}
5238 }
5239 
5240 
5241 /*
5242  * Populate dr->dr_zio with a zio to commit a dirty buffer to disk.
5243  * The caller is responsible for issuing zio_[no]wait() on dr->dr_zio.
5244  */
5245 static void
5246 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5247 {
5248 	dmu_buf_impl_t *db = dr->dr_dbuf;
5249 	dnode_t *dn = dr->dr_dnode;
5250 	objset_t *os;
5251 	dmu_buf_impl_t *parent = db->db_parent;
5252 	uint64_t txg = tx->tx_txg;
5253 	zbookmark_phys_t zb;
5254 	zio_prop_t zp;
5255 	zio_t *pio; /* parent I/O */
5256 	int wp_flag = 0;
5257 
5258 	ASSERT(dmu_tx_is_syncing(tx));
5259 
5260 	os = dn->dn_objset;
5261 
5262 	if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5263 		/*
5264 		 * Private object buffers are released here rather than in
5265 		 * dbuf_dirty() since they are only modified in the syncing
5266 		 * context and we don't want the overhead of making multiple
5267 		 * copies of the data.
5268 		 */
5269 		if (BP_IS_HOLE(db->db_blkptr))
5270 			arc_buf_thaw(data);
5271 		else
5272 			dbuf_release_bp(db);
5273 		dbuf_remap(dn, db, tx);
5274 	}
5275 
5276 	if (parent != dn->dn_dbuf) {
5277 		/* Our parent is an indirect block. */
5278 		/* We have a dirty parent that has been scheduled for write. */
5279 		ASSERT(parent && parent->db_data_pending);
5280 		/* Our parent's buffer is one level closer to the dnode. */
5281 		ASSERT(db->db_level == parent->db_level-1);
5282 		/*
5283 		 * We're about to modify our parent's db_data by modifying
5284 		 * our block pointer, so the parent must be released.
5285 		 */
5286 		ASSERT(arc_released(parent->db_buf));
5287 		pio = parent->db_data_pending->dr_zio;
5288 	} else {
5289 		/* Our parent is the dnode itself. */
5290 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5291 		    db->db_blkid != DMU_SPILL_BLKID) ||
5292 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5293 		if (db->db_blkid != DMU_SPILL_BLKID)
5294 			ASSERT3P(db->db_blkptr, ==,
5295 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
5296 		pio = dn->dn_zio;
5297 	}
5298 
5299 	ASSERT(db->db_level == 0 || data == db->db_buf);
5300 	ASSERT3U(BP_GET_LOGICAL_BIRTH(db->db_blkptr), <=, txg);
5301 	ASSERT(pio);
5302 
5303 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5304 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5305 	    db->db.db_object, db->db_level, db->db_blkid);
5306 
5307 	if (db->db_blkid == DMU_SPILL_BLKID)
5308 		wp_flag = WP_SPILL;
5309 	wp_flag |= (data == NULL) ? WP_NOFILL : 0;
5310 
5311 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5312 
5313 	/*
5314 	 * We copy the blkptr now (rather than when we instantiate the dirty
5315 	 * record), because its value can change between open context and
5316 	 * syncing context. We do not need to hold dn_struct_rwlock to read
5317 	 * db_blkptr because we are in syncing context.
5318 	 */
5319 	dr->dr_bp_copy = *db->db_blkptr;
5320 
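	/*
	 * Issue the write: either a zio_write() whose BP was already chosen
	 * in open context (the override case), a NOFILL zio_write() that
	 * carries no data, or, in the common case, an arc_write() of the
	 * dbuf's contents.
	 */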
5321 	if (db->db_level == 0 &&
5322 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5323 		/*
5324 		 * The BP for this block has been provided by open context
5325 		 * (by dmu_sync(), dmu_write_direct(),
5326 		 *  or dmu_buf_write_embedded()).
5327 		 */
5328 		abd_t *contents = (data != NULL) ?
5329 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5330 
5331 		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5332 		    contents, db->db.db_size, db->db.db_size, &zp,
5333 		    dbuf_write_override_ready, NULL,
5334 		    dbuf_write_override_done,
5335 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5336 		mutex_enter(&db->db_mtx);
5337 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5338 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5339 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5340 		    dr->dt.dl.dr_brtwrite);
5341 		mutex_exit(&db->db_mtx);
5342 	} else if (data == NULL) {
5343 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5344 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5345 		dr->dr_zio = zio_write(pio, os->os_spa, txg,
5346 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5347 		    dbuf_write_nofill_ready, NULL,
5348 		    dbuf_write_nofill_done, db,
5349 		    ZIO_PRIORITY_ASYNC_WRITE,
5350 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5351 	} else {
5352 		ASSERT(arc_released(data));
5353 
5354 		/*
5355 		 * For indirect blocks, we want to setup the children
5356 		 * ready callback so that we can properly handle an indirect
5357 		 * block that only contains holes.
5358 		 */
5359 		arc_write_done_func_t *children_ready_cb = NULL;
5360 		if (db->db_level != 0)
5361 			children_ready_cb = dbuf_write_children_ready;
5362 
5363 		dr->dr_zio = arc_write(pio, os->os_spa, txg,
5364 		    &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5365 		    dbuf_is_l2cacheable(db, NULL), &zp, dbuf_write_ready,
5366 		    children_ready_cb, dbuf_write_done, db,
5367 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5368 	}
5369 }
5370 
5371 EXPORT_SYMBOL(dbuf_find);
5372 EXPORT_SYMBOL(dbuf_is_metadata);
5373 EXPORT_SYMBOL(dbuf_destroy);
5374 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5375 EXPORT_SYMBOL(dbuf_whichblock);
5376 EXPORT_SYMBOL(dbuf_read);
5377 EXPORT_SYMBOL(dbuf_unoverride);
5378 EXPORT_SYMBOL(dbuf_free_range);
5379 EXPORT_SYMBOL(dbuf_new_size);
5380 EXPORT_SYMBOL(dbuf_release_bp);
5381 EXPORT_SYMBOL(dbuf_dirty);
5382 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5383 EXPORT_SYMBOL(dmu_buf_will_dirty);
5384 EXPORT_SYMBOL(dmu_buf_is_dirty);
5385 EXPORT_SYMBOL(dmu_buf_will_clone_or_dio);
5386 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5387 EXPORT_SYMBOL(dmu_buf_will_fill);
5388 EXPORT_SYMBOL(dmu_buf_fill_done);
5389 EXPORT_SYMBOL(dmu_buf_rele);
5390 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5391 EXPORT_SYMBOL(dbuf_prefetch);
5392 EXPORT_SYMBOL(dbuf_hold_impl);
5393 EXPORT_SYMBOL(dbuf_hold);
5394 EXPORT_SYMBOL(dbuf_hold_level);
5395 EXPORT_SYMBOL(dbuf_create_bonus);
5396 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5397 EXPORT_SYMBOL(dbuf_rm_spill);
5398 EXPORT_SYMBOL(dbuf_add_ref);
5399 EXPORT_SYMBOL(dbuf_rele);
5400 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5401 EXPORT_SYMBOL(dbuf_refcount);
5402 EXPORT_SYMBOL(dbuf_sync_list);
5403 EXPORT_SYMBOL(dmu_buf_set_user);
5404 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5405 EXPORT_SYMBOL(dmu_buf_get_user);
5406 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5407 
5408 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5409 	"Maximum size in bytes of the dbuf cache.");
5410 
5411 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5412 	"Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5413 
5414 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5415 	"Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5416 
5417 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5418 	"Maximum size in bytes of the dbuf metadata cache.");
5419 
5420 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5421 	"Set size of dbuf cache to log2 fraction of arc size.");
5422 
5423 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5424 	"Set size of dbuf metadata cache to log2 fraction of arc size.");
5425 
5426 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5427 	"Set size of dbuf cache mutex array as log2 shift.");
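
/*
 * For example, on Linux these tunables are exposed as module parameters
 * (assuming the stock parameter path):
 *
 *	echo $((128 * 1024 * 1024)) > \
 *	    /sys/module/zfs/parameters/dbuf_cache_max_bytes
 *
 * On FreeBSD the equivalent knobs appear as vfs.zfs.dbuf_cache.* and
 * vfs.zfs.dbuf.* sysctls.
 */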
5428