xref: /titanic_52/usr/src/uts/common/fs/zfs/dbuf.c (revision 1b3b16f35bee1ffc210591d82bca6adf247954b0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2013 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  */
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_send.h>
31 #include <sys/dmu_impl.h>
32 #include <sys/dbuf.h>
33 #include <sys/dmu_objset.h>
34 #include <sys/dsl_dataset.h>
35 #include <sys/dsl_dir.h>
36 #include <sys/dmu_tx.h>
37 #include <sys/spa.h>
38 #include <sys/zio.h>
39 #include <sys/dmu_zfetch.h>
40 #include <sys/sa.h>
41 #include <sys/sa_impl.h>
42 
43 /*
44  * Number of times that zfs_free_range() took the slow path while doing
45  * a zfs receive.  A nonzero value indicates a potential performance problem.
46  */
47 uint64_t zfs_free_range_recv_miss;
48 
49 static void dbuf_destroy(dmu_buf_impl_t *db);
50 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
51 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
52 
53 /*
54  * Global data structures and functions for the dbuf cache.
55  */
56 static kmem_cache_t *dbuf_cache;
57 
58 /* ARGSUSED */
59 static int
60 dbuf_cons(void *vdb, void *unused, int kmflag)
61 {
62 	dmu_buf_impl_t *db = vdb;
63 	bzero(db, sizeof (dmu_buf_impl_t));
64 
65 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
66 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
67 	refcount_create(&db->db_holds);
68 	return (0);
69 }
70 
71 /* ARGSUSED */
72 static void
73 dbuf_dest(void *vdb, void *unused)
74 {
75 	dmu_buf_impl_t *db = vdb;
76 	mutex_destroy(&db->db_mtx);
77 	cv_destroy(&db->db_changed);
78 	refcount_destroy(&db->db_holds);
79 }
80 
81 /*
82  * dbuf hash table routines
83  */
84 static dbuf_hash_table_t dbuf_hash_table;
85 
86 static uint64_t dbuf_hash_count;
87 
88 static uint64_t
89 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
90 {
91 	uintptr_t osv = (uintptr_t)os;
92 	uint64_t crc = -1ULL;
93 
94 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
95 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
96 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
97 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
98 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
99 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
100 	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
101 
102 	crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
103 
104 	return (crc);
105 }
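
/*
 * Note (descriptive): only the two low-order bytes of obj and blkid (and
 * one byte of the objset pointer) are run through the CRC table; the
 * higher-order bits are folded in by the final xor, so large object and
 * block numbers still spread across buckets.
 */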
106 
107 #define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)
108 
109 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
110 	((dbuf)->db.db_object == (obj) &&		\
111 	(dbuf)->db_objset == (os) &&			\
112 	(dbuf)->db_level == (level) &&			\
113 	(dbuf)->db_blkid == (blkid))
114 
115 dmu_buf_impl_t *
116 dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
117 {
118 	dbuf_hash_table_t *h = &dbuf_hash_table;
119 	objset_t *os = dn->dn_objset;
120 	uint64_t obj = dn->dn_object;
121 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
122 	uint64_t idx = hv & h->hash_table_mask;
123 	dmu_buf_impl_t *db;
124 
125 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
126 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
127 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
128 			mutex_enter(&db->db_mtx);
129 			if (db->db_state != DB_EVICTING) {
130 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
131 				return (db);
132 			}
133 			mutex_exit(&db->db_mtx);
134 		}
135 	}
136 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
137 	return (NULL);
138 }
139 
140 /*
141  * Insert an entry into the hash table.  If there is already an element
142  * equal to elem in the hash table, then the already existing element
143  * will be returned and the new element will not be inserted.
144  * Otherwise returns NULL.
145  */
146 static dmu_buf_impl_t *
147 dbuf_hash_insert(dmu_buf_impl_t *db)
148 {
149 	dbuf_hash_table_t *h = &dbuf_hash_table;
150 	objset_t *os = db->db_objset;
151 	uint64_t obj = db->db.db_object;
152 	int level = db->db_level;
153 	uint64_t blkid = db->db_blkid;
154 	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
155 	uint64_t idx = hv & h->hash_table_mask;
156 	dmu_buf_impl_t *dbf;
157 
158 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
159 	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
160 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
161 			mutex_enter(&dbf->db_mtx);
162 			if (dbf->db_state != DB_EVICTING) {
163 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
164 				return (dbf);
165 			}
166 			mutex_exit(&dbf->db_mtx);
167 		}
168 	}
169 
170 	mutex_enter(&db->db_mtx);
171 	db->db_hash_next = h->hash_table[idx];
172 	h->hash_table[idx] = db;
173 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
174 	atomic_add_64(&dbuf_hash_count, 1);
175 
176 	return (NULL);
177 }
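
/*
 * A minimal usage sketch (illustrative; dbuf_create() below does exactly
 * this).  If the insert fails we lost a creation race, so we free our
 * copy and adopt the existing dbuf:
 *
 *	db->db_state = DB_EVICTING;
 *	if ((odb = dbuf_hash_insert(db)) != NULL) {
 *		kmem_cache_free(dbuf_cache, db);
 *		return (odb);
 *	}
 */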
178 
179 /*
180  * Remove an entry from the hash table.  This operation will
181  * fail if there are any existing holds on the db.
182  */
183 static void
184 dbuf_hash_remove(dmu_buf_impl_t *db)
185 {
186 	dbuf_hash_table_t *h = &dbuf_hash_table;
187 	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
188 	    db->db_level, db->db_blkid);
189 	uint64_t idx = hv & h->hash_table_mask;
190 	dmu_buf_impl_t *dbf, **dbp;
191 
192 	/*
193 	 * We mustn't hold db_mtx to maintain lock ordering:
194 	 * DBUF_HASH_MUTEX > db_mtx.
195 	 */
196 	ASSERT(refcount_is_zero(&db->db_holds));
197 	ASSERT(db->db_state == DB_EVICTING);
198 	ASSERT(!MUTEX_HELD(&db->db_mtx));
199 
200 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
201 	dbp = &h->hash_table[idx];
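	/*
	 * Walk the bucket with a pointer-to-a-pointer so the matching
	 * element can be unlinked without special-casing the list head.
	 */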
202 	while ((dbf = *dbp) != db) {
203 		dbp = &dbf->db_hash_next;
204 		ASSERT(dbf != NULL);
205 	}
206 	*dbp = db->db_hash_next;
207 	db->db_hash_next = NULL;
208 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
209 	atomic_add_64(&dbuf_hash_count, -1);
210 }
211 
212 static arc_evict_func_t dbuf_do_evict;
213 
214 static void
215 dbuf_evict_user(dmu_buf_impl_t *db)
216 {
217 	ASSERT(MUTEX_HELD(&db->db_mtx));
218 
219 	if (db->db_level != 0 || db->db_evict_func == NULL)
220 		return;
221 
222 	if (db->db_user_data_ptr_ptr)
223 		*db->db_user_data_ptr_ptr = db->db.db_data;
224 	db->db_evict_func(&db->db, db->db_user_ptr);
225 	db->db_user_ptr = NULL;
226 	db->db_user_data_ptr_ptr = NULL;
227 	db->db_evict_func = NULL;
228 }
229 
230 boolean_t
231 dbuf_is_metadata(dmu_buf_impl_t *db)
232 {
233 	if (db->db_level > 0) {
234 		return (B_TRUE);
235 	} else {
236 		boolean_t is_metadata;
237 
238 		DB_DNODE_ENTER(db);
239 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
240 		DB_DNODE_EXIT(db);
241 
242 		return (is_metadata);
243 	}
244 }
245 
246 void
247 dbuf_evict(dmu_buf_impl_t *db)
248 {
249 	ASSERT(MUTEX_HELD(&db->db_mtx));
250 	ASSERT(db->db_buf == NULL);
251 	ASSERT(db->db_data_pending == NULL);
252 
253 	dbuf_clear(db);
254 	dbuf_destroy(db);
255 }
256 
257 void
258 dbuf_init(void)
259 {
260 	uint64_t hsize = 1ULL << 16;
261 	dbuf_hash_table_t *h = &dbuf_hash_table;
262 	int i;
263 
264 	/*
265 	 * The hash table is big enough to fill all of physical memory
266 	 * with an average 4K block size.  The table will take up
267 	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
268 	 */
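	/*
	 * Worked example (illustrative): with 8-byte pointers and 16 GB of
	 * physical memory, the loop below stops at hsize = 2^22 buckets
	 * (2^22 * 4096 bytes == 16 GB), so the table occupies
	 * 2^22 * 8 bytes == 32 MB -- i.e. the 2MB/GB estimate above.
	 */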
269 	while (hsize * 4096 < physmem * PAGESIZE)
270 		hsize <<= 1;
271 
272 retry:
273 	h->hash_table_mask = hsize - 1;
274 	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
275 	if (h->hash_table == NULL) {
276 		/* XXX - we should really return an error instead of assert */
277 		ASSERT(hsize > (1ULL << 10));
278 		hsize >>= 1;
279 		goto retry;
280 	}
281 
282 	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
283 	    sizeof (dmu_buf_impl_t),
284 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
285 
286 	for (i = 0; i < DBUF_MUTEXES; i++)
287 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
288 }
289 
290 void
291 dbuf_fini(void)
292 {
293 	dbuf_hash_table_t *h = &dbuf_hash_table;
294 	int i;
295 
296 	for (i = 0; i < DBUF_MUTEXES; i++)
297 		mutex_destroy(&h->hash_mutexes[i]);
298 	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
299 	kmem_cache_destroy(dbuf_cache);
300 }
301 
302 /*
303  * Other stuff.
304  */
305 
306 #ifdef ZFS_DEBUG
307 static void
308 dbuf_verify(dmu_buf_impl_t *db)
309 {
310 	dnode_t *dn;
311 	dbuf_dirty_record_t *dr;
312 
313 	ASSERT(MUTEX_HELD(&db->db_mtx));
314 
315 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
316 		return;
317 
318 	ASSERT(db->db_objset != NULL);
319 	DB_DNODE_ENTER(db);
320 	dn = DB_DNODE(db);
321 	if (dn == NULL) {
322 		ASSERT(db->db_parent == NULL);
323 		ASSERT(db->db_blkptr == NULL);
324 	} else {
325 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
326 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
327 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
328 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
329 		    db->db_blkid == DMU_SPILL_BLKID ||
330 		    !list_is_empty(&dn->dn_dbufs));
331 	}
332 	if (db->db_blkid == DMU_BONUS_BLKID) {
333 		ASSERT(dn != NULL);
334 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
335 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
336 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
337 		ASSERT(dn != NULL);
338 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
339 		ASSERT0(db->db.db_offset);
340 	} else {
341 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
342 	}
343 
344 	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
345 		ASSERT(dr->dr_dbuf == db);
346 
347 	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
348 		ASSERT(dr->dr_dbuf == db);
349 
350 	/*
351 	 * We can't assert that db_size matches dn_datablksz because it
352 	 * can be momentarily different when another thread is doing
353 	 * dnode_set_blksz().
354 	 */
355 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
356 		dr = db->db_data_pending;
357 		/*
358 		 * It should only be modified in syncing context, so
359 		 * make sure we only have one copy of the data.
360 		 */
361 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
362 	}
363 
364 	/* verify db->db_blkptr */
365 	if (db->db_blkptr) {
366 		if (db->db_parent == dn->dn_dbuf) {
367 			/* db is pointed to by the dnode */
368 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
369 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
370 				ASSERT(db->db_parent == NULL);
371 			else
372 				ASSERT(db->db_parent != NULL);
373 			if (db->db_blkid != DMU_SPILL_BLKID)
374 				ASSERT3P(db->db_blkptr, ==,
375 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
376 		} else {
377 			/* db is pointed to by an indirect block */
378 			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
379 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
380 			ASSERT3U(db->db_parent->db.db_object, ==,
381 			    db->db.db_object);
382 			/*
383 			 * dnode_grow_indblksz() can make this fail if we don't
384 			 * have the struct_rwlock.  XXX indblksz no longer
385 			 * grows.  safe to do this now?
386 			 */
387 			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
388 				ASSERT3P(db->db_blkptr, ==,
389 				    ((blkptr_t *)db->db_parent->db.db_data +
390 				    db->db_blkid % epb));
391 			}
392 		}
393 	}
394 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
395 	    (db->db_buf == NULL || db->db_buf->b_data) &&
396 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
397 	    db->db_state != DB_FILL && !dn->dn_free_txg) {
398 		/*
399 		 * If the blkptr isn't set but they have nonzero data,
400 		 * it had better be dirty, otherwise we'll lose that
401 		 * data when we evict this buffer.
402 		 */
403 		if (db->db_dirtycnt == 0) {
404 			uint64_t *buf = db->db.db_data;
405 			int i;
406 
407 			for (i = 0; i < db->db.db_size >> 3; i++) {
408 				ASSERT(buf[i] == 0);
409 			}
410 		}
411 	}
412 	DB_DNODE_EXIT(db);
413 }
414 #endif
415 
416 static void
417 dbuf_update_data(dmu_buf_impl_t *db)
418 {
419 	ASSERT(MUTEX_HELD(&db->db_mtx));
420 	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
421 		ASSERT(!refcount_is_zero(&db->db_holds));
422 		*db->db_user_data_ptr_ptr = db->db.db_data;
423 	}
424 }
425 
426 static void
427 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
428 {
429 	ASSERT(MUTEX_HELD(&db->db_mtx));
430 	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
431 	db->db_buf = buf;
432 	if (buf != NULL) {
433 		ASSERT(buf->b_data != NULL);
434 		db->db.db_data = buf->b_data;
435 		if (!arc_released(buf))
436 			arc_set_callback(buf, dbuf_do_evict, db);
437 		dbuf_update_data(db);
438 	} else {
439 		dbuf_evict_user(db);
440 		db->db.db_data = NULL;
441 		if (db->db_state != DB_NOFILL)
442 			db->db_state = DB_UNCACHED;
443 	}
444 }
445 
446 /*
447  * Loan out an arc_buf for read.  Return the loaned arc_buf.
448  */
449 arc_buf_t *
450 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
451 {
452 	arc_buf_t *abuf;
453 
454 	mutex_enter(&db->db_mtx);
455 	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
456 		int blksz = db->db.db_size;
457 		spa_t *spa;
458 
459 		mutex_exit(&db->db_mtx);
460 		DB_GET_SPA(&spa, db);
461 		abuf = arc_loan_buf(spa, blksz);
462 		bcopy(db->db.db_data, abuf->b_data, blksz);
463 	} else {
464 		abuf = db->db_buf;
465 		arc_loan_inuse_buf(abuf, db);
466 		dbuf_set_data(db, NULL);
467 		mutex_exit(&db->db_mtx);
468 	}
469 	return (abuf);
470 }
471 
472 uint64_t
473 dbuf_whichblock(dnode_t *dn, uint64_t offset)
474 {
475 	if (dn->dn_datablkshift) {
476 		return (offset >> dn->dn_datablkshift);
477 	} else {
478 		ASSERT3U(offset, <, dn->dn_datablksz);
479 		return (0);
480 	}
481 }
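
/*
 * Worked example (illustrative): for a dnode using 128K data blocks,
 * dn_datablkshift is 17, so offset 300000 falls in block
 * 300000 >> 17 == 2 (which covers bytes 262144..393215).
 */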
482 
483 static void
484 dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
485 {
486 	dmu_buf_impl_t *db = vdb;
487 
488 	mutex_enter(&db->db_mtx);
489 	ASSERT3U(db->db_state, ==, DB_READ);
490 	/*
491 	 * All reads are synchronous, so we must have a hold on the dbuf
492 	 */
493 	ASSERT(refcount_count(&db->db_holds) > 0);
494 	ASSERT(db->db_buf == NULL);
495 	ASSERT(db->db.db_data == NULL);
496 	if (db->db_level == 0 && db->db_freed_in_flight) {
497 		/* we were freed in flight; disregard any error */
498 		arc_release(buf, db);
499 		bzero(buf->b_data, db->db.db_size);
500 		arc_buf_freeze(buf);
501 		db->db_freed_in_flight = FALSE;
502 		dbuf_set_data(db, buf);
503 		db->db_state = DB_CACHED;
504 	} else if (zio == NULL || zio->io_error == 0) {
505 		dbuf_set_data(db, buf);
506 		db->db_state = DB_CACHED;
507 	} else {
508 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
509 		ASSERT3P(db->db_buf, ==, NULL);
510 		VERIFY(arc_buf_remove_ref(buf, db));
511 		db->db_state = DB_UNCACHED;
512 	}
513 	cv_broadcast(&db->db_changed);
514 	dbuf_rele_and_unlock(db, NULL);
515 }
516 
517 static void
518 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
519 {
520 	dnode_t *dn;
521 	spa_t *spa;
522 	zbookmark_t zb;
523 	uint32_t aflags = ARC_NOWAIT;
524 
525 	DB_DNODE_ENTER(db);
526 	dn = DB_DNODE(db);
527 	ASSERT(!refcount_is_zero(&db->db_holds));
528 	/* We need the struct_rwlock to prevent db_blkptr from changing. */
529 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
530 	ASSERT(MUTEX_HELD(&db->db_mtx));
531 	ASSERT(db->db_state == DB_UNCACHED);
532 	ASSERT(db->db_buf == NULL);
533 
534 	if (db->db_blkid == DMU_BONUS_BLKID) {
535 		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
536 
537 		ASSERT3U(bonuslen, <=, db->db.db_size);
538 		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
539 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
540 		if (bonuslen < DN_MAX_BONUSLEN)
541 			bzero(db->db.db_data, DN_MAX_BONUSLEN);
542 		if (bonuslen)
543 			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
544 		DB_DNODE_EXIT(db);
545 		dbuf_update_data(db);
546 		db->db_state = DB_CACHED;
547 		mutex_exit(&db->db_mtx);
548 		return;
549 	}
550 
551 	/*
552 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
553 	 * processes the delete record and clears the bp while we are waiting
554 	 * for the dn_mtx (resulting in a "no" from block_freed).
555 	 */
556 	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
557 	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
558 	    BP_IS_HOLE(db->db_blkptr)))) {
559 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
560 
561 		dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
562 		    db->db.db_size, db, type));
563 		DB_DNODE_EXIT(db);
564 		bzero(db->db.db_data, db->db.db_size);
565 		db->db_state = DB_CACHED;
566 		*flags |= DB_RF_CACHED;
567 		mutex_exit(&db->db_mtx);
568 		return;
569 	}
570 
571 	spa = dn->dn_objset->os_spa;
572 	DB_DNODE_EXIT(db);
573 
574 	db->db_state = DB_READ;
575 	mutex_exit(&db->db_mtx);
576 
577 	if (DBUF_IS_L2CACHEABLE(db))
578 		aflags |= ARC_L2CACHE;
579 	if (DBUF_IS_L2COMPRESSIBLE(db))
580 		aflags |= ARC_L2COMPRESS;
581 
582 	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
583 	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
584 	    db->db.db_object, db->db_level, db->db_blkid);
585 
586 	dbuf_add_ref(db, NULL);
587 
588 	(void) arc_read(zio, spa, db->db_blkptr,
589 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
590 	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
591 	    &aflags, &zb);
592 	if (aflags & ARC_CACHED)
593 		*flags |= DB_RF_CACHED;
594 }
595 
596 int
597 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
598 {
599 	int err = 0;
600 	int havepzio = (zio != NULL);
601 	int prefetch;
602 	dnode_t *dn;
603 
604 	/*
605 	 * We don't have to hold the mutex to check db_state because it
606 	 * can't be freed while we have a hold on the buffer.
607 	 */
608 	ASSERT(!refcount_is_zero(&db->db_holds));
609 
610 	if (db->db_state == DB_NOFILL)
611 		return (SET_ERROR(EIO));
612 
613 	DB_DNODE_ENTER(db);
614 	dn = DB_DNODE(db);
615 	if ((flags & DB_RF_HAVESTRUCT) == 0)
616 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
617 
618 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
619 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
620 	    DBUF_IS_CACHEABLE(db);
621 
622 	mutex_enter(&db->db_mtx);
623 	if (db->db_state == DB_CACHED) {
624 		mutex_exit(&db->db_mtx);
625 		if (prefetch)
626 			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
627 			    db->db.db_size, TRUE);
628 		if ((flags & DB_RF_HAVESTRUCT) == 0)
629 			rw_exit(&dn->dn_struct_rwlock);
630 		DB_DNODE_EXIT(db);
631 	} else if (db->db_state == DB_UNCACHED) {
632 		spa_t *spa = dn->dn_objset->os_spa;
633 
634 		if (zio == NULL)
635 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
636 		dbuf_read_impl(db, zio, &flags);
637 
638 		/* dbuf_read_impl has dropped db_mtx for us */
639 
640 		if (prefetch)
641 			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
642 			    db->db.db_size, flags & DB_RF_CACHED);
643 
644 		if ((flags & DB_RF_HAVESTRUCT) == 0)
645 			rw_exit(&dn->dn_struct_rwlock);
646 		DB_DNODE_EXIT(db);
647 
648 		if (!havepzio)
649 			err = zio_wait(zio);
650 	} else {
651 		/*
652 		 * Another reader came in while the dbuf was in flight
653 		 * between UNCACHED and CACHED.  Either a writer will finish
654 		 * writing the buffer (sending the dbuf to CACHED) or the
655 		 * first reader's request will reach the read_done callback
656 		 * and send the dbuf to CACHED.  Otherwise, a failure
657 		 * occurred and the dbuf went to UNCACHED.
658 		 */
659 		mutex_exit(&db->db_mtx);
660 		if (prefetch)
661 			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
662 			    db->db.db_size, TRUE);
663 		if ((flags & DB_RF_HAVESTRUCT) == 0)
664 			rw_exit(&dn->dn_struct_rwlock);
665 		DB_DNODE_EXIT(db);
666 
667 		/* Skip the wait per the caller's request. */
668 		mutex_enter(&db->db_mtx);
669 		if ((flags & DB_RF_NEVERWAIT) == 0) {
670 			while (db->db_state == DB_READ ||
671 			    db->db_state == DB_FILL) {
672 				ASSERT(db->db_state == DB_READ ||
673 				    (flags & DB_RF_HAVESTRUCT) == 0);
674 				cv_wait(&db->db_changed, &db->db_mtx);
675 			}
676 			if (db->db_state == DB_UNCACHED)
677 				err = SET_ERROR(EIO);
678 		}
679 		mutex_exit(&db->db_mtx);
680 	}
681 
682 	ASSERT(err || havepzio || db->db_state == DB_CACHED);
683 	return (err);
684 }
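
/*
 * A hedged caller sketch: dbuf_will_dirty() below reads a held dbuf
 * synchronously, with no parent zio, before dirtying it:
 *
 *	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH);
 *
 * With zio == NULL, dbuf_read() creates its own root zio and waits on it,
 * so on return the dbuf is DB_CACHED or an error has been returned.
 */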
685 
686 static void
687 dbuf_noread(dmu_buf_impl_t *db)
688 {
689 	ASSERT(!refcount_is_zero(&db->db_holds));
690 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
691 	mutex_enter(&db->db_mtx);
692 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
693 		cv_wait(&db->db_changed, &db->db_mtx);
694 	if (db->db_state == DB_UNCACHED) {
695 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
696 		spa_t *spa;
697 
698 		ASSERT(db->db_buf == NULL);
699 		ASSERT(db->db.db_data == NULL);
700 		DB_GET_SPA(&spa, db);
701 		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
702 		db->db_state = DB_FILL;
703 	} else if (db->db_state == DB_NOFILL) {
704 		dbuf_set_data(db, NULL);
705 	} else {
706 		ASSERT3U(db->db_state, ==, DB_CACHED);
707 	}
708 	mutex_exit(&db->db_mtx);
709 }
710 
711 /*
712  * This is our just-in-time copy function.  It makes a copy of
713  * buffers that have been modified in a previous transaction
714  * group, before we modify them in the current active group.
715  *
716  * This function is used in two places: when we are dirtying a
717  * buffer for the first time in a txg, and when we are freeing
718  * a range in a dnode that includes this buffer.
719  *
720  * Note that when we are called from dbuf_free_range() we do
721  * not put a hold on the buffer, we just traverse the active
722  * dbuf list for the dnode.
723  */
724 static void
725 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
726 {
727 	dbuf_dirty_record_t *dr = db->db_last_dirty;
728 
729 	ASSERT(MUTEX_HELD(&db->db_mtx));
730 	ASSERT(db->db.db_data != NULL);
731 	ASSERT(db->db_level == 0);
732 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
733 
734 	if (dr == NULL ||
735 	    (dr->dt.dl.dr_data !=
736 	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
737 		return;
738 
739 	/*
740 	 * If the last dirty record for this dbuf has not yet synced
741 	 * and it's referencing the dbuf data, either:
742 	 *	reset the reference to point to a new copy,
743 	 * or (if there are no active holders)
744 	 *	just null out the current db_data pointer.
745 	 */
746 	ASSERT(dr->dr_txg >= txg - 2);
747 	if (db->db_blkid == DMU_BONUS_BLKID) {
748 		/* Note that the data bufs here are zio_bufs */
749 		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
750 		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
751 		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
752 	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
753 		int size = db->db.db_size;
754 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
755 		spa_t *spa;
756 
757 		DB_GET_SPA(&spa, db);
758 		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
759 		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
760 	} else {
761 		dbuf_set_data(db, NULL);
762 	}
763 }
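
/*
 * Illustrative scenario (not additional code): suppose a level-0 buffer
 * was dirtied in txg N-1, that txg has not finished syncing, and we are
 * now dirtying it again in txg N while other holders still reference
 * db_buf.  dbuf_fix_old_data() gives the txg N-1 dirty record its own
 * arc_buf copy of the data, so syncing txg N-1 writes the old contents
 * while the txg N modifications proceed on db_buf.
 */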
764 
765 void
766 dbuf_unoverride(dbuf_dirty_record_t *dr)
767 {
768 	dmu_buf_impl_t *db = dr->dr_dbuf;
769 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
770 	uint64_t txg = dr->dr_txg;
771 
772 	ASSERT(MUTEX_HELD(&db->db_mtx));
773 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
774 	ASSERT(db->db_level == 0);
775 
776 	if (db->db_blkid == DMU_BONUS_BLKID ||
777 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
778 		return;
779 
780 	ASSERT(db->db_data_pending != dr);
781 
782 	/* free this block */
783 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) {
784 		spa_t *spa;
785 
786 		DB_GET_SPA(&spa, db);
787 		zio_free(spa, txg, bp);
788 	}
789 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
790 	dr->dt.dl.dr_nopwrite = B_FALSE;
791 
792 	/*
793 	 * Release the already-written buffer, so we leave it in
794 	 * a consistent dirty state.  Note that all callers are
795 	 * modifying the buffer, so they will immediately do
796 	 * another (redundant) arc_release().  Therefore, leave
797 	 * the buf thawed to save the effort of freezing &
798 	 * immediately re-thawing it.
799 	 */
800 	arc_release(dr->dt.dl.dr_data, db);
801 }
802 
803 /*
804  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
805  * data blocks in the free range, so that any future readers will find
806  * empty blocks.  Also, if we happen across any level-1 dbufs in the
807  * range that have not already been marked dirty, mark them dirty so
808  * they stay in memory.
809  *
810  * This is a no-op if the dataset is in the middle of an incremental
811  * receive; see comment below for details.
812  */
813 void
814 dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
815 {
816 	dmu_buf_impl_t *db, *db_next;
817 	uint64_t txg = tx->tx_txg;
818 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
819 	uint64_t first_l1 = start >> epbs;
820 	uint64_t last_l1 = end >> epbs;
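	/*
	 * Illustrative: with 16K indirect blocks (dn_indblkshift == 14) and
	 * 128-byte block pointers (SPA_BLKPTRSHIFT == 7), epbs == 7, so each
	 * level-1 block covers 128 level-0 blocks and L0 blkid 300 sits
	 * under L1 blkid 300 >> 7 == 2.
	 */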
821 
822 	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
823 		end = dn->dn_maxblkid;
824 		last_l1 = end >> epbs;
825 	}
826 	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);
827 
828 	mutex_enter(&dn->dn_dbufs_mtx);
829 	if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
830 		/* There can't be any dbufs in this range; no need to search. */
831 		mutex_exit(&dn->dn_dbufs_mtx);
832 		return;
833 	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
834 		/*
835 		 * If we are receiving, we expect there to be no dbufs in
836 		 * the range to be freed, because receive modifies each
837 		 * block at most once, and in offset order.  If this is
838 		 * not the case, it can lead to performance problems,
839 		 * so note that we unexpectedly took the slow path.
840 		 */
841 		atomic_inc_64(&zfs_free_range_recv_miss);
842 	}
843 
844 	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
845 		db_next = list_next(&dn->dn_dbufs, db);
846 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
847 
848 		if (db->db_level == 1 &&
849 		    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
850 			mutex_enter(&db->db_mtx);
851 			if (db->db_last_dirty &&
852 			    db->db_last_dirty->dr_txg < txg) {
853 				dbuf_add_ref(db, FTAG);
854 				mutex_exit(&db->db_mtx);
855 				dbuf_will_dirty(db, tx);
856 				dbuf_rele(db, FTAG);
857 			} else {
858 				mutex_exit(&db->db_mtx);
859 			}
860 		}
861 
862 		if (db->db_level != 0)
863 			continue;
864 		dprintf_dbuf(db, "found buf %s\n", "");
865 		if (db->db_blkid < start || db->db_blkid > end)
866 			continue;
867 
868 		/* found a level 0 buffer in the range */
869 		mutex_enter(&db->db_mtx);
870 		if (dbuf_undirty(db, tx)) {
871 			/* mutex has been dropped and dbuf destroyed */
872 			continue;
873 		}
874 
875 		if (db->db_state == DB_UNCACHED ||
876 		    db->db_state == DB_NOFILL ||
877 		    db->db_state == DB_EVICTING) {
878 			ASSERT(db->db.db_data == NULL);
879 			mutex_exit(&db->db_mtx);
880 			continue;
881 		}
882 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
883 			/* will be handled in dbuf_read_done or dbuf_rele */
884 			db->db_freed_in_flight = TRUE;
885 			mutex_exit(&db->db_mtx);
886 			continue;
887 		}
888 		if (refcount_count(&db->db_holds) == 0) {
889 			ASSERT(db->db_buf);
890 			dbuf_clear(db);
891 			continue;
892 		}
893 		/* The dbuf is referenced */
894 
895 		if (db->db_last_dirty != NULL) {
896 			dbuf_dirty_record_t *dr = db->db_last_dirty;
897 
898 			if (dr->dr_txg == txg) {
899 				/*
900 				 * This buffer is "in-use", re-adjust the file
901 				 * size to reflect that this buffer may
902 				 * contain new data when we sync.
903 				 */
904 				if (db->db_blkid != DMU_SPILL_BLKID &&
905 				    db->db_blkid > dn->dn_maxblkid)
906 					dn->dn_maxblkid = db->db_blkid;
907 				dbuf_unoverride(dr);
908 			} else {
909 				/*
910 				 * This dbuf is not dirty in the open context.
911 	 * Either uncache it (if it's not referenced in
912 				 * the open context) or reset its contents to
913 				 * empty.
914 				 */
915 				dbuf_fix_old_data(db, txg);
916 			}
917 		}
918 	/* clear the contents if it's cached */
919 		if (db->db_state == DB_CACHED) {
920 			ASSERT(db->db.db_data != NULL);
921 			arc_release(db->db_buf, db);
922 			bzero(db->db.db_data, db->db.db_size);
923 			arc_buf_freeze(db->db_buf);
924 		}
925 
926 		mutex_exit(&db->db_mtx);
927 	}
928 	mutex_exit(&dn->dn_dbufs_mtx);
929 }
930 
931 static int
932 dbuf_block_freeable(dmu_buf_impl_t *db)
933 {
934 	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
935 	uint64_t birth_txg = 0;
936 
937 	/*
938 	 * We don't need any locking to protect db_blkptr:
939 	 * If it's syncing, then db_last_dirty will be set
940 	 * so we'll ignore db_blkptr.
941 	 */
942 	ASSERT(MUTEX_HELD(&db->db_mtx));
943 	if (db->db_last_dirty)
944 		birth_txg = db->db_last_dirty->dr_txg;
945 	else if (db->db_blkptr)
946 		birth_txg = db->db_blkptr->blk_birth;
947 
948 	/*
949 	 * If we don't exist or are in a snapshot, we can't be freed.
950 	 * Don't pass the bp to dsl_dataset_block_freeable() since we
951 	 * are holding the db_mtx lock and might deadlock if we are
952 	 * prefetching a dedup-ed block.
953 	 */
954 	if (birth_txg)
955 		return (ds == NULL ||
956 		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
957 	else
958 		return (FALSE);
959 }
960 
961 void
962 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
963 {
964 	arc_buf_t *buf, *obuf;
965 	int osize = db->db.db_size;
966 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
967 	dnode_t *dn;
968 
969 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
970 
971 	DB_DNODE_ENTER(db);
972 	dn = DB_DNODE(db);
973 
974 	/* XXX does *this* func really need the lock? */
975 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
976 
977 	/*
978 	 * This call to dbuf_will_dirty() with the dn_struct_rwlock held
979 	 * is OK, because there can be no other references to the db
980 	 * when we are changing its size, so no concurrent DB_FILL can
981 	 * be happening.
982 	 */
983 	/*
984 	 * XXX we should be doing a dbuf_read, checking the return
985 	 * value and returning that up to our callers
986 	 */
987 	dbuf_will_dirty(db, tx);
988 
989 	/* create the data buffer for the new block */
990 	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);
991 
992 	/* copy old block data to the new block */
993 	obuf = db->db_buf;
994 	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
995 	/* zero the remainder */
996 	if (size > osize)
997 		bzero((uint8_t *)buf->b_data + osize, size - osize);
998 
999 	mutex_enter(&db->db_mtx);
1000 	dbuf_set_data(db, buf);
1001 	VERIFY(arc_buf_remove_ref(obuf, db));
1002 	db->db.db_size = size;
1003 
1004 	if (db->db_level == 0) {
1005 		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
1006 		db->db_last_dirty->dt.dl.dr_data = buf;
1007 	}
1008 	mutex_exit(&db->db_mtx);
1009 
1010 	dnode_willuse_space(dn, size-osize, tx);
1011 	DB_DNODE_EXIT(db);
1012 }
1013 
1014 void
1015 dbuf_release_bp(dmu_buf_impl_t *db)
1016 {
1017 	objset_t *os;
1018 
1019 	DB_GET_OBJSET(&os, db);
1020 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1021 	ASSERT(arc_released(os->os_phys_buf) ||
1022 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
1023 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1024 
1025 	(void) arc_release(db->db_buf, db);
1026 }
1027 
1028 dbuf_dirty_record_t *
1029 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1030 {
1031 	dnode_t *dn;
1032 	objset_t *os;
1033 	dbuf_dirty_record_t **drp, *dr;
1034 	int drop_struct_lock = FALSE;
1035 	boolean_t do_free_accounting = B_FALSE;
1036 	int txgoff = tx->tx_txg & TXG_MASK;
1037 
1038 	ASSERT(tx->tx_txg != 0);
1039 	ASSERT(!refcount_is_zero(&db->db_holds));
1040 	DMU_TX_DIRTY_BUF(tx, db);
1041 
1042 	DB_DNODE_ENTER(db);
1043 	dn = DB_DNODE(db);
1044 	/*
1045 	 * Shouldn't dirty a regular buffer in syncing context.  Private
1046 	 * objects may be dirtied in syncing context, but only if they
1047 	 * were already pre-dirtied in open context.
1048 	 */
1049 	ASSERT(!dmu_tx_is_syncing(tx) ||
1050 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
1051 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1052 	    dn->dn_objset->os_dsl_dataset == NULL);
1053 	/*
1054 	 * We make this assert for private objects as well, but after we
1055 	 * check if we're already dirty.  They are allowed to re-dirty
1056 	 * in syncing context.
1057 	 */
1058 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1059 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1060 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1061 
1062 	mutex_enter(&db->db_mtx);
1063 	/*
1064 	 * XXX make this true for indirects too?  The problem is that
1065 	 * transactions created with dmu_tx_create_assigned() from
1066 	 * syncing context don't bother holding ahead.
1067 	 */
1068 	ASSERT(db->db_level != 0 ||
1069 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
1070 	    db->db_state == DB_NOFILL);
1071 
1072 	mutex_enter(&dn->dn_mtx);
1073 	/*
1074 	 * Don't set dirtyctx to SYNC if we're just modifying this as we
1075 	 * initialize the objset.
1076 	 */
1077 	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
1078 	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
1079 		dn->dn_dirtyctx =
1080 		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
1081 		ASSERT(dn->dn_dirtyctx_firstset == NULL);
1082 		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
1083 	}
1084 	mutex_exit(&dn->dn_mtx);
1085 
1086 	if (db->db_blkid == DMU_SPILL_BLKID)
1087 		dn->dn_have_spill = B_TRUE;
1088 
1089 	/*
1090 	 * If this buffer is already dirty, we're done.
1091 	 */
1092 	drp = &db->db_last_dirty;
1093 	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
1094 	    db->db.db_object == DMU_META_DNODE_OBJECT);
1095 	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
1096 		drp = &dr->dr_next;
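	/*
	 * db_last_dirty is sorted newest-txg-first, so the walk above stops
	 * at the first record with dr_txg <= tx->tx_txg; an exact match
	 * below means this txg has already dirtied the buffer.
	 */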
1097 	if (dr && dr->dr_txg == tx->tx_txg) {
1098 		DB_DNODE_EXIT(db);
1099 
1100 		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
1101 			/*
1102 			 * If this buffer has already been written out,
1103 			 * we now need to reset its state.
1104 			 */
1105 			dbuf_unoverride(dr);
1106 			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
1107 			    db->db_state != DB_NOFILL)
1108 				arc_buf_thaw(db->db_buf);
1109 		}
1110 		mutex_exit(&db->db_mtx);
1111 		return (dr);
1112 	}
1113 
1114 	/*
1115 	 * Only valid if not already dirty.
1116 	 */
1117 	ASSERT(dn->dn_object == 0 ||
1118 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
1119 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
1120 
1121 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
1122 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
1123 	    dn->dn_phys->dn_nlevels > db->db_level ||
1124 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
1125 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
1126 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
1127 
1128 	/*
1129 	 * We should only be dirtying in syncing context if it's the
1130 	 * mos or we're initializing the os or it's a special object.
1131 	 * However, we are allowed to dirty in syncing context provided
1132 	 * we already dirtied it in open context.  Hence we must make
1133 	 * this assertion only if we're not already dirty.
1134 	 */
1135 	os = dn->dn_objset;
1136 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
1137 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
1138 	ASSERT(db->db.db_size != 0);
1139 
1140 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1141 
1142 	if (db->db_blkid != DMU_BONUS_BLKID) {
1143 		/*
1144 		 * Update the accounting.
1145 		 * Note: we delay "free accounting" until after we drop
1146 		 * the db_mtx.  This keeps us from grabbing other locks
1147 		 * (and possibly deadlocking) in bp_get_dsize() while
1148 		 * also holding the db_mtx.
1149 		 */
1150 		dnode_willuse_space(dn, db->db.db_size, tx);
1151 		do_free_accounting = dbuf_block_freeable(db);
1152 	}
1153 
1154 	/*
1155 	 * If this buffer is dirty in an old transaction group we need
1156 	 * to make a copy of it so that the changes we make in this
1157 	 * transaction group won't leak out when we sync the older txg.
1158 	 */
1159 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
1160 	if (db->db_level == 0) {
1161 		void *data_old = db->db_buf;
1162 
1163 		if (db->db_state != DB_NOFILL) {
1164 			if (db->db_blkid == DMU_BONUS_BLKID) {
1165 				dbuf_fix_old_data(db, tx->tx_txg);
1166 				data_old = db->db.db_data;
1167 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
1168 				/*
1169 				 * Release the data buffer from the cache so
1170 				 * that we can modify it without impacting
1171 				 * possible other users of this cached data
1172 				 * block.  Note that indirect blocks and
1173 				 * private objects are not released until the
1174 				 * syncing state (since they are only modified
1175 				 * then).
1176 				 */
1177 				arc_release(db->db_buf, db);
1178 				dbuf_fix_old_data(db, tx->tx_txg);
1179 				data_old = db->db_buf;
1180 			}
1181 			ASSERT(data_old != NULL);
1182 		}
1183 		dr->dt.dl.dr_data = data_old;
1184 	} else {
1185 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
1186 		list_create(&dr->dt.di.dr_children,
1187 		    sizeof (dbuf_dirty_record_t),
1188 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
1189 	}
1190 	dr->dr_dbuf = db;
1191 	dr->dr_txg = tx->tx_txg;
1192 	dr->dr_next = *drp;
1193 	*drp = dr;
1194 
1195 	/*
1196 	 * We could have been freed_in_flight between the dbuf_noread
1197 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
1198 	 * happened after the free.
1199 	 */
1200 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1201 	    db->db_blkid != DMU_SPILL_BLKID) {
1202 		mutex_enter(&dn->dn_mtx);
1203 		dnode_clear_range(dn, db->db_blkid, 1, tx);
1204 		mutex_exit(&dn->dn_mtx);
1205 		db->db_freed_in_flight = FALSE;
1206 	}
1207 
1208 	/*
1209 	 * This buffer is now part of this txg
1210 	 */
1211 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
1212 	db->db_dirtycnt += 1;
1213 	ASSERT3U(db->db_dirtycnt, <=, 3);
1214 
1215 	mutex_exit(&db->db_mtx);
1216 
1217 	if (db->db_blkid == DMU_BONUS_BLKID ||
1218 	    db->db_blkid == DMU_SPILL_BLKID) {
1219 		mutex_enter(&dn->dn_mtx);
1220 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1221 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1222 		mutex_exit(&dn->dn_mtx);
1223 		dnode_setdirty(dn, tx);
1224 		DB_DNODE_EXIT(db);
1225 		return (dr);
1226 	} else if (do_free_accounting) {
1227 		blkptr_t *bp = db->db_blkptr;
1228 		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
1229 		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
1230 		/*
1231 		 * This is only a guess -- if the dbuf is dirty
1232 		 * in a previous txg, we don't know how much
1233 		 * space it will use on disk yet.  We should
1234 		 * really have the struct_rwlock to access
1235 		 * db_blkptr, but since this is just a guess,
1236 		 * it's OK if we get an odd answer.
1237 		 */
1238 		ddt_prefetch(os->os_spa, bp);
1239 		dnode_willuse_space(dn, -willfree, tx);
1240 	}
1241 
1242 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
1243 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
1244 		drop_struct_lock = TRUE;
1245 	}
1246 
1247 	if (db->db_level == 0) {
1248 		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
1249 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
1250 	}
1251 
1252 	if (db->db_level+1 < dn->dn_nlevels) {
1253 		dmu_buf_impl_t *parent = db->db_parent;
1254 		dbuf_dirty_record_t *di;
1255 		int parent_held = FALSE;
1256 
1257 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
1258 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1259 
1260 			parent = dbuf_hold_level(dn, db->db_level+1,
1261 			    db->db_blkid >> epbs, FTAG);
1262 			ASSERT(parent != NULL);
1263 			parent_held = TRUE;
1264 		}
1265 		if (drop_struct_lock)
1266 			rw_exit(&dn->dn_struct_rwlock);
1267 		ASSERT3U(db->db_level+1, ==, parent->db_level);
1268 		di = dbuf_dirty(parent, tx);
1269 		if (parent_held)
1270 			dbuf_rele(parent, FTAG);
1271 
1272 		mutex_enter(&db->db_mtx);
1273 		/*  possible race with dbuf_undirty() */
1274 		if (db->db_last_dirty == dr ||
1275 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
1276 			mutex_enter(&di->dt.di.dr_mtx);
1277 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
1278 			ASSERT(!list_link_active(&dr->dr_dirty_node));
1279 			list_insert_tail(&di->dt.di.dr_children, dr);
1280 			mutex_exit(&di->dt.di.dr_mtx);
1281 			dr->dr_parent = di;
1282 		}
1283 		mutex_exit(&db->db_mtx);
1284 	} else {
1285 		ASSERT(db->db_level+1 == dn->dn_nlevels);
1286 		ASSERT(db->db_blkid < dn->dn_nblkptr);
1287 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
1288 		mutex_enter(&dn->dn_mtx);
1289 		ASSERT(!list_link_active(&dr->dr_dirty_node));
1290 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
1291 		mutex_exit(&dn->dn_mtx);
1292 		if (drop_struct_lock)
1293 			rw_exit(&dn->dn_struct_rwlock);
1294 	}
1295 
1296 	dnode_setdirty(dn, tx);
1297 	DB_DNODE_EXIT(db);
1298 	return (dr);
1299 }
1300 
1301 /*
1302  * Undirty a buffer in the transaction group referenced by the given
1303  * transaction.  Return whether this evicted the dbuf.
1304  */
1305 static boolean_t
1306 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1307 {
1308 	dnode_t *dn;
1309 	uint64_t txg = tx->tx_txg;
1310 	dbuf_dirty_record_t *dr, **drp;
1311 
1312 	ASSERT(txg != 0);
1313 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1314 	ASSERT0(db->db_level);
1315 	ASSERT(MUTEX_HELD(&db->db_mtx));
1316 
1317 	/*
1318 	 * If this buffer is not dirty, we're done.
1319 	 */
1320 	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
1321 		if (dr->dr_txg <= txg)
1322 			break;
1323 	if (dr == NULL || dr->dr_txg < txg)
1324 		return (B_FALSE);
1325 	ASSERT(dr->dr_txg == txg);
1326 	ASSERT(dr->dr_dbuf == db);
1327 
1328 	DB_DNODE_ENTER(db);
1329 	dn = DB_DNODE(db);
1330 
1331 	/*
1332 	 * Note:  This code will probably work even if there are concurrent
1333 	 * holders, but it is untested in that scenario, as the ZPL and
1334 	 * ztest have additional locking (the range locks) that prevents
1335 	 * that type of concurrent access.
1336 	 */
1337 	ASSERT3U(refcount_count(&db->db_holds), ==, db->db_dirtycnt);
1338 
1339 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
1340 
1341 	ASSERT(db->db.db_size != 0);
1342 
1343 	/* XXX would be nice to fix up dn_towrite_space[] */
1344 
1345 	*drp = dr->dr_next;
1346 
1347 	/*
1348 	 * Note that there are three places in dbuf_dirty()
1349 	 * where this dirty record may be put on a list.
1350 	 * Make sure to do a list_remove corresponding to
1351 	 * every one of those list_insert calls.
1352 	 */
1353 	if (dr->dr_parent) {
1354 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
1355 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
1356 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
1357 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
1358 	    db->db_level+1 == dn->dn_nlevels) {
1359 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
1360 		mutex_enter(&dn->dn_mtx);
1361 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
1362 		mutex_exit(&dn->dn_mtx);
1363 	}
1364 	DB_DNODE_EXIT(db);
1365 
1366 	if (db->db_state != DB_NOFILL) {
1367 		dbuf_unoverride(dr);
1368 
1369 		ASSERT(db->db_buf != NULL);
1370 		ASSERT(dr->dt.dl.dr_data != NULL);
1371 		if (dr->dt.dl.dr_data != db->db_buf)
1372 			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
1373 	}
1374 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
1375 
1376 	ASSERT(db->db_dirtycnt > 0);
1377 	db->db_dirtycnt -= 1;
1378 
1379 	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
1380 		arc_buf_t *buf = db->db_buf;
1381 
1382 		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
1383 		dbuf_set_data(db, NULL);
1384 		VERIFY(arc_buf_remove_ref(buf, db));
1385 		dbuf_evict(db);
1386 		return (B_TRUE);
1387 	}
1388 
1389 	return (B_FALSE);
1390 }
1391 
1392 #pragma weak dmu_buf_will_dirty = dbuf_will_dirty
1393 void
1394 dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
1395 {
1396 	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;
1397 
1398 	ASSERT(tx->tx_txg != 0);
1399 	ASSERT(!refcount_is_zero(&db->db_holds));
1400 
1401 	DB_DNODE_ENTER(db);
1402 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
1403 		rf |= DB_RF_HAVESTRUCT;
1404 	DB_DNODE_EXIT(db);
1405 	(void) dbuf_read(db, NULL, rf);
1406 	(void) dbuf_dirty(db, tx);
1407 }
1408 
1409 void
1410 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1411 {
1412 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1413 
1414 	db->db_state = DB_NOFILL;
1415 
1416 	dmu_buf_will_fill(db_fake, tx);
1417 }
1418 
1419 void
1420 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
1421 {
1422 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1423 
1424 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1425 	ASSERT(tx->tx_txg != 0);
1426 	ASSERT(db->db_level == 0);
1427 	ASSERT(!refcount_is_zero(&db->db_holds));
1428 
1429 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
1430 	    dmu_tx_private_ok(tx));
1431 
1432 	dbuf_noread(db);
1433 	(void) dbuf_dirty(db, tx);
1434 }
1435 
1436 #pragma weak dmu_buf_fill_done = dbuf_fill_done
1437 /* ARGSUSED */
1438 void
1439 dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
1440 {
1441 	mutex_enter(&db->db_mtx);
1442 	DBUF_VERIFY(db);
1443 
1444 	if (db->db_state == DB_FILL) {
1445 		if (db->db_level == 0 && db->db_freed_in_flight) {
1446 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1447 			/* we were freed while filling */
1448 			/* XXX dbuf_undirty? */
1449 			bzero(db->db.db_data, db->db.db_size);
1450 			db->db_freed_in_flight = FALSE;
1451 		}
1452 		db->db_state = DB_CACHED;
1453 		cv_broadcast(&db->db_changed);
1454 	}
1455 	mutex_exit(&db->db_mtx);
1456 }
1457 
1458 /*
1459  * Directly assign a provided arc buf to a given dbuf if it's not referenced
1460  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
1461  */
1462 void
1463 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
1464 {
1465 	ASSERT(!refcount_is_zero(&db->db_holds));
1466 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1467 	ASSERT(db->db_level == 0);
1468 	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
1469 	ASSERT(buf != NULL);
1470 	ASSERT(arc_buf_size(buf) == db->db.db_size);
1471 	ASSERT(tx->tx_txg != 0);
1472 
1473 	arc_return_buf(buf, db);
1474 	ASSERT(arc_released(buf));
1475 
1476 	mutex_enter(&db->db_mtx);
1477 
1478 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1479 		cv_wait(&db->db_changed, &db->db_mtx);
1480 
1481 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
1482 
1483 	if (db->db_state == DB_CACHED &&
1484 	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
1485 		mutex_exit(&db->db_mtx);
1486 		(void) dbuf_dirty(db, tx);
1487 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
1488 		VERIFY(arc_buf_remove_ref(buf, db));
1489 		xuio_stat_wbuf_copied();
1490 		return;
1491 	}
1492 
1493 	xuio_stat_wbuf_nocopy();
1494 	if (db->db_state == DB_CACHED) {
1495 		dbuf_dirty_record_t *dr = db->db_last_dirty;
1496 
1497 		ASSERT(db->db_buf != NULL);
1498 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
1499 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
1500 			if (!arc_released(db->db_buf)) {
1501 				ASSERT(dr->dt.dl.dr_override_state ==
1502 				    DR_OVERRIDDEN);
1503 				arc_release(db->db_buf, db);
1504 			}
1505 			dr->dt.dl.dr_data = buf;
1506 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1507 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
1508 			arc_release(db->db_buf, db);
1509 			VERIFY(arc_buf_remove_ref(db->db_buf, db));
1510 		}
1511 		db->db_buf = NULL;
1512 	}
1513 	ASSERT(db->db_buf == NULL);
1514 	dbuf_set_data(db, buf);
1515 	db->db_state = DB_FILL;
1516 	mutex_exit(&db->db_mtx);
1517 	(void) dbuf_dirty(db, tx);
1518 	dbuf_fill_done(db, tx);
1519 }
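
/*
 * A hypothetical zero-copy update sketch using the loan/assign pair
 * defined in this file (assumes "db" is a held level-0 dbuf and "tx" is
 * an assigned transaction):
 *
 *	arc_buf_t *abuf = dbuf_loan_arcbuf(db);
 *	... fill abuf->b_data with db->db.db_size bytes of new data ...
 *	dbuf_assign_arcbuf(db, abuf, tx);
 *
 * If others still reference the dbuf, dbuf_assign_arcbuf() falls back to
 * a bcopy() into the existing buffer instead of adopting abuf.
 */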
1520 
1521 /*
1522  * "Clear" the contents of this dbuf.  This will mark the dbuf
1523  * EVICTING and clear *most* of its references.  Unfortunately,
1524  * when we are not holding the dn_dbufs_mtx, we can't clear the
1525  * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
1526  * in this case.  For callers from the DMU we will usually see:
1527  *	dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
1528  * For the arc callback, we will usually see:
1529  *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
1530  * Sometimes, though, we will get a mix of these two:
1531  *	DMU: dbuf_clear()->arc_buf_evict()
1532  *	ARC: dbuf_do_evict()->dbuf_destroy()
1533  */
1534 void
1535 dbuf_clear(dmu_buf_impl_t *db)
1536 {
1537 	dnode_t *dn;
1538 	dmu_buf_impl_t *parent = db->db_parent;
1539 	dmu_buf_impl_t *dndb;
1540 	int dbuf_gone = FALSE;
1541 
1542 	ASSERT(MUTEX_HELD(&db->db_mtx));
1543 	ASSERT(refcount_is_zero(&db->db_holds));
1544 
1545 	dbuf_evict_user(db);
1546 
1547 	if (db->db_state == DB_CACHED) {
1548 		ASSERT(db->db.db_data != NULL);
1549 		if (db->db_blkid == DMU_BONUS_BLKID) {
1550 			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
1551 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
1552 		}
1553 		db->db.db_data = NULL;
1554 		db->db_state = DB_UNCACHED;
1555 	}
1556 
1557 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1558 	ASSERT(db->db_data_pending == NULL);
1559 
1560 	db->db_state = DB_EVICTING;
1561 	db->db_blkptr = NULL;
1562 
1563 	DB_DNODE_ENTER(db);
1564 	dn = DB_DNODE(db);
1565 	dndb = dn->dn_dbuf;
1566 	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
1567 		list_remove(&dn->dn_dbufs, db);
1568 		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1569 		membar_producer();
1570 		DB_DNODE_EXIT(db);
1571 		/*
1572 		 * Decrementing the dbuf count means that the hold corresponding
1573 		 * to the removed dbuf is no longer discounted in dnode_move(),
1574 		 * so the dnode cannot be moved until after we release the hold.
1575 		 * The membar_producer() ensures visibility of the decremented
1576 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
1577 		 * release any lock.
1578 		 */
1579 		dnode_rele(dn, db);
1580 		db->db_dnode_handle = NULL;
1581 	} else {
1582 		DB_DNODE_EXIT(db);
1583 	}
1584 
1585 	if (db->db_buf)
1586 		dbuf_gone = arc_buf_evict(db->db_buf);
1587 
1588 	if (!dbuf_gone)
1589 		mutex_exit(&db->db_mtx);
1590 
1591 	/*
1592 	 * If this dbuf is referenced from an indirect dbuf,
1593 	 * decrement the ref count on the indirect dbuf.
1594 	 */
1595 	if (parent && parent != dndb)
1596 		dbuf_rele(parent, db);
1597 }
1598 
1599 static int
1600 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
1601     dmu_buf_impl_t **parentp, blkptr_t **bpp)
1602 {
1603 	int nlevels, epbs;
1604 
1605 	*parentp = NULL;
1606 	*bpp = NULL;
1607 
1608 	ASSERT(blkid != DMU_BONUS_BLKID);
1609 
1610 	if (blkid == DMU_SPILL_BLKID) {
1611 		mutex_enter(&dn->dn_mtx);
1612 		if (dn->dn_have_spill &&
1613 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
1614 			*bpp = &dn->dn_phys->dn_spill;
1615 		else
1616 			*bpp = NULL;
1617 		dbuf_add_ref(dn->dn_dbuf, NULL);
1618 		*parentp = dn->dn_dbuf;
1619 		mutex_exit(&dn->dn_mtx);
1620 		return (0);
1621 	}
1622 
1623 	if (dn->dn_phys->dn_nlevels == 0)
1624 		nlevels = 1;
1625 	else
1626 		nlevels = dn->dn_phys->dn_nlevels;
1627 
1628 	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1629 
1630 	ASSERT3U(level * epbs, <, 64);
1631 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1632 	if (level >= nlevels ||
1633 	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
1634 		/* the buffer has no parent yet */
1635 		return (SET_ERROR(ENOENT));
1636 	} else if (level < nlevels-1) {
1637 		/* this block is referenced from an indirect block */
1638 		int err = dbuf_hold_impl(dn, level+1,
1639 		    blkid >> epbs, fail_sparse, NULL, parentp);
1640 		if (err)
1641 			return (err);
1642 		err = dbuf_read(*parentp, NULL,
1643 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
1644 		if (err) {
1645 			dbuf_rele(*parentp, NULL);
1646 			*parentp = NULL;
1647 			return (err);
1648 		}
1649 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
1650 		    (blkid & ((1ULL << epbs) - 1));
1651 		return (0);
1652 	} else {
1653 		/* the block is referenced from the dnode */
1654 		ASSERT3U(level, ==, nlevels-1);
1655 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
1656 		    blkid < dn->dn_phys->dn_nblkptr);
1657 		if (dn->dn_dbuf) {
1658 			dbuf_add_ref(dn->dn_dbuf, NULL);
1659 			*parentp = dn->dn_dbuf;
1660 		}
1661 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
1662 		return (0);
1663 	}
1664 }
1665 
1666 static dmu_buf_impl_t *
1667 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
1668     dmu_buf_impl_t *parent, blkptr_t *blkptr)
1669 {
1670 	objset_t *os = dn->dn_objset;
1671 	dmu_buf_impl_t *db, *odb;
1672 
1673 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1674 	ASSERT(dn->dn_type != DMU_OT_NONE);
1675 
1676 	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);
1677 
1678 	db->db_objset = os;
1679 	db->db.db_object = dn->dn_object;
1680 	db->db_level = level;
1681 	db->db_blkid = blkid;
1682 	db->db_last_dirty = NULL;
1683 	db->db_dirtycnt = 0;
1684 	db->db_dnode_handle = dn->dn_handle;
1685 	db->db_parent = parent;
1686 	db->db_blkptr = blkptr;
1687 
1688 	db->db_user_ptr = NULL;
1689 	db->db_user_data_ptr_ptr = NULL;
1690 	db->db_evict_func = NULL;
1691 	db->db_immediate_evict = 0;
1692 	db->db_freed_in_flight = 0;
1693 
1694 	if (blkid == DMU_BONUS_BLKID) {
1695 		ASSERT3P(parent, ==, dn->dn_dbuf);
1696 		db->db.db_size = DN_MAX_BONUSLEN -
1697 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
1698 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1699 		db->db.db_offset = DMU_BONUS_BLKID;
1700 		db->db_state = DB_UNCACHED;
1701 		/* the bonus dbuf is not placed in the hash table */
1702 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1703 		return (db);
1704 	} else if (blkid == DMU_SPILL_BLKID) {
1705 		db->db.db_size = (blkptr != NULL) ?
1706 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
1707 		db->db.db_offset = 0;
1708 	} else {
1709 		int blocksize =
1710 	    db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
1711 		db->db.db_size = blocksize;
1712 		db->db.db_offset = db->db_blkid * blocksize;
1713 	}
1714 
1715 	/*
1716 	 * Hold the dn_dbufs_mtx while we get the new dbuf
1717 	 * in the hash table *and* added to the dbufs list.
1718 	 * This prevents a possible deadlock with someone
1719 	 * trying to look up this dbuf before it's added to the
1720 	 * dn_dbufs list.
1721 	 */
1722 	mutex_enter(&dn->dn_dbufs_mtx);
1723 	db->db_state = DB_EVICTING;
1724 	if ((odb = dbuf_hash_insert(db)) != NULL) {
1725 		/* someone else inserted it first */
1726 		kmem_cache_free(dbuf_cache, db);
1727 		mutex_exit(&dn->dn_dbufs_mtx);
1728 		return (odb);
1729 	}
1730 	list_insert_head(&dn->dn_dbufs, db);
1731 	if (db->db_level == 0 && db->db_blkid >=
1732 	    dn->dn_unlisted_l0_blkid)
1733 		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
1734 	db->db_state = DB_UNCACHED;
1735 	mutex_exit(&dn->dn_dbufs_mtx);
1736 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1737 
1738 	if (parent && parent != dn->dn_dbuf)
1739 		dbuf_add_ref(parent, db);
1740 
1741 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
1742 	    refcount_count(&dn->dn_holds) > 0);
1743 	(void) refcount_add(&dn->dn_holds, db);
1744 	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);
1745 
1746 	dprintf_dbuf(db, "db=%p\n", db);
1747 
1748 	return (db);
1749 }
1750 
1751 static int
1752 dbuf_do_evict(void *private)
1753 {
1754 	arc_buf_t *buf = private;
1755 	dmu_buf_impl_t *db = buf->b_private;
1756 
1757 	if (!MUTEX_HELD(&db->db_mtx))
1758 		mutex_enter(&db->db_mtx);
1759 
1760 	ASSERT(refcount_is_zero(&db->db_holds));
1761 
1762 	if (db->db_state != DB_EVICTING) {
1763 		ASSERT(db->db_state == DB_CACHED);
1764 		DBUF_VERIFY(db);
1765 		db->db_buf = NULL;
1766 		dbuf_evict(db);
1767 	} else {
1768 		mutex_exit(&db->db_mtx);
1769 		dbuf_destroy(db);
1770 	}
1771 	return (0);
1772 }
1773 
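/*
 * Free a dbuf that has no remaining holds: unlink it from its dnode's
 * dbuf list and from the hash table, release the corresponding dnode
 * hold, and return the structure to the dbuf cache.
 */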
1774 static void
1775 dbuf_destroy(dmu_buf_impl_t *db)
1776 {
1777 	ASSERT(refcount_is_zero(&db->db_holds));
1778 
1779 	if (db->db_blkid != DMU_BONUS_BLKID) {
1780 		/*
1781 		 * If this dbuf is still on the dn_dbufs list,
1782 		 * remove it from that list.
1783 		 */
1784 		if (db->db_dnode_handle != NULL) {
1785 			dnode_t *dn;
1786 
1787 			DB_DNODE_ENTER(db);
1788 			dn = DB_DNODE(db);
1789 			mutex_enter(&dn->dn_dbufs_mtx);
1790 			list_remove(&dn->dn_dbufs, db);
1791 			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1792 			mutex_exit(&dn->dn_dbufs_mtx);
1793 			DB_DNODE_EXIT(db);
1794 			/*
1795 			 * Decrementing the dbuf count means that the hold
1796 			 * corresponding to the removed dbuf is no longer
1797 			 * discounted in dnode_move(), so the dnode cannot be
1798 			 * moved until after we release the hold.
1799 			 */
1800 			dnode_rele(dn, db);
1801 			db->db_dnode_handle = NULL;
1802 		}
1803 		dbuf_hash_remove(db);
1804 	}
1805 	db->db_parent = NULL;
1806 	db->db_buf = NULL;
1807 
1808 	ASSERT(!list_link_active(&db->db_link));
1809 	ASSERT(db->db.db_data == NULL);
1810 	ASSERT(db->db_hash_next == NULL);
1811 	ASSERT(db->db_blkptr == NULL);
1812 	ASSERT(db->db_data_pending == NULL);
1813 
1814 	kmem_cache_free(dbuf_cache, db);
1815 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1816 }
1817 
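/*
 * Issue a speculative, asynchronous read of a level-0 block unless it
 * has been freed or its dbuf already exists.  The ARC read completes on
 * its own; any error is ignored.
 */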
1818 void
1819 dbuf_prefetch(dnode_t *dn, uint64_t blkid)
1820 {
1821 	dmu_buf_impl_t *db = NULL;
1822 	blkptr_t *bp = NULL;
1823 
1824 	ASSERT(blkid != DMU_BONUS_BLKID);
1825 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1826 
1827 	if (dnode_block_freed(dn, blkid))
1828 		return;
1829 
1830 	/* dbuf_find() returns with db_mtx held */
1831 	if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1832 		/*
1833 		 * This dbuf is already in the cache.  We assume that
1834 		 * it is already CACHED, or else about to be either
1835 		 * read or filled.
1836 		 */
1837 		mutex_exit(&db->db_mtx);
1838 		return;
1839 	}
1840 
1841 	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1842 		if (bp && !BP_IS_HOLE(bp)) {
1843 			int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
1844 			    ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
1845 			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1846 			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1847 			zbookmark_t zb;
1848 
1849 			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1850 			    dn->dn_object, 0, blkid);
1851 
1852 			(void) arc_read(NULL, dn->dn_objset->os_spa,
1853 			    bp, NULL, NULL, priority,
1854 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1855 			    &aflags, &zb);
1856 		}
1857 		if (db)
1858 			dbuf_rele(db, NULL);
1859 	}
1860 }
1861 
1862 /*
1863  * Returns with db_holds incremented, and db_mtx not held.
1864  * Note: dn_struct_rwlock must be held.
1865  */
1866 int
1867 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1868     void *tag, dmu_buf_impl_t **dbp)
1869 {
1870 	dmu_buf_impl_t *db, *parent = NULL;
1871 
1872 	ASSERT(blkid != DMU_BONUS_BLKID);
1873 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1874 	ASSERT3U(dn->dn_nlevels, >, level);
1875 
1876 	*dbp = NULL;
1877 top:
1878 	/* dbuf_find() returns with db_mtx held */
1879 	db = dbuf_find(dn, level, blkid);
1880 
1881 	if (db == NULL) {
1882 		blkptr_t *bp = NULL;
1883 		int err;
1884 
1885 		ASSERT3P(parent, ==, NULL);
1886 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1887 		if (fail_sparse) {
1888 			if (err == 0 && bp && BP_IS_HOLE(bp))
1889 				err = SET_ERROR(ENOENT);
1890 			if (err) {
1891 				if (parent)
1892 					dbuf_rele(parent, NULL);
1893 				return (err);
1894 			}
1895 		}
1896 		if (err && err != ENOENT)
1897 			return (err);
1898 		db = dbuf_create(dn, level, blkid, parent, bp);
1899 	}
1900 
1901 	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1902 		arc_buf_add_ref(db->db_buf, db);
1903 		if (db->db_buf->b_data == NULL) {
1904 			dbuf_clear(db);
1905 			if (parent) {
1906 				dbuf_rele(parent, NULL);
1907 				parent = NULL;
1908 			}
1909 			goto top;
1910 		}
1911 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1912 	}
1913 
1914 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1915 
1916 	/*
1917 	 * If this buffer is currently syncing out, and we are
1918 	 * still referencing it from db_data, we need to make a copy
1919 	 * of it in case we decide we want to dirty it again in this txg.
1920 	 */
1921 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1922 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
1923 	    db->db_state == DB_CACHED && db->db_data_pending) {
1924 		dbuf_dirty_record_t *dr = db->db_data_pending;
1925 
1926 		if (dr->dt.dl.dr_data == db->db_buf) {
1927 			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1928 
1929 			dbuf_set_data(db,
1930 			    arc_buf_alloc(dn->dn_objset->os_spa,
1931 			    db->db.db_size, db, type));
1932 			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1933 			    db->db.db_size);
1934 		}
1935 	}
1936 
1937 	(void) refcount_add(&db->db_holds, tag);
1938 	dbuf_update_data(db);
1939 	DBUF_VERIFY(db);
1940 	mutex_exit(&db->db_mtx);
1941 
1942 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
1943 	if (parent)
1944 		dbuf_rele(parent, NULL);
1945 
1946 	ASSERT3P(DB_DNODE(db), ==, dn);
1947 	ASSERT3U(db->db_blkid, ==, blkid);
1948 	ASSERT3U(db->db_level, ==, level);
1949 	*dbp = db;
1950 
1951 	return (0);
1952 }
1953 
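/*
 * Convenience wrapper around dbuf_hold_impl() for level-0 blocks;
 * returns NULL instead of an error code on failure.
 */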
1954 dmu_buf_impl_t *
1955 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1956 {
1957 	dmu_buf_impl_t *db;
1958 	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1959 	return (err ? NULL : db);
1960 }
1961 
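/*
 * As dbuf_hold(), but for a block at an arbitrary indirection level.
 */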
1962 dmu_buf_impl_t *
1963 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1964 {
1965 	dmu_buf_impl_t *db;
1966 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1967 	return (err ? NULL : db);
1968 }
1969 
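/*
 * Instantiate the dbuf for a dnode's bonus buffer.  The caller must
 * hold dn_struct_rwlock as writer and the dnode must not already have
 * a bonus dbuf.
 */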
1970 void
1971 dbuf_create_bonus(dnode_t *dn)
1972 {
1973 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1974 
1975 	ASSERT(dn->dn_bonus == NULL);
1976 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1977 }
1978 
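/*
 * Resize a dnode's spill block, rounding the requested size up to a
 * multiple of SPA_MINBLOCKSIZE and clamping it to SPA_MAXBLOCKSIZE.
 * Returns ENOTSUP if the dbuf is not a spill block.
 */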
1979 int
1980 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
1981 {
1982 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1983 	dnode_t *dn;
1984 
1985 	if (db->db_blkid != DMU_SPILL_BLKID)
1986 		return (SET_ERROR(ENOTSUP));
1987 	if (blksz == 0)
1988 		blksz = SPA_MINBLOCKSIZE;
1989 	if (blksz > SPA_MAXBLOCKSIZE)
1990 		blksz = SPA_MAXBLOCKSIZE;
1991 	else
1992 		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
1993 
1994 	DB_DNODE_ENTER(db);
1995 	dn = DB_DNODE(db);
1996 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1997 	dbuf_new_size(db, blksz, tx);
1998 	rw_exit(&dn->dn_struct_rwlock);
1999 	DB_DNODE_EXIT(db);
2000 
2001 	return (0);
2002 }
2003 
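/* Remove a dnode's spill block by freeing its block range. */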
2004 void
2005 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2006 {
2007 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2008 }
2009 
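/* Add a hold to a dbuf that is already held at least once. */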
2010 #pragma weak dmu_buf_add_ref = dbuf_add_ref
2011 void
2012 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2013 {
2014 	int64_t holds = refcount_add(&db->db_holds, tag);
2015 	ASSERT(holds > 1);
2016 }
2017 
2018 /*
2019  * If you call dbuf_rele() you had better not be referencing the dnode handle
2020  * unless you have some other direct or indirect hold on the dnode. (An indirect
2021  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2022  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2023  * dnode's parent dbuf evicting its dnode handles.
2024  */
2025 #pragma weak dmu_buf_rele = dbuf_rele
2026 void
2027 dbuf_rele(dmu_buf_impl_t *db, void *tag)
2028 {
2029 	mutex_enter(&db->db_mtx);
2030 	dbuf_rele_and_unlock(db, tag);
2031 }
2032 
2033 /*
2034  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2035  * db_dirtycnt and db_holds to be updated atomically.
2036  */
2037 void
2038 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2039 {
2040 	int64_t holds;
2041 
2042 	ASSERT(MUTEX_HELD(&db->db_mtx));
2043 	DBUF_VERIFY(db);
2044 
2045 	/*
2046 	 * Remove the reference to the dbuf before removing its hold on the
2047 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2048 	 * buffer has a corresponding dnode hold.
2049 	 */
2050 	holds = refcount_remove(&db->db_holds, tag);
2051 	ASSERT(holds >= 0);
2052 
2053 	/*
2054 	 * We can't freeze indirects if there is a possibility that they
2055 	 * may be modified in the current syncing context.
2056 	 */
2057 	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2058 		arc_buf_freeze(db->db_buf);
2059 
2060 	if (holds == db->db_dirtycnt &&
2061 	    db->db_level == 0 && db->db_immediate_evict)
2062 		dbuf_evict_user(db);
2063 
2064 	if (holds == 0) {
2065 		if (db->db_blkid == DMU_BONUS_BLKID) {
2066 			mutex_exit(&db->db_mtx);
2067 
2068 			/*
2069 			 * If the dnode moves here, we cannot cross this barrier
2070 			 * until the move completes.
2071 			 */
2072 			DB_DNODE_ENTER(db);
2073 			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2074 			DB_DNODE_EXIT(db);
2075 			/*
2076 			 * The bonus buffer's dnode hold is no longer discounted
2077 			 * in dnode_move(). The dnode cannot move until after
2078 			 * the dnode_rele().
2079 			 */
2080 			dnode_rele(DB_DNODE(db), db);
2081 		} else if (db->db_buf == NULL) {
2082 			/*
2083 			 * This is a special case: we never associated this
2084 			 * dbuf with any data allocated from the ARC.
2085 			 */
2086 			ASSERT(db->db_state == DB_UNCACHED ||
2087 			    db->db_state == DB_NOFILL);
2088 			dbuf_evict(db);
2089 		} else if (arc_released(db->db_buf)) {
2090 			arc_buf_t *buf = db->db_buf;
2091 			/*
2092 			 * This dbuf has anonymous data associated with it.
2093 			 */
2094 			dbuf_set_data(db, NULL);
2095 			VERIFY(arc_buf_remove_ref(buf, db));
2096 			dbuf_evict(db);
2097 		} else {
2098 			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2099 
2100 			/*
2101 			 * A dbuf will be eligible for eviction if either the
2102 			 * 'primarycache' property is set or a duplicate
2103 			 * copy of this buffer is already cached in the arc.
2104 			 *
2105 			 * In the case of the 'primarycache' property, a buffer
2106 			 * is considered for eviction if it matches the
2107 			 * criteria set in the property.
2108 			 *
2109 			 * To decide if our buffer is considered a
2110 			 * duplicate, we must call into the arc to determine
2111 			 * if multiple buffers are referencing the same
2112 			 * block on-disk. If so, then we simply evict
2113 			 * ourselves.
2114 			 */
2115 			if (!DBUF_IS_CACHEABLE(db) ||
2116 			    arc_buf_eviction_needed(db->db_buf))
2117 				dbuf_clear(db);
2118 			else
2119 				mutex_exit(&db->db_mtx);
2120 		}
2121 	} else {
2122 		mutex_exit(&db->db_mtx);
2123 	}
2124 }
2125 
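/* Return the current number of holds on a dbuf. */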
2126 #pragma weak dmu_buf_refcount = dbuf_refcount
2127 uint64_t
2128 dbuf_refcount(dmu_buf_impl_t *db)
2129 {
2130 	return (refcount_count(&db->db_holds));
2131 }
2132 
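/*
 * Attach opaque user data and an eviction callback to a dbuf;
 * equivalent to dmu_buf_update_user() with no expected old value.
 */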
2133 void *
2134 dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2135     dmu_buf_evict_func_t *evict_func)
2136 {
2137 	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2138 	    user_data_ptr_ptr, evict_func));
2139 }
2140 
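/*
 * As dmu_buf_set_user(), but also marks the dbuf for immediate user
 * eviction, i.e. the evict func fires as soon as only dirty holds remain.
 */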
2141 void *
2142 dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2143     dmu_buf_evict_func_t *evict_func)
2144 {
2145 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2146 
2147 	db->db_immediate_evict = TRUE;
2148 	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2149 	    user_data_ptr_ptr, evict_func));
2150 }
2151 
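/*
 * Compare-and-swap the user data attached to a dbuf: install the new
 * user pointer, data pointer, and eviction callback only if the current
 * user pointer matches old_user_ptr; otherwise return the current one
 * unchanged.
 */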
2152 void *
2153 dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2154     void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2155 {
2156 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2157 	ASSERT(db->db_level == 0);
2158 
2159 	ASSERT((user_ptr == NULL) == (evict_func == NULL));
2160 
2161 	mutex_enter(&db->db_mtx);
2162 
2163 	if (db->db_user_ptr == old_user_ptr) {
2164 		db->db_user_ptr = user_ptr;
2165 		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2166 		db->db_evict_func = evict_func;
2167 
2168 		dbuf_update_data(db);
2169 	} else {
2170 		old_user_ptr = db->db_user_ptr;
2171 	}
2172 
2173 	mutex_exit(&db->db_mtx);
2174 	return (old_user_ptr);
2175 }
2176 
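/* Return the user data attached to a (held) dbuf. */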
2177 void *
2178 dmu_buf_get_user(dmu_buf_t *db_fake)
2179 {
2180 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2181 	ASSERT(!refcount_is_zero(&db->db_holds));
2182 
2183 	return (db->db_user_ptr);
2184 }
2185 
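/*
 * Report whether freeing this dbuf's on-disk block would actually free
 * space, as opposed to the block being held by an earlier snapshot.
 */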
2186 boolean_t
2187 dmu_buf_freeable(dmu_buf_t *dbuf)
2188 {
2189 	boolean_t res = B_FALSE;
2190 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2191 
2192 	if (db->db_blkptr)
2193 		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2194 		    db->db_blkptr, db->db_blkptr->blk_birth);
2195 
2196 	return (res);
2197 }
2198 
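/* Return the block pointer backing a dbuf, or NULL if it has none. */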
2199 blkptr_t *
2200 dmu_buf_get_blkptr(dmu_buf_t *db)
2201 {
2202 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2203 	return (dbi->db_blkptr);
2204 }
2205 
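/*
 * Ensure that db_blkptr points at the right location: the dnode's spill
 * slot, the dnode's embedded blkptr array, or the appropriate slot of
 * the parent indirect block (holding the parent first if necessary).
 */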
2206 static void
2207 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2208 {
2209 	/* ASSERT(dmu_tx_is_syncing(tx)) */
2210 	ASSERT(MUTEX_HELD(&db->db_mtx));
2211 
2212 	if (db->db_blkptr != NULL)
2213 		return;
2214 
2215 	if (db->db_blkid == DMU_SPILL_BLKID) {
2216 		db->db_blkptr = &dn->dn_phys->dn_spill;
2217 		BP_ZERO(db->db_blkptr);
2218 		return;
2219 	}
2220 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2221 		/*
2222 		 * This buffer was allocated at a time when there were
2223 		 * no blkptrs available from the dnode, or it was
2224 		 * inappropriate to hook it in (i.e., nlevels mis-match).
2225 		 */
2226 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2227 		ASSERT(db->db_parent == NULL);
2228 		db->db_parent = dn->dn_dbuf;
2229 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2230 		DBUF_VERIFY(db);
2231 	} else {
2232 		dmu_buf_impl_t *parent = db->db_parent;
2233 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2234 
2235 		ASSERT(dn->dn_phys->dn_nlevels > 1);
2236 		if (parent == NULL) {
2237 			mutex_exit(&db->db_mtx);
2238 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2239 			(void) dbuf_hold_impl(dn, db->db_level+1,
2240 			    db->db_blkid >> epbs, FALSE, db, &parent);
2241 			rw_exit(&dn->dn_struct_rwlock);
2242 			mutex_enter(&db->db_mtx);
2243 			db->db_parent = parent;
2244 		}
2245 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2246 		    (db->db_blkid & ((1ULL << epbs) - 1));
2247 		DBUF_VERIFY(db);
2248 	}
2249 }
2250 
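/*
 * Sync a dirty indirect block: read it in if necessary, make sure its
 * block pointer is correct, issue its write, then sync its dirty
 * children as child I/Os before kicking off the write with zio_nowait().
 */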
2251 static void
2252 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2253 {
2254 	dmu_buf_impl_t *db = dr->dr_dbuf;
2255 	dnode_t *dn;
2256 	zio_t *zio;
2257 
2258 	ASSERT(dmu_tx_is_syncing(tx));
2259 
2260 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2261 
2262 	mutex_enter(&db->db_mtx);
2263 
2264 	ASSERT(db->db_level > 0);
2265 	DBUF_VERIFY(db);
2266 
2267 	/* Read the block if it hasn't been read yet. */
2268 	if (db->db_buf == NULL) {
2269 		mutex_exit(&db->db_mtx);
2270 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2271 		mutex_enter(&db->db_mtx);
2272 	}
2273 	ASSERT3U(db->db_state, ==, DB_CACHED);
2274 	ASSERT(db->db_buf != NULL);
2275 
2276 	DB_DNODE_ENTER(db);
2277 	dn = DB_DNODE(db);
2278 	/* Indirect block size must match what the dnode thinks it is. */
2279 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2280 	dbuf_check_blkptr(dn, db);
2281 	DB_DNODE_EXIT(db);
2282 
2283 	/* Provide the pending dirty record to child dbufs */
2284 	db->db_data_pending = dr;
2285 
2286 	mutex_exit(&db->db_mtx);
2287 	dbuf_write(dr, db->db_buf, tx);
2288 
2289 	zio = dr->dr_zio;
2290 	mutex_enter(&dr->dt.di.dr_mtx);
2291 	dbuf_sync_list(&dr->dt.di.dr_children, tx);
2292 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2293 	mutex_exit(&dr->dt.di.dr_mtx);
2294 	zio_nowait(zio);
2295 }
2296 
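/*
 * Sync a dirty level-0 block.  Bonus buffers are simply copied into the
 * dnode phys and their dirty record retired; other buffers may first be
 * copied if still in use, then written out via dbuf_write().
 */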
2297 static void
2298 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2299 {
2300 	arc_buf_t **datap = &dr->dt.dl.dr_data;
2301 	dmu_buf_impl_t *db = dr->dr_dbuf;
2302 	dnode_t *dn;
2303 	objset_t *os;
2304 	uint64_t txg = tx->tx_txg;
2305 
2306 	ASSERT(dmu_tx_is_syncing(tx));
2307 
2308 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2309 
2310 	mutex_enter(&db->db_mtx);
2311 	/*
2312 	 * To be synced, we must be dirtied.  But we
2313 	 * might have been freed after being dirtied.
2314 	 */
2315 	if (db->db_state == DB_UNCACHED) {
2316 		/* This buffer has been freed since it was dirtied */
2317 		ASSERT(db->db.db_data == NULL);
2318 	} else if (db->db_state == DB_FILL) {
2319 		/* This buffer was freed and is now being re-filled */
2320 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2321 	} else {
2322 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2323 	}
2324 	DBUF_VERIFY(db);
2325 
2326 	DB_DNODE_ENTER(db);
2327 	dn = DB_DNODE(db);
2328 
2329 	if (db->db_blkid == DMU_SPILL_BLKID) {
2330 		mutex_enter(&dn->dn_mtx);
2331 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2332 		mutex_exit(&dn->dn_mtx);
2333 	}
2334 
2335 	/*
2336 	 * If this is a bonus buffer, simply copy the bonus data into the
2337 	 * dnode.  It will be written out when the dnode is synced (and it
2338 	 * will be synced, since it must have been dirty for dbuf_sync to
2339 	 * be called).
2340 	 */
2341 	if (db->db_blkid == DMU_BONUS_BLKID) {
2342 		dbuf_dirty_record_t **drp;
2343 
2344 		ASSERT(*datap != NULL);
2345 		ASSERT0(db->db_level);
2346 		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2347 		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2348 		DB_DNODE_EXIT(db);
2349 
2350 		if (*datap != db->db.db_data) {
2351 			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2352 			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2353 		}
2354 		db->db_data_pending = NULL;
2355 		drp = &db->db_last_dirty;
2356 		while (*drp != dr)
2357 			drp = &(*drp)->dr_next;
2358 		ASSERT(dr->dr_next == NULL);
2359 		ASSERT(dr->dr_dbuf == db);
2360 		*drp = dr->dr_next;
2361 		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2362 		ASSERT(db->db_dirtycnt > 0);
2363 		db->db_dirtycnt -= 1;
2364 		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2365 		return;
2366 	}
2367 
2368 	os = dn->dn_objset;
2369 
2370 	/*
2371 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
2372 	 * operation to sneak in. As a result, we need to ensure that we
2373 	 * don't check the dr_override_state until we have returned from
2374 	 * dbuf_check_blkptr.
2375 	 */
2376 	dbuf_check_blkptr(dn, db);
2377 
2378 	/*
2379 	 * If this buffer is in the middle of an immediate write,
2380 	 * wait for the synchronous IO to complete.
2381 	 */
2382 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2383 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2384 		cv_wait(&db->db_changed, &db->db_mtx);
2385 		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2386 	}
2387 
2388 	if (db->db_state != DB_NOFILL &&
2389 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2390 	    refcount_count(&db->db_holds) > 1 &&
2391 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2392 	    *datap == db->db_buf) {
2393 		/*
2394 		 * If this buffer is currently "in use" (i.e., there
2395 		 * are active holds and db_data still references it),
2396 		 * then make a copy before we start the write so that
2397 		 * any modifications from the open txg will not leak
2398 		 * into this write.
2399 		 *
2400 		 * NOTE: this copy does not need to be made for
2401 		 * objects only modified in the syncing context (e.g.
2402 		 * the meta-dnode's DMU_OT_DNODE blocks).
2403 		 */
2404 		int blksz = arc_buf_size(*datap);
2405 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2406 		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2407 		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2408 	}
2409 	db->db_data_pending = dr;
2410 
2411 	mutex_exit(&db->db_mtx);
2412 
2413 	dbuf_write(dr, *datap, tx);
2414 
2415 	ASSERT(!list_link_active(&dr->dr_dirty_node));
2416 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2417 		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2418 		DB_DNODE_EXIT(db);
2419 	} else {
2420 		/*
2421 		 * Although zio_nowait() does not "wait for an IO", it does
2422 		 * initiate the IO. If this is an empty write it seems plausible
2423 		 * that the IO could actually be completed before the nowait
2424 		 * returns. We need to DB_DNODE_EXIT() first in case
2425 		 * zio_nowait() invalidates the dbuf.
2426 		 */
2427 		DB_DNODE_EXIT(db);
2428 		zio_nowait(dr->dr_zio);
2429 	}
2430 }
2431 
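/*
 * Sync each dirty record on the list, dispatching to dbuf_sync_indirect()
 * or dbuf_sync_leaf() according to the dbuf's level.
 */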
2432 void
2433 dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2434 {
2435 	dbuf_dirty_record_t *dr;
2436 
2437 	while ((dr = list_head(list)) != NULL) {
2438 		if (dr->dr_zio != NULL) {
2439 			/*
2440 			 * If we find an already initialized zio then we
2441 			 * are processing the meta-dnode, and we have finished.
2442 			 * The dbufs for all dnodes are put back on the list
2443 			 * during processing, so that we can zio_wait()
2444 			 * these IOs after initiating all child IOs.
2445 			 */
2446 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2447 			    DMU_META_DNODE_OBJECT);
2448 			break;
2449 		}
2450 		list_remove(list, dr);
2451 		if (dr->dr_dbuf->db_level > 0)
2452 			dbuf_sync_indirect(dr, tx);
2453 		else
2454 			dbuf_sync_leaf(dr, tx);
2455 	}
2456 }
2457 
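/*
 * The zio "ready" callback for a dbuf write: the block pointer now
 * reflects the allocated block, so update the dnode's space accounting
 * and maxblkid, and compute the bp's fill count from the new contents.
 */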
2458 /* ARGSUSED */
2459 static void
2460 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2461 {
2462 	dmu_buf_impl_t *db = vdb;
2463 	dnode_t *dn;
2464 	blkptr_t *bp = zio->io_bp;
2465 	blkptr_t *bp_orig = &zio->io_bp_orig;
2466 	spa_t *spa = zio->io_spa;
2467 	int64_t delta;
2468 	uint64_t fill = 0;
2469 	int i;
2470 
2471 	ASSERT(db->db_blkptr == bp);
2472 
2473 	DB_DNODE_ENTER(db);
2474 	dn = DB_DNODE(db);
2475 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2476 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2477 	zio->io_prev_space_delta = delta;
2478 
2479 	if (BP_IS_HOLE(bp)) {
2480 		ASSERT(bp->blk_fill == 0);
2481 		DB_DNODE_EXIT(db);
2482 		return;
2483 	}
2484 
2485 	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2486 	    BP_GET_TYPE(bp) == dn->dn_type) ||
2487 	    (db->db_blkid == DMU_SPILL_BLKID &&
2488 	    BP_GET_TYPE(bp) == dn->dn_bonustype));
2489 	ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2490 
2491 	mutex_enter(&db->db_mtx);
2492 
2493 #ifdef ZFS_DEBUG
2494 	if (db->db_blkid == DMU_SPILL_BLKID) {
2495 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2496 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2497 		    db->db_blkptr == &dn->dn_phys->dn_spill);
2498 	}
2499 #endif
2500 
2501 	if (db->db_level == 0) {
2502 		mutex_enter(&dn->dn_mtx);
2503 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2504 		    db->db_blkid != DMU_SPILL_BLKID)
2505 			dn->dn_phys->dn_maxblkid = db->db_blkid;
2506 		mutex_exit(&dn->dn_mtx);
2507 
2508 		if (dn->dn_type == DMU_OT_DNODE) {
2509 			dnode_phys_t *dnp = db->db.db_data;
2510 			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2511 			    i--, dnp++) {
2512 				if (dnp->dn_type != DMU_OT_NONE)
2513 					fill++;
2514 			}
2515 		} else {
2516 			fill = 1;
2517 		}
2518 	} else {
2519 		blkptr_t *ibp = db->db.db_data;
2520 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2521 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2522 			if (BP_IS_HOLE(ibp))
2523 				continue;
2524 			fill += ibp->blk_fill;
2525 		}
2526 	}
2527 	DB_DNODE_EXIT(db);
2528 
2529 	bp->blk_fill = fill;
2530 
2531 	mutex_exit(&db->db_mtx);
2532 }
2533 
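/*
 * The zio "done" callback for a dbuf write: update dataset block
 * accounting (unless the write was a nopwrite or rewrite), retire the
 * dirty record, and drop the hold taken when the dbuf was dirtied.
 */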
2534 /* ARGSUSED */
2535 static void
2536 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2537 {
2538 	dmu_buf_impl_t *db = vdb;
2539 	blkptr_t *bp = zio->io_bp;
2540 	blkptr_t *bp_orig = &zio->io_bp_orig;
2541 	uint64_t txg = zio->io_txg;
2542 	dbuf_dirty_record_t **drp, *dr;
2543 
2544 	ASSERT0(zio->io_error);
2545 	ASSERT(db->db_blkptr == bp);
2546 
2547 	/*
2548 	 * For nopwrites and rewrites we ensure that the bp matches our
2549 	 * original and bypass all the accounting.
2550 	 */
2551 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2552 		ASSERT(BP_EQUAL(bp, bp_orig));
2553 	} else {
2554 		objset_t *os;
2555 		dsl_dataset_t *ds;
2556 		dmu_tx_t *tx;
2557 
2558 		DB_GET_OBJSET(&os, db);
2559 		ds = os->os_dsl_dataset;
2560 		tx = os->os_synctx;
2561 
2562 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2563 		dsl_dataset_block_born(ds, bp, tx);
2564 	}
2565 
2566 	mutex_enter(&db->db_mtx);
2567 
2568 	DBUF_VERIFY(db);
2569 
2570 	drp = &db->db_last_dirty;
2571 	while ((dr = *drp) != db->db_data_pending)
2572 		drp = &dr->dr_next;
2573 	ASSERT(!list_link_active(&dr->dr_dirty_node));
2574 	ASSERT(dr->dr_txg == txg);
2575 	ASSERT(dr->dr_dbuf == db);
2576 	ASSERT(dr->dr_next == NULL);
2577 	*drp = dr->dr_next;
2578 
2579 #ifdef ZFS_DEBUG
2580 	if (db->db_blkid == DMU_SPILL_BLKID) {
2581 		dnode_t *dn;
2582 
2583 		DB_DNODE_ENTER(db);
2584 		dn = DB_DNODE(db);
2585 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2586 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2587 		    db->db_blkptr == &dn->dn_phys->dn_spill);
2588 		DB_DNODE_EXIT(db);
2589 	}
2590 #endif
2591 
2592 	if (db->db_level == 0) {
2593 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2594 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2595 		if (db->db_state != DB_NOFILL) {
2596 			if (dr->dt.dl.dr_data != db->db_buf)
2597 				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2598 				    db));
2599 			else if (!arc_released(db->db_buf))
2600 				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2601 		}
2602 	} else {
2603 		dnode_t *dn;
2604 
2605 		DB_DNODE_ENTER(db);
2606 		dn = DB_DNODE(db);
2607 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2608 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2609 		if (!BP_IS_HOLE(db->db_blkptr)) {
2610 			int epbs =
2611 			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2612 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2613 			    db->db.db_size);
2614 			ASSERT3U(dn->dn_phys->dn_maxblkid
2615 			    >> (db->db_level * epbs), >=, db->db_blkid);
2616 			arc_set_callback(db->db_buf, dbuf_do_evict, db);
2617 		}
2618 		DB_DNODE_EXIT(db);
2619 		mutex_destroy(&dr->dt.di.dr_mtx);
2620 		list_destroy(&dr->dt.di.dr_children);
2621 	}
2622 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2623 
2624 	cv_broadcast(&db->db_changed);
2625 	ASSERT(db->db_dirtycnt > 0);
2626 	db->db_dirtycnt -= 1;
2627 	db->db_data_pending = NULL;
2628 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2629 }
2630 
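/*
 * Adapters that let zio call dbuf_write_ready()/dbuf_write_done() for
 * NOFILL writes, which bypass the ARC and so carry no arc_buf_t.
 */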
2631 static void
2632 dbuf_write_nofill_ready(zio_t *zio)
2633 {
2634 	dbuf_write_ready(zio, NULL, zio->io_private);
2635 }
2636 
2637 static void
2638 dbuf_write_nofill_done(zio_t *zio)
2639 {
2640 	dbuf_write_done(zio, NULL, zio->io_private);
2641 }
2642 
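/*
 * "Ready" callback for an override (dmu_sync) write, whose zio private
 * data is the dirty record rather than the dbuf itself.
 */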
2643 static void
2644 dbuf_write_override_ready(zio_t *zio)
2645 {
2646 	dbuf_dirty_record_t *dr = zio->io_private;
2647 	dmu_buf_impl_t *db = dr->dr_dbuf;
2648 
2649 	dbuf_write_ready(zio, NULL, db);
2650 }
2651 
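/*
 * "Done" callback for an override write: if the final block pointer does
 * not match the overriding copy, free the overriding block (unless it is
 * a hole) and release the buffer before the common completion handling.
 */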
2652 static void
2653 dbuf_write_override_done(zio_t *zio)
2654 {
2655 	dbuf_dirty_record_t *dr = zio->io_private;
2656 	dmu_buf_impl_t *db = dr->dr_dbuf;
2657 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2658 
2659 	mutex_enter(&db->db_mtx);
2660 	if (!BP_EQUAL(zio->io_bp, obp)) {
2661 		if (!BP_IS_HOLE(obp))
2662 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2663 		arc_release(dr->dt.dl.dr_data, db);
2664 	}
2665 	mutex_exit(&db->db_mtx);
2666 
2667 	dbuf_write_done(zio, NULL, db);
2668 }
2669 
2670 /* Issue I/O to commit a dirty buffer to disk. */
2671 static void
2672 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2673 {
2674 	dmu_buf_impl_t *db = dr->dr_dbuf;
2675 	dnode_t *dn;
2676 	objset_t *os;
2677 	dmu_buf_impl_t *parent = db->db_parent;
2678 	uint64_t txg = tx->tx_txg;
2679 	zbookmark_t zb;
2680 	zio_prop_t zp;
2681 	zio_t *zio;
2682 	int wp_flag = 0;
2683 
2684 	DB_DNODE_ENTER(db);
2685 	dn = DB_DNODE(db);
2686 	os = dn->dn_objset;
2687 
2688 	if (db->db_state != DB_NOFILL) {
2689 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2690 			/*
2691 			 * Private object buffers are released here rather
2692 			 * than in dbuf_dirty() since they are only modified
2693 			 * in the syncing context and we don't want the
2694 			 * overhead of making multiple copies of the data.
2695 			 */
2696 			if (BP_IS_HOLE(db->db_blkptr)) {
2697 				arc_buf_thaw(data);
2698 			} else {
2699 				dbuf_release_bp(db);
2700 			}
2701 		}
2702 	}
2703 
2704 	if (parent != dn->dn_dbuf) {
2705 		/* Our parent is an indirect block. */
2706 		/* We have a dirty parent that has been scheduled for write. */
2707 		ASSERT(parent && parent->db_data_pending);
2708 		/* Our parent's buffer is one level closer to the dnode. */
2709 		ASSERT(db->db_level == parent->db_level-1);
2710 		/*
2711 		 * We're about to modify our parent's db_data by modifying
2712 		 * our block pointer, so the parent must be released.
2713 		 */
2714 		ASSERT(arc_released(parent->db_buf));
2715 		zio = parent->db_data_pending->dr_zio;
2716 	} else {
2717 		/* Our parent is the dnode itself. */
2718 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2719 		    db->db_blkid != DMU_SPILL_BLKID) ||
2720 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2721 		if (db->db_blkid != DMU_SPILL_BLKID)
2722 			ASSERT3P(db->db_blkptr, ==,
2723 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
2724 		zio = dn->dn_zio;
2725 	}
2726 
2727 	ASSERT(db->db_level == 0 || data == db->db_buf);
2728 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2729 	ASSERT(zio);
2730 
2731 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2732 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2733 	    db->db.db_object, db->db_level, db->db_blkid);
2734 
2735 	if (db->db_blkid == DMU_SPILL_BLKID)
2736 		wp_flag = WP_SPILL;
2737 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2738 
2739 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2740 	DB_DNODE_EXIT(db);
2741 
2742 	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2743 		ASSERT(db->db_state != DB_NOFILL);
2744 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2745 		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
2746 		    dbuf_write_override_ready, dbuf_write_override_done, dr,
2747 		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2748 		mutex_enter(&db->db_mtx);
2749 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2750 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2751 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
2752 		mutex_exit(&db->db_mtx);
2753 	} else if (db->db_state == DB_NOFILL) {
2754 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF);
2755 		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2756 		    db->db_blkptr, NULL, db->db.db_size, &zp,
2757 		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
2758 		    ZIO_PRIORITY_ASYNC_WRITE,
2759 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2760 	} else {
2761 		ASSERT(arc_released(data));
2762 		dr->dr_zio = arc_write(zio, os->os_spa, txg,
2763 		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
2764 		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
2765 		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
2766 		    ZIO_FLAG_MUSTSUCCEED, &zb);
2767 	}
2768 }
2769