/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

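/*
 * Raise the dnode's indirection to the level recorded in
 * dn_next_nlevels[] for this txg: move the block pointers embedded in
 * the dnode into a new top-level indirect block and re-parent any
 * cached child dbufs onto it.
 */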
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

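/*
 * Free the blocks in the given array of block pointers and charge the
 * freed space against the dnode's space accounting.  If the hole_birth
 * feature is active, preserve the logical size, type, indirection
 * level, and birth txg of each punched hole.
 */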
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
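/*
 * Debug-only sanity check: verify that the level-0 blocks in the range
 * [start, end] beneath this level-1 indirect block were zeroed out when
 * they were freed.
 */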
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

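/*
 * Free all blocks in the range [blkid, blkid + nblks) that live beneath
 * this indirect block, recursing through any lower-level indirects.  If
 * that leaves this block with no allocated children, free it as well;
 * otherwise leave it dirty so the partial free is written out.
 */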
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
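	/*
	 * Clamp [blkid, blkid + nblks) to the portion of the file covered
	 * by this indirect block, expressed in units of this block's
	 * children, and advance bp to the first affected block pointer.
	 */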
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, B_TRUE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourselves too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

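/*
 * Callback passed to range_tree_vacate().  The caller holds dn_mtx to
 * protect the range tree, so drop it across the actual free, which
 * needs to take other dnode and dbuf locks.
 */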
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	dmu_buf_impl_t db_marker;
	dmu_buf_impl_t *db, *db_next;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = avl_first(&dn->dn_dbufs); db != NULL; db = db_next) {

#ifdef	DEBUG
		DB_DNODE_ENTER(db);
		ASSERT3P(DB_DNODE(db), ==, dn);
		DB_DNODE_EXIT(db);
#endif	/* DEBUG */

		mutex_enter(&db->db_mtx);
		if (db->db_state != DB_EVICTING &&
		    refcount_is_zero(&db->db_holds)) {
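			/*
			 * Insert a DB_SEARCH marker before this dbuf so
			 * iteration can safely resume after dbuf_clear(),
			 * which may remove the dbuf (and possibly others)
			 * from dn_dbufs.
			 */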
			db_marker.db_level = db->db_level;
			db_marker.db_blkid = db->db_blkid;
			db_marker.db_state = DB_SEARCH;
			avl_insert_here(&dn->dn_dbufs, &db_marker, db,
			    AVL_BEFORE);

			dbuf_clear(db);

			db_next = AVL_NEXT(&dn->dn_dbufs, &db_marker);
			avl_remove(&dn->dn_dbufs, &db_marker);
		} else {
			mutex_exit(&db->db_mtx);
			db_next = AVL_NEXT(&dn->dn_dbufs, db);
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

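/*
 * Discard every dirty record on the given list, recursing into the
 * children of indirect dbufs, and drop the hold each record had on its
 * dbuf.
 */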
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		} else {
			mutex_destroy(&dr->dt.di.dr_mtx);
			list_destroy(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

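/*
 * Called from dnode_sync() when the dnode is being freed in this txg:
 * throw away any remaining dirty state, evict the dnode's dbufs, zero
 * the on-disk dnode, reset the in-core dnode, and release the hold that
 * kept the dnode instantiated.
 */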
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT(avl_is_empty(&dn->dn_dbufs));

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}
	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_IS_EMBEDDED(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	ASSERT(dnp->dn_nlevels < 2 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) == 1 << dnp->dn_indblkshift);

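	/*
	 * Push any type, block size, bonus length, and bonus type changes
	 * made in open context out to the on-disk dnode, and clear the
	 * per-txg "next" values now that they have been applied.
	 */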
	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

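	/*
	 * The object is being freed if dnode_free() was called in or
	 * before this txg.
	 */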
	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to
	 * remove it, or if the object is being removed.
	 */
	if (dn->dn_rm_spillblk[txgoff] || freeing_dnode) {
		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
	}

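	/*
	 * If the indirection level grew in open context, build the new
	 * top-level indirect block now, before the dirty dbufs below it
	 * are synced out.
	 */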
	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	dbuf_sync_list(list, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}