/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>

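/*
 * Raise the dnode's on-disk indirection to the level already recorded in
 * dn_next_nlevels[] for this txg: hold the new top-level indirect block,
 * copy the dnode's existing block pointers into it, reparent any cached
 * child dbufs to it, and zero the old pointers in dn_phys.
 */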
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

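/*
 * Free "num" block pointers starting at "bp", charging the freed bytes
 * back against the dnode's space accounting.  When the hole_birth feature
 * is active, each zeroed bp is rewritten as a hole that keeps its logical
 * size, type, level, and birth txg, so that zfs send can tell when the
 * hole was punched.
 */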
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
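/*
 * Debug-only check that the freed range of level-0 children beneath "db"
 * was actually zeroed: any dirty record for this txg, and any cached data
 * not dirtied in a later txg, must contain nothing but zeros.
 */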
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

#define	ALL -1

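/*
 * Free all blocks in the range [blkid, blkid + nblks) that fall beneath
 * the indirect block "db", recursing through any lower-level indirects.
 * If every pointer in this indirect ends up a hole, free the indirect
 * block itself as well.
 */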
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
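	/*
	 * epbs is log2 of the number of block pointers per indirect block,
	 * so each pointer at this dbuf's level covers 2^shift data blocks.
	 * (For example, a 16K indirect block holds 16K / 128 = 128 bps,
	 * giving epbs = 7.)  Below, clamp the requested [start, end] range
	 * of data blocks to the slots this indirect block actually covers.
	 */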
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, B_TRUE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourselves too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
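		/*
		 * shift is how many data blocks each of the dnode's
		 * top-level block pointers covers; use it to turn the
		 * data-block range into a range of top-level slots.
		 */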
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;

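/*
 * Callback for range_tree_vacate() in dnode_sync(); the caller holds
 * dn_mtx, so drop it around the actual free, which can block on I/O.
 */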
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
	dnode_sync_free_range_arg_t *dsfra = arg;
	dnode_t *dn = dsfra->dsfra_dnode;

	mutex_exit(&dn->dn_mtx);
	dnode_sync_free_range_impl(dn, blkid, nblks, dsfra->dsfra_tx);
	mutex_enter(&dn->dn_mtx);
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

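		/*
		 * Walk dn_dbufs with a stack-allocated marker: each dbuf
		 * we visit is rotated to the tail of the list, so the pass
		 * is complete when the marker reaches the head, even though
		 * eviction removes entries out from under us.
		 */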
		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
#ifdef	DEBUG
			DB_DNODE_ENTER(db);
			ASSERT3P(DB_DNODE(db), ==, dn);
			DB_DNODE_EXIT(db);
#endif	/* DEBUG */

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

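/*
 * Discard (rather than write out) every dirty record on "list", recursing
 * through indirect records' children.  Only used when the dnode itself is
 * being freed, so none of this state will ever reach disk.
 */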
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

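/*
 * Sync the destruction of a freed dnode: throw away its dirty state,
 * evict its dbufs, zero its on-disk image, reset the in-core dnode,
 * and drop the hold that was taken when it was dirtied.
 */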
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);
	ASSERT3P(dn->dn_bonus, ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers: push new in-core state out to the
 * on-disk dnode (dn_phys), process any pending free ranges, and sync the
 * dirty dbufs for this txg.
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec ||
		    range_tree_space(dn->dn_free_ranges[txgoff]) != 0);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

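	/*
	 * A nonzero dn_free_txg means dnode_free() has been called; if that
	 * txg is now syncing (or has passed), destroy the dnode below once
	 * its blocks and any spill block have been dealt with.
	 */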
	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to, or
	 * implicitly because a dnode that has one is being freed.
	 */
	if (dn->dn_rm_spillblk[txgoff] ||
	    ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) && freeing_dnode)) {
		if ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_ranges[txgoff] != NULL) {
		dnode_sync_free_range_arg_t dsfra;
		dsfra.dsfra_dnode = dn;
		dsfra.dsfra_tx = tx;
		mutex_enter(&dn->dn_mtx);
		range_tree_vacate(dn->dn_free_ranges[txgoff],
		    dnode_sync_free_range, &dsfra);
		range_tree_destroy(dn->dn_free_ranges[txgoff]);
		dn->dn_free_ranges[txgoff] = NULL;
		mutex_exit(&dn->dn_mtx);
	}

	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it is written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}