/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zfeature.h>

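/*
 * Raise the dnode's indirection to the level recorded in
 * dn_next_nlevels[] for this txg: create the new top-level indirect
 * block, migrate the dnode's block pointers into it, and reparent any
 * cached dbufs from the old top level onto the new buffer.
 */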
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
#ifdef	DEBUG
		DB_DNODE_ENTER(child);
		ASSERT3P(DB_DNODE(child), ==, dn);
		DB_DNODE_EXIT(child);
#endif	/* DEBUG */
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

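	/*
	 * The dnode's block pointers (if any were in use) now live in
	 * the new indirect block; clear the originals.
	 */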
	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

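/*
 * Free the "num" blocks pointed to by the block pointers starting at
 * "bp", and charge the freed bytes back against the dnode's space
 * accounting.  When the hole_birth feature is active, each cleared bp
 * is left as a hole that records the logical size, type, and level of
 * what was freed, plus the txg in which the hole was punched.
 */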
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	uint64_t bytesfreed = 0;

	dprintf("ds=%p obj=%llx num=%d\n", ds, dn->dn_object, num);

	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += dsl_dataset_block_kill(ds, bp, tx, B_FALSE);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));

		/*
		 * Save some useful information on the holes being
		 * punched, including logical size, type, and indirection
		 * level. Retaining birth time enables detection of when
		 * holes are punched for reducing the number of free
		 * records transmitted during a zfs send.
		 */

		uint64_t lsize = BP_GET_LSIZE(bp);
		dmu_object_type_t type = BP_GET_TYPE(bp);
		uint64_t lvl = BP_GET_LEVEL(bp);

		bzero(bp, sizeof (blkptr_t));

		if (spa_feature_is_active(dn->dn_objset->os_spa,
		    SPA_FEATURE_HOLE_BIRTH)) {
			BP_SET_LSIZE(bp, lsize);
			BP_SET_TYPE(bp, type);
			BP_SET_LEVEL(bp, lvl);
			BP_SET_BIRTH(bp, dmu_tx_get_txg(tx), 0);
		}
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
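/*
 * Debug-only sanity check: verify that the level-0 children of "db"
 * covering the block range [start, end] contain only zeroed data once
 * freed, unless they have been dirtied again in a later txg.
 */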
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&dn->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    (void *)child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
	DB_DNODE_EXIT(db);
}
#endif

#define	ALL -1

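/*
 * Free the portion of the level-0 block range [blkid, blkid + nblks)
 * that lies beneath the indirect dbuf "db", recursing through any
 * lower-level indirects.  If this leaves "db" holding nothing but
 * holes, free its own block as well.
 */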
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift;

	/*
	 * There is a small possibility that this block will not be cached:
	 *   1 - if level > 1 and there are no children with level <= 1
	 *   2 - if this block was evicted since we read it from
	 *	 dmu_tx_hold_free().
	 */
	if (db->db_state != DB_CACHED)
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);

	dbuf_release_bp(db);
	bp = db->db.db_data;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;

	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
	} else {
		for (i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, db->db_level - 1,
			    i, B_TRUE, FTAG, &subdb));
			rw_exit(&dn->dn_struct_rwlock);
			ASSERT3P(bp, ==, subdb->db_blkptr);

			free_children(subdb, blkid, nblks, tx);
			dbuf_rele(subdb, FTAG);
		}
	}

	/* If this whole block is free, free ourselves too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
	}
	if (i == 1 << epbs) {
		/* didn't find any non-holes */
		bzero(db->db.db_data, db->db.db_size);
		free_blocks(dn, db->db_blkptr, 1, tx);
	} else {
		/*
		 * Partial block free; must be marked dirty so that it
		 * will be written out.
		 */
		ASSERT(db->db_dirtycnt > 0);
	}

	DB_DNODE_EXIT(db);
	arc_buf_freeze(db->db_buf);
}

/*
 * Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	int dnlevel = dn->dn_phys->dn_nlevels;
	boolean_t trunc = B_FALSE;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	if (blkid + nblks > dn->dn_phys->dn_maxblkid) {
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
	} else {
		int shift = (dnlevel - 1) *
		    (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
		int start = blkid >> shift;
		int end = (blkid + nblks - 1) >> shift;
		dmu_buf_impl_t *db;

		ASSERT(start < dn->dn_phys->dn_nblkptr);
		bp += start;
		for (int i = start; i <= end; i++, bp++) {
			if (BP_IS_HOLE(bp))
				continue;
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			VERIFY0(dbuf_hold_impl(dn, dnlevel - 1, i,
			    TRUE, FTAG, &db));
			rw_exit(&dn->dn_struct_rwlock);

			free_children(db, blkid, nblks, tx);
			dbuf_rele(db, FTAG);
		}
	}

	if (trunc) {
		dn->dn_phys->dn_maxblkid = blkid == 0 ? 0 : blkid - 1;

		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, 0, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
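		/*
		 * A stack-allocated marker dbuf is appended to dn_dbufs
		 * so we can tell when one full pass over the list is
		 * complete, while each visited dbuf is rotated to the
		 * tail.
		 */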
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
#ifdef	DEBUG
			DB_DNODE_ENTER(db);
			ASSERT3P(DB_DNODE(db), ==, dn);
			DB_DNODE_EXIT(db);
#endif	/* DEBUG */

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

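/*
 * Discard the dirty records on "list" without writing them out:
 * recursively undirty any child records, drop each record's hold on
 * its dbuf, and free the record itself.
 */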
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		if (db->db_level != 0)
			dnode_undirty_dbufs(&dr->dt.di.dr_children);

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
	}
}

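/*
 * Final syncing-context pass over a dnode that is being freed: discard
 * any remaining dirty state, evict its dbufs, zero the on-disk dnode,
 * and reset the in-core dnode so its slot can be reused.
 */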
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * Our contents should have been freed in dnode_sync() by the
	 * free range record inserted by the caller of dnode_free().
	 */
	ASSERT0(DN_USED_BYTES(dn->dn_phys));
	ASSERT(BP_IS_HOLE(dn->dn_phys->dn_blkptr));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);
	ASSERT3P(dn->dn_bonus, ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dmu_buf_will_dirty(&dn->dn_dbuf->db, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	dn->dn_have_spill = B_FALSE;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.  Must be called in syncing
 * context (see the dmu_tx_is_syncing() assert below).
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];
	static const dnode_phys_t zerodn = { 0 };
	boolean_t kill_spill = B_FALSE;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	ASSERT(dnp->dn_type != DMU_OT_NONE ||
	    bcmp(dnp, &zerodn, DNODE_SIZE) == 0);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	if (dmu_objset_userused_enabled(dn->dn_objset) &&
	    !DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_oldused = DN_USED_BYTES(dn->dn_phys);
		dn->dn_oldflags = dn->dn_phys->dn_flags;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USERUSED_ACCOUNTED;
		mutex_exit(&dn->dn_mtx);
		dmu_objset_userquota_get_ids(dn, B_FALSE, tx);
	} else {
		/* Once we account for it, we should always account for it. */
		ASSERT(!(dn->dn_phys->dn_flags &
		    DNODE_FLAG_USERUSED_ACCOUNTED));
	}

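	/*
	 * Under dn_mtx, fold this txg's open-context changes (newly
	 * allocated state and the per-txg dn_next_* values) into the
	 * on-disk dnode, clearing each dn_next_* entry as it is applied.
	 */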
	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			dnp->dn_nlevels = 1;
			dnp->dn_nblkptr = dn->dn_nblkptr;
		}

		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_type[txgoff] != 0) {
		dnp->dn_type = dn->dn_type;
		dn->dn_next_type[txgoff] = 0;
	}

	if (dn->dn_next_blksz[txgoff] != 0) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    dn->dn_maxblkid == 0 || list_head(list) != NULL ||
		    avl_last(&dn->dn_ranges[txgoff]) ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff] != 0) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_bonustype[txgoff] != 0) {
		ASSERT(DMU_OT_IS_VALID(dn->dn_next_bonustype[txgoff]));
		dnp->dn_bonustype = dn->dn_next_bonustype[txgoff];
		dn->dn_next_bonustype[txgoff] = 0;
	}

	boolean_t freeing_dnode = dn->dn_free_txg > 0 &&
	    dn->dn_free_txg <= tx->tx_txg;

	/*
	 * Remove the spill block if we have been explicitly asked to,
	 * or if the dnode is being freed and it still has one.
	 */
	if (dn->dn_rm_spillblk[txgoff] ||
	    ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) && freeing_dnode)) {
		if ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			kill_spill = B_TRUE;
		dn->dn_rm_spillblk[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff] != 0) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	if (kill_spill) {
		free_blocks(dn, &dn->dn_phys->dn_spill, 1, tx);
		mutex_enter(&dn->dn_mtx);
		dnp->dn_flags &= ~DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/* process all the "freed" ranges in the file */
	while ((rp = avl_last(&dn->dn_ranges[txgoff])) != NULL) {
		dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
		/* grab the mutex so we don't race with dnode_block_freed() */
		mutex_enter(&dn->dn_mtx);
		avl_remove(&dn->dn_ranges[txgoff], rp);
		mutex_exit(&dn->dn_mtx);
		kmem_free(rp, sizeof (free_range_t));
	}

	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nblkptr[txgoff]) {
		/* this should only happen on a realloc */
		ASSERT(dn->dn_allocated_txg == tx->tx_txg);
		if (dn->dn_next_nblkptr[txgoff] > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
		} else {
			int i;
			ASSERT(dn->dn_next_nblkptr[txgoff] < dnp->dn_nblkptr);
			/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
		}
		mutex_enter(&dn->dn_mtx);
		dnp->dn_nblkptr = dn->dn_next_nblkptr[txgoff];
		dn->dn_next_nblkptr[txgoff] = 0;
		mutex_exit(&dn->dn_mtx);
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}
711