/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>

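/*
 * Raise the dnode's on-disk indirection to the level recorded in
 * dn_next_nlevels for this txg: hold a new top-level indirect block,
 * copy the dnode's existing block pointers into it, reparent any
 * cached child dbufs onto the new buffer, and zero the old pointers
 * in the dnode phys.
 */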
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n",
	    dn->dn_objset, dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
		ASSERT3P(child->db_dnode, ==, dn);
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

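/*
 * Free "num" block pointers starting at "bp", charging the freed
 * bytes back against the dnode's space accounting and zeroing each
 * pointer as it goes.
 */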
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += bp_get_dasize(os->os_spa, bp);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		dsl_dataset_block_kill(os->os_dsl_dataset, bp, dn->dn_zio, tx);
		bzero(bp, sizeof (blkptr_t));
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
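/*
 * Debug check: verify that every level-0 block under "db" in the
 * range [start, end] that was freed this txg is actually zeroed,
 * both in any dirty record for this txg and in the cached buffer
 * (unless the buffer has been redirtied in a future txg).
 */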
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif

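/*
 * Recursively free the blocks in the range [blkid, blkid + nblks)
 * beneath the indirect block "db".  Returns TRUE if every pointer in
 * "db" was freed (so the caller may free "db" itself), FALSE if any
 * survive.  "trunc" indicates the range extends to the end of the
 * object.
 */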
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int all = TRUE;

	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	/* clamp the range to the portion covered by this indirect block */
	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

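	/*
	 * At level 1, the bps point directly at data blocks, so free
	 * them all in one call.  FREE_VERIFY expands to free_verify()
	 * only in ZFS_DEBUG builds.
	 */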
	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || db->db_last_dirty);
		return (all);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx)) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || db->db_last_dirty);
	return (all);
}

/*
 * dnode_sync_free_range: traverse the indicated range of the provided
 * object and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
		}
		return;
	}

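	/*
	 * The object has indirect blocks: recurse through the top-level
	 * block pointers that cover the range, freeing each subtree and
	 * then the top-level pointer itself once its subtree is empty.
	 */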
	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx)) {
			ASSERT3P(db->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
int
dnode_evict_dbufs(dnode_t *dn, int try)
{
	int progress;
	int pass = 0;

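	/*
	 * Walk dn_dbufs, clearing any dbuf with no holds.  An on-stack
	 * marker bounds each pass: visited dbufs are moved to the tail,
	 * and the pass ends when we reach the marker again.
	 */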
	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				ASSERT(!arc_released(db->db_buf));
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	/*
	 * This function works fine even if it can't evict everything.
	 * If we were only asked to try to evict everything, then
	 * return an error if we can't.  Otherwise panic, as the caller
	 * expects total eviction.
	 */
	if (list_head(&dn->dn_dbufs) != NULL) {
		if (try) {
			return (1);
		} else {
			panic("dangling dbufs (dn=%p, dbuf=%p)\n",
			    dn, list_head(&dn->dn_dbufs));
		}
	}

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
	return (0);
}

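/*
 * Discard the dirty records on "list" without writing them out,
 * recursing into each indirect record's children and dropping the
 * hold that dirtying took on each dbuf.
 */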
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
			mutex_exit(&db->db_mtx);
		} else {
			mutex_exit(&db->db_mtx);
			dnode_undirty_dbufs(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele(db, (void *)(uintptr_t)txg);
	}
}

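/*
 * Sync a dnode that has been freed this txg: undirty and evict its
 * dbufs, release all of its blocks, and zero the on-disk dnode so
 * the slot can be reused.
 */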
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	(void) dnode_evict_dbufs(dn, 0);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* free up all the blocks in the file. */
	dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}


/*
 * Write out the dnode's dirty buffers.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
		for (rp = avl_last(&dn->dn_ranges[txgoff]); rp != NULL;
		    rp = AVL_PREV(&dn->dn_ranges[txgoff], rp))
			dnode_sync_free_range(dn,
			    rp->fr_blkid, rp->fr_nblks, tx);
	}
	mutex_enter(&dn->dn_mtx);
	for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
		free_range_t *last = rp;
		rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
		avl_remove(&dn->dn_ranges[txgoff], last);
		kmem_free(last, sizeof (free_range_t));
	}
	mutex_exit(&dn->dn_mtx);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		dnode_sync_free(dn, tx);
		return;
	}

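	/*
	 * If open context grew the dnode's indirection this txg, apply
	 * that change now, before syncing the dirty records below it.
	 */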
	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (dn->dn_object != DMU_META_DNODE_OBJECT) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}