xref: /titanic_50/usr/src/uts/common/fs/zfs/dnode_sync.c (revision 936b7af69172dce89b577831f79c0e18d15e854b)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>

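/*
 * Grow the dnode's indirection to the level recorded in
 * dn_next_nlevels[] for this txg: hold the new top-level indirect
 * block, migrate the block pointers out of the dnode into it, and
 * repoint any cached child dbufs at the old top level to the new
 * parent before zeroing the dnode's own blkptrs.
 */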
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;
	int nblkptr = dn->dn_phys->dn_nblkptr;
	int old_toplvl = dn->dn_phys->dn_nlevels - 1;
	int new_level = dn->dn_next_nlevels[txgoff];
	int i;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* this dnode can't be paged out because it's dirty */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	ASSERT(new_level > 1 && dn->dn_phys->dn_nlevels > 0);

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);

	dn->dn_phys->dn_nlevels = new_level;
	dprintf("os=%p obj=%llu, increase to %d\n", dn->dn_objset,
	    dn->dn_object, dn->dn_phys->dn_nlevels);

	/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != nblkptr) {
		/* transfer dnode's block pointers to new indirect block */
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED|DB_RF_HAVESTRUCT);
		ASSERT(db->db.db_data);
		ASSERT(arc_released(db->db_buf));
		ASSERT3U(sizeof (blkptr_t) * nblkptr, <=, db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < nblkptr; i++) {
		dmu_buf_impl_t *child = dbuf_find(dn, old_toplvl, i);

		if (child == NULL)
			continue;
		ASSERT3P(child->db_dnode, ==, dn);
		if (child->db_parent && child->db_parent != dn->dn_dbuf) {
			ASSERT(child->db_parent->db_level == db->db_level);
			ASSERT(child->db_blkptr !=
			    &dn->dn_phys->dn_blkptr[child->db_blkid]);
			mutex_exit(&child->db_mtx);
			continue;
		}
		ASSERT(child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf);

		child->db_parent = db;
		dbuf_add_ref(db, child);
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr, sizeof (blkptr_t) * nblkptr);

	dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
}

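/*
 * Free the blocks in the given array of block pointers: tell the
 * dataset layer to kill each non-hole bp, zero it out, and credit the
 * freed space back against the dnode's space accounting.
 */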
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += bp_get_dasize(os->os_spa, bp);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		dsl_dataset_block_kill(os->os_dsl_dataset, bp, dn->dn_zio, tx);
		bzero(bp, sizeof (blkptr_t));
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
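/*
 * Debug-only check: for each level-0 child in the range just freed,
 * verify that both its dirty data for this txg and its cached buffer
 * contents have actually been zeroed.
 */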
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid << epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		dmu_buf_impl_t *child;
		dbuf_dirty_record_t *dr;
		int j;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		dr = child->db_last_dirty;
		while (dr && dr->dr_txg > txg)
			dr = dr->dr_next;
		ASSERT(dr == NULL || dr->dr_txg == txg);

		/* data_old better be zeroed */
		if (dr) {
			buf = dr->dt.dl.dr_data->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    child->db_last_dirty == NULL) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif

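/*
 * Recursively free the blocks under this indirect block that fall
 * within [blkid, blkid + nblks).  Returns TRUE if every block pointer
 * in this indirect block was freed, in which case the caller may free
 * the indirect block itself.
 */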
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int all = TRUE;

	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

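	/*
	 * Clamp [blkid, blkid + nblks) to the slots this indirect
	 * block actually covers: epbs is log2 of the block pointers
	 * per indirect block, and dbstart/dbend are the first and
	 * last child blkids addressed by this block.
	 */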
	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || db->db_last_dirty);
		return (all);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx)) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
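	/*
	 * Rewind bp and verify that every pointer we were asked to
	 * free is now a hole, skipping the partial first and last
	 * entries that may legitimately remain.
	 */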
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || db->db_last_dirty);
	return (all);
}

/*
 * dnode_sync_free_range: Traverse the indicated range of the provided
 * file and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, FALSE, &off,
			    1, 1, 0) != 0);
		}
		return;
	}

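	/*
	 * The range is covered by indirect blocks; shift converts a
	 * level-0 blkid into an index into the dnode's top-level
	 * block pointer array.
	 */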
	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx)) {
			ASSERT3P(db->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
void
dnode_evict_dbufs(dnode_t *dn)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
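		/*
		 * Walk the list with a marker dbuf: each visited dbuf
		 * is rotated to the tail, so the pass is complete when
		 * the marker comes back around to the head.
		 */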
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);
			ASSERT3P(db->db_dnode, ==, dn);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				ASSERT(!arc_released(db->db_buf));
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
}

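/*
 * Discard the dirty records on this list without writing them out,
 * recursing into any child dirty records of indirect dbufs and
 * dropping the hold that dirtying took on each dbuf.
 */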
static void
dnode_undirty_dbufs(list_t *list)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		dmu_buf_impl_t *db = dr->dr_dbuf;
		uint64_t txg = dr->dr_txg;

		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(list, dr);
		ASSERT(db->db_last_dirty == dr);
		db->db_last_dirty = NULL;
		db->db_dirtycnt -= 1;
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    dr->dt.dl.dr_data == db->db_buf);
			dbuf_unoverride(dr);
			mutex_exit(&db->db_mtx);
		} else {
			mutex_exit(&db->db_mtx);
			dnode_undirty_dbufs(&dr->dt.di.dr_children);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		dbuf_rele(db, (void *)(uintptr_t)txg);
	}
}

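/*
 * Sync a dnode that has been freed in this txg: throw away its dirty
 * state, evict its dbufs, release all the blocks it referenced, and
 * finally zero its on-disk dnode so the slot can be reused.
 */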
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	dnode_undirty_dbufs(&dn->dn_dirty_records[txgoff]);
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* free up all the blocks in the file. */
	dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	dn->dn_free_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
}

/*
 * Write out the dnode's dirty buffers.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
	free_range_t *rp;
	dnode_phys_t *dnp = dn->dn_phys;
	int txgoff = tx->tx_txg & TXG_MASK;
	list_t *list = &dn->dn_dirty_records[txgoff];

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	ASSERT(dn->dn_dbuf == NULL || arc_released(dn->dn_dbuf->db_buf));

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    list_head(list) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_bonuslen[txgoff]) {
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
		ASSERT(dnp->dn_bonuslen <= DN_MAX_BONUSLEN);
		dn->dn_next_bonuslen[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
		for (rp = avl_last(&dn->dn_ranges[txgoff]); rp != NULL;
		    rp = AVL_PREV(&dn->dn_ranges[txgoff], rp))
			dnode_sync_free_range(dn,
			    rp->fr_blkid, rp->fr_nblks, tx);
	}
	/* grab the mutex so we don't race with dnode_block_freed() */
	mutex_enter(&dn->dn_mtx);
	for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
		free_range_t *last = rp;
		rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
		avl_remove(&dn->dn_ranges[txgoff], last);
		kmem_free(last, sizeof (free_range_t));
	}
	mutex_exit(&dn->dn_mtx);
	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		dnode_sync_free(dn, tx);
		return;
	}

	if (dn->dn_next_nlevels[txgoff]) {
		dnode_increase_indirection(dn, tx);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	dbuf_sync_list(list, tx);

	if (dn->dn_object != DMU_META_DNODE_OBJECT) {
		ASSERT3P(list_head(list), ==, NULL);
		dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	}

	/*
	 * Although we have dropped our reference to the dnode, it
	 * can't be evicted until it's written, and we haven't yet
	 * initiated the IO for the dnode's dbuf.
	 */
}
617