/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>

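/*
 * Add one level of indirection to the dnode: migrate any non-hole
 * block pointers embedded in the dnode into the new top-level
 * indirect block, repoint any cached child dbufs at that indirect
 * block, and zero the dnode's embedded block pointers.  Caller must
 * hold dn_struct_rwlock as writer.
 */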
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int i;
	uint64_t txg = tx->tx_txg;

	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	/* this dnode can't be paged out because it's dirty */

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != dn->dn_phys->dn_nblkptr) {
		ASSERT(list_link_active(&db->db_dirty_node[txg&TXG_MASK]));

		(void) dbuf_read(db, NULL,
		    DB_RF_HAVESTRUCT | DB_RF_MUST_SUCCEED);
		arc_release(db->db_buf, db);
		/* copy dnode's block pointers to new indirect block */
		ASSERT3U(sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr, <=,
		    db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);
		arc_buf_freeze(db->db_buf);
	}

	dn->dn_phys->dn_nlevels += 1;
	dprintf("os=%p obj=%llu, increase to %d\n",
	    dn->dn_objset, dn->dn_object,
	    dn->dn_phys->dn_nlevels);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn, dn->dn_phys->dn_nlevels-2, i);
		if (child == NULL)
			continue;
		if (child->db_dnode == NULL) {
			mutex_exit(&child->db_mtx);
			continue;
		}

		if (child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf) {
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changing db_blkptr to new indirect %s", "");
			child->db_parent = db;
			dbuf_add_ref(db, child);
			if (db->db.db_data) {
				child->db_blkptr =
				    (blkptr_t *)db->db.db_data + i;
			} else {
				child->db_blkptr = NULL;
			}
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changed db_blkptr to new indirect %s", "");
		}
		ASSERT3P(child->db_parent, ==, db);

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr,
	    sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);

	dbuf_rele(db, FTAG);
}

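/*
 * Free the blocks referenced by the given array of block pointers and
 * charge the freed bytes against the dnode's space accounting.
 */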
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += bp_get_dasize(os->os_spa, bp);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		dsl_dataset_block_kill(os->os_dsl_dataset, bp, tx);
	}
	dnode_diduse_space(dn, -bytesfreed);
}

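/*
 * ZFS_DEBUG builds only: verify that the level-0 children of this
 * level-1 indirect block covering blkids [start, end] have had their
 * data zeroed out by the free.
 */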
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		int j;
		dmu_buf_impl_t *child;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		ASSERT(!list_link_active(&child->db_dirty_node[txg&TXG_MASK]));

		/* db_data_old better be zeroed */
		if (child->db_d.db_data_old[txg & TXG_MASK]) {
			buf = child->db_d.db_data_old[txg & TXG_MASK]->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    !list_link_active(&child->db_dirty_node
		    [(txg+1) & TXG_MASK]) &&
		    !list_link_active(&child->db_dirty_node
		    [(txg+2) & TXG_MASK])) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif

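/*
 * Recursively free the blocks in [blkid, blkid + nblks) that are
 * referenced through this indirect block.  Returns TRUE if the entire
 * indirect block ended up free, in which case the caller can also
 * free the block pointer that references it.
 */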
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int txgoff = tx->tx_txg & TXG_MASK;
	int all = TRUE;

	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
		arc_buf_freeze(db->db_buf);
		ASSERT(all || list_link_active(&db->db_dirty_node[txgoff]));
		return (all);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx)) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
	arc_buf_freeze(db->db_buf);
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || list_link_active(&db->db_dirty_node[txgoff]));
	return (all);
}

/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, FALSE, &off,
			    1, 1, 0) != 0);
		}
		return;
	}

	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx)) {
			ASSERT3P(db->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);
	}
}

/*
 * Try to kick all the dnode's dbufs out of the cache...
 */
int
dnode_evict_dbufs(dnode_t *dn, int try)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				ASSERT(!arc_released(db->db_buf));
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}
		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	/*
	 * This function works fine even if it can't evict everything.
	 * If we were only asked to try to evict everything, then
	 * return an error if we can't.  Otherwise panic, as the caller
	 * expects total eviction.
	 */
	if (list_head(&dn->dn_dbufs) != NULL) {
		if (try) {
			return (1);
		} else {
			panic("dangling dbufs (dn=%p, dbuf=%p)\n",
			    dn, list_head(&dn->dn_dbufs));
		}
	}

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
	return (0);
}

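/*
 * Sync out a dnode that is being freed in this txg: undirty and evict
 * its remaining dbufs, free all of the object's blocks, and zero the
 * on-disk dnode.  Always returns 1.
 */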
static int
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/* Undirty all buffers */
	while (db = list_head(&dn->dn_dirty_dbufs[txgoff])) {
		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(&dn->dn_dirty_dbufs[txgoff], db);
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    db->db_d.db_data_old[txgoff] == db->db_buf);
			if (db->db_d.db_overridden_by[txgoff])
				dbuf_unoverride(db, tx->tx_txg);
			db->db_d.db_data_old[txgoff] = NULL;
		}
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)tx->tx_txg);
	}

	(void) dnode_evict_dbufs(dn, 0);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * zfs_obj_to_path() also depends on this being
	 * commented out.
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* free up all the blocks in the file. */
	dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
	return (1);
}

/*
 * Write out the dnode's dirty buffers at the specified level.
 * This may create more dirty buffers at the next level up.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
int
dnode_sync(dnode_t *dn, int level, zio_t *zio, dmu_tx_t *tx)
{
	free_range_t *rp;
	int txgoff = tx->tx_txg & TXG_MASK;
	dnode_phys_t *dnp = dn->dn_phys;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	/*
	 * Make sure the dbuf for the dn_phys is released before we modify it.
	 */
	if (dn->dn_dbuf)
		arc_release(dn->dn_dbuf->db_buf, dn->dn_dbuf);

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(level != 0 || dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    list_head(&dn->dn_dirty_dbufs[txgoff]) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
		for (rp = avl_last(&dn->dn_ranges[txgoff]); rp != NULL;
		    rp = AVL_PREV(&dn->dn_ranges[txgoff], rp))
			dnode_sync_free_range(dn,
			    rp->fr_blkid, rp->fr_nblks, tx);
	}
	mutex_enter(&dn->dn_mtx);
	for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
		free_range_t *last = rp;
		rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
		avl_remove(&dn->dn_ranges[txgoff], last);
		kmem_free(last, sizeof (free_range_t));
	}
	mutex_exit(&dn->dn_mtx);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		ASSERT3U(level, ==, 0);
		return (dnode_sync_free(dn, tx));
	}

	if (dn->dn_next_nlevels[txgoff]) {
		int new_lvl = dn->dn_next_nlevels[txgoff];

		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		while (new_lvl > dnp->dn_nlevels)
			dnode_increase_indirection(dn, tx);
		rw_exit(&dn->dn_struct_rwlock);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (level == dnp->dn_nlevels) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);

		/* we've already synced out all data and indirect blocks */
		/* there are no more dirty dbufs under this dnode */
		ASSERT3P(list_head(&dn->dn_dirty_dbufs[txgoff]), ==, NULL);
		ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= tx->tx_txg);

		/* NB: the "off < maxblkid" is to catch overflow */
		/*
		 * NB: if blocksize is changing, we could get confused,
		 * so only bother if there are multiple blocks and thus
		 * it can't be changing.
		 */
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1, 0) != 0);

		ASSERT(dnp->dn_nlevels > 1 ||
		    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
		    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

		if (dn->dn_object != DMU_META_DNODE_OBJECT) {
			dbuf_will_dirty(dn->dn_dbuf, tx);
			dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
		}

		/*
		 * Now that we've dropped the reference, the dnode may
		 * be evicted, so we mustn't access it.
		 */
		return (1);
	} else {
		dmu_buf_impl_t *db, *db_next;
		list_t *list = &dn->dn_dirty_dbufs[txgoff];
		/*
		 * Iterate over the list, removing and syncing dbufs
		 * which are on the level we want, and leaving others.
		 */
		for (db = list_head(list); db; db = db_next) {
			db_next = list_next(list, db);
			if (db->db_level == level) {
				list_remove(list, db);
				dbuf_sync(db, zio, tx);
			}
		}
		return (0);
	}
}
654