xref: /titanic_41/usr/src/uts/common/fs/zfs/dnode_sync.c (revision 60405de4d8688d96dd05157c28db3ade5c9bc234)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>

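/*
 * Add one level of indirection to the dnode's block tree: copy the
 * block pointers embedded in the dnode (dn_blkptr[]) into a freshly
 * dirtied top-level indirect block, re-parent any cached child dbufs
 * to that new indirect buffer, and then zero the embedded pointers.
 * Called in syncing context with dn_struct_rwlock held as writer.
 */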
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int i;
	uint64_t txg = tx->tx_txg;

	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	/* this dnode can't be paged out because it's dirty */

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	ASSERT(db != NULL);
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != dn->dn_phys->dn_nblkptr) {
		ASSERT(list_link_active(&db->db_dirty_node[txg&TXG_MASK]));

		(void) dbuf_read(db, NULL,
		    DB_RF_HAVESTRUCT | DB_RF_MUST_SUCCEED);
		arc_release(db->db_buf, db);
		/* copy dnode's block pointers to new indirect block */
		ASSERT3U(sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr, <=,
		    db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);
	}

	dn->dn_phys->dn_nlevels += 1;
	dprintf("os=%p obj=%llu, increase to %d\n",
		dn->dn_objset, dn->dn_object,
		dn->dn_phys->dn_nlevels);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn, dn->dn_phys->dn_nlevels-2, i);
		if (child == NULL)
			continue;
		if (child->db_dnode == NULL) {
			mutex_exit(&child->db_mtx);
			continue;
		}

		if (child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf) {
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changing db_blkptr to new indirect %s", "");
			child->db_parent = db;
			dbuf_add_ref(db, child);
			if (db->db.db_data) {
				child->db_blkptr =
				    (blkptr_t *)db->db.db_data + i;
			} else {
				child->db_blkptr = NULL;
			}
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changed db_blkptr to new indirect %s", "");
		}
		ASSERT3P(child->db_parent, ==, db);

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr,
		sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);

	dbuf_rele(db, FTAG);
}

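/*
 * Free 'num' block pointers starting at 'bp', skipping holes, and
 * charge the freed space back against the dnode's space accounting
 * via dnode_diduse_space().
 */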
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += bp_get_dasize(os->os_spa, bp);
		ASSERT3U(bytesfreed, <=, DN_USED_BYTES(dn->dn_phys));
		dsl_dataset_block_kill(os->os_dsl_dataset, bp, tx);
	}
	dnode_diduse_space(dn, -bytesfreed);
}

#ifdef ZFS_DEBUG
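/*
 * Debug-only check used by free_children() through FREE_VERIFY():
 * verify that the level-0 blocks in the range [start, end] under this
 * level-1 indirect dbuf are zeroed, both in the data being written
 * out this txg (db_data_old) and in the current buffer, unless that
 * buffer is being filled or is dirty again in a future txg.
 */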
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		int j;
		dmu_buf_impl_t *child;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
			(db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		ASSERT(!list_link_active(&child->db_dirty_node[txg&TXG_MASK]));

		/* db_data_old better be zeroed */
		if (child->db_d.db_data_old[txg & TXG_MASK]) {
			buf = ((arc_buf_t *)child->db_d.db_data_old
			    [txg & TXG_MASK])->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    !list_link_active(&child->db_dirty_node
			[(txg+1) & TXG_MASK]) &&
		    !list_link_active(&child->db_dirty_node
			[(txg+2) & TXG_MASK])) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_rele(child, FTAG);
	}
}
#endif

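/*
 * Recursively free the blocks in the range [blkid, blkid + nblks)
 * that live below the indirect dbuf 'db'.  Returns TRUE if every
 * block pointer under 'db' was freed (so the caller may also free
 * db's own block pointer), FALSE otherwise.
 *
 * 'epbs' is log2 of the number of block pointers per indirect block,
 * and 'shift' converts level-0 block ids into indexes at db's child
 * level.  For example, with 16K indirect blocks (dn_indblkshift of 14)
 * and 128-byte block pointers (SPA_BLKPTRSHIFT of 7), epbs is 7 and
 * each indirect block maps 128 children.
 */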
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int txgoff = tx->tx_txg & TXG_MASK;
	int all = TRUE;

	(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		FREE_VERIFY(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
		ASSERT(all || list_link_active(&db->db_dirty_node[txgoff]));
		return (all);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx)) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		} else {
			all = FALSE;
		}
		dbuf_rele(subdb, FTAG);
	}
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || list_link_active(&db->db_dirty_node[txgoff]));
	return (all);
}

/*
 * dnode_sync_free_range: Traverse the indicated range of the provided
 * file and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
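		/*
		 * If we freed through the end of the object, pull
		 * dn_maxblkid back and verify (via dnode_next_offset)
		 * that nothing remains allocated past the new end.
		 */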
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH);
		}
		return;
	}

	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx)) {
			ASSERT3P(db->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_rele(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH);
	}
}

/*
 * Try to kick all of the dnode's dbufs out of the cache.
 */
int
dnode_evict_dbufs(dnode_t *dn, int try)
{
	int progress;
	int pass = 0;

	do {
		dmu_buf_impl_t *db, marker;
		int evicting = FALSE;

		progress = FALSE;
		mutex_enter(&dn->dn_dbufs_mtx);
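		/*
		 * Walk dn_dbufs using an on-stack marker: each dbuf we
		 * visit is moved behind the marker at the tail, so the
		 * pass is complete once the marker reaches the head of
		 * the list.
		 */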
		list_insert_tail(&dn->dn_dbufs, &marker);
		db = list_head(&dn->dn_dbufs);
		for (; db != &marker; db = list_head(&dn->dn_dbufs)) {
			list_remove(&dn->dn_dbufs, db);
			list_insert_tail(&dn->dn_dbufs, db);

			mutex_enter(&db->db_mtx);
			if (db->db_state == DB_EVICTING) {
				progress = TRUE;
				evicting = TRUE;
				mutex_exit(&db->db_mtx);
			} else if (refcount_is_zero(&db->db_holds)) {
				progress = TRUE;
				ASSERT(!arc_released(db->db_buf));
				dbuf_clear(db); /* exits db_mtx for us */
			} else {
				mutex_exit(&db->db_mtx);
			}

		}
		list_remove(&dn->dn_dbufs, &marker);
		/*
		 * NB: we need to drop dn_dbufs_mtx between passes so
		 * that any DB_EVICTING dbufs can make progress.
		 * Ideally, we would have some cv we could wait on, but
		 * since we don't, just wait a bit to give the other
		 * thread a chance to run.
		 */
		mutex_exit(&dn->dn_dbufs_mtx);
		if (evicting)
			delay(1);
		pass++;
		ASSERT(pass < 100); /* sanity check */
	} while (progress);

	/*
	 * This function works fine even if it can't evict everything.
	 * If we were only asked to try to evict everything, return an
	 * error if we can't; otherwise panic, since the caller expects
	 * total eviction.
	 */
	if (list_head(&dn->dn_dbufs) != NULL) {
		if (try) {
			return (1);
		} else {
			panic("dangling dbufs (dn=%p, dbuf=%p)\n",
			    dn, list_head(&dn->dn_dbufs));
		}
	}

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	rw_exit(&dn->dn_struct_rwlock);
	return (0);
}

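/*
 * Syncing-context teardown of a dnode being freed: undirty any dbufs
 * still dirty in this txg, evict all dbufs, free every block in the
 * object, zero the on-disk dnode, and drop the hold that kept the
 * dnode in memory while it was dirty.  Returns 1; once that hold is
 * released the dnode may be evicted and must not be touched again.
 */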
static int
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/* Undirty all buffers */
	while (db = list_head(&dn->dn_dirty_dbufs[txgoff])) {
		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(&dn->dn_dirty_dbufs[txgoff], db);
		if (db->db_level == 0) {
			ASSERT(db->db_blkid == DB_BONUS_BLKID ||
			    db->db_d.db_data_old[txgoff] == db->db_buf);
			if (db->db_d.db_overridden_by[txgoff])
				dbuf_unoverride(db, tx->tx_txg);
			db->db_d.db_data_old[txgoff] = NULL;
		}
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_rele(db, (void *)(uintptr_t)tx->tx_txg);
	}

	(void) dnode_evict_dbufs(dn, 0);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX - It would be nice to assert this, but we may still
	 * have residual holds from async evictions from the arc...
	 *
	 * ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
	 */

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;
	dn->dn_next_blksz[txgoff] = 0;

	/* free up all the blocks in the file. */
	dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
	ASSERT3U(DN_USED_BYTES(dn->dn_phys), ==, 0);

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
	return (1);
}

/*
 * Write out the dnode's dirty buffers at the specified level.
 * This may create more dirty buffers at the next level up.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
int
dnode_sync(dnode_t *dn, int level, zio_t *zio, dmu_tx_t *tx)
{
	free_range_t *rp;
	int txgoff = tx->tx_txg & TXG_MASK;
	dnode_phys_t *dnp = dn->dn_phys;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	DNODE_VERIFY(dn);

	/*
	 * Make sure the dbuf for the dn_phys is released before we modify it.
	 */
	if (dn->dn_dbuf)
		arc_release(dn->dn_dbuf->db_buf, dn->dn_dbuf);

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	ASSERT(level != 0 || dnp->dn_nlevels > 1 ||
	    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
	    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (dn->dn_next_blksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_next_blksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    list_head(&dn->dn_dirty_dbufs[txgoff]) != NULL ||
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT ==
		    dnp->dn_datablkszsec);
		dnp->dn_datablkszsec =
		    dn->dn_next_blksz[txgoff] >> SPA_MINBLOCKSHIFT;
		dn->dn_next_blksz[txgoff] = 0;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
		for (rp = avl_last(&dn->dn_ranges[txgoff]); rp != NULL;
		    rp = AVL_PREV(&dn->dn_ranges[txgoff], rp))
			dnode_sync_free_range(dn,
			    rp->fr_blkid, rp->fr_nblks, tx);
	}
	mutex_enter(&dn->dn_mtx);
	for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
		free_range_t *last = rp;
		rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
		avl_remove(&dn->dn_ranges[txgoff], last);
		kmem_free(last, sizeof (free_range_t));
	}
	mutex_exit(&dn->dn_mtx);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		ASSERT3U(level, ==, 0);
		return (dnode_sync_free(dn, tx));
	}

	if (dn->dn_next_nlevels[txgoff]) {
		int new_lvl = dn->dn_next_nlevels[txgoff];

		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		while (new_lvl > dnp->dn_nlevels)
			dnode_increase_indirection(dn, tx);
		rw_exit(&dn->dn_struct_rwlock);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (level == dnp->dn_nlevels) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);

		/* we've already synced out all data and indirect blocks */
		/* there are no more dirty dbufs under this dnode */
		ASSERT3P(list_head(&dn->dn_dirty_dbufs[txgoff]), ==, NULL);
		ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= tx->tx_txg);

		/* XXX this is expensive. remove once 6343073 is closed. */
		/* NB: the "off < maxblkid" is to catch overflow */
		/*
		 * NB: if blocksize is changing, we could get confused,
		 * so only bother if there are multiple blocks and thus
		 * it can't be changing.
		 */
		if (!(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH))
			panic("data after EOF: off=%llu\n", (u_longlong_t)off);

		ASSERT(dnp->dn_nlevels > 1 ||
		    BP_IS_HOLE(&dnp->dn_blkptr[0]) ||
		    BP_GET_LSIZE(&dnp->dn_blkptr[0]) ==
		    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

		if (dn->dn_object != DMU_META_DNODE_OBJECT) {
			dbuf_will_dirty(dn->dn_dbuf, tx);
			dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
		}

		/*
		 * Now that we've dropped the reference, the dnode may
		 * be evicted, so we mustn't access it.
		 */
		return (1);
	} else {
		dmu_buf_impl_t *db, *db_next;
		list_t *list = &dn->dn_dirty_dbufs[txgoff];
		/*
		 * Iterate over the list, removing and sync'ing dbufs
		 * which are on the level we want, and leaving others.
		 */
		for (db = list_head(list); db; db = db_next) {
			db_next = list_next(list, db);
			if (db->db_level == level) {
				list_remove(list, db);
				dbuf_sync(db, zio, tx);
			}
		}
		return (0);
	}
}