xref: /illumos-gate/usr/src/uts/common/fs/zfs/dnode.c (revision 639c732ede1f837c0ccd952d813f5b85fc9e0abf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
24  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  * Copyright 2017 RackTop Systems.
27  */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/dbuf.h>
31 #include <sys/dnode.h>
32 #include <sys/dmu.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/dmu_objset.h>
36 #include <sys/dsl_dir.h>
37 #include <sys/dsl_dataset.h>
38 #include <sys/spa.h>
39 #include <sys/zio.h>
40 #include <sys/dmu_zfetch.h>
41 #include <sys/range_tree.h>
42 
43 dnode_stats_t dnode_stats = {
44 	{ "dnode_hold_dbuf_hold",		KSTAT_DATA_UINT64 },
45 	{ "dnode_hold_dbuf_read",		KSTAT_DATA_UINT64 },
46 	{ "dnode_hold_alloc_hits",		KSTAT_DATA_UINT64 },
47 	{ "dnode_hold_alloc_misses",		KSTAT_DATA_UINT64 },
48 	{ "dnode_hold_alloc_interior",		KSTAT_DATA_UINT64 },
49 	{ "dnode_hold_alloc_lock_retry",	KSTAT_DATA_UINT64 },
50 	{ "dnode_hold_alloc_lock_misses",	KSTAT_DATA_UINT64 },
51 	{ "dnode_hold_alloc_type_none",		KSTAT_DATA_UINT64 },
52 	{ "dnode_hold_free_hits",		KSTAT_DATA_UINT64 },
53 	{ "dnode_hold_free_misses",		KSTAT_DATA_UINT64 },
54 	{ "dnode_hold_free_lock_misses",	KSTAT_DATA_UINT64 },
55 	{ "dnode_hold_free_lock_retry",		KSTAT_DATA_UINT64 },
56 	{ "dnode_hold_free_overflow",		KSTAT_DATA_UINT64 },
57 	{ "dnode_hold_free_refcount",		KSTAT_DATA_UINT64 },
58 	{ "dnode_hold_free_txg",		KSTAT_DATA_UINT64 },
59 	{ "dnode_free_interior_lock_retry",	KSTAT_DATA_UINT64 },
60 	{ "dnode_allocate",			KSTAT_DATA_UINT64 },
61 	{ "dnode_reallocate",			KSTAT_DATA_UINT64 },
62 	{ "dnode_buf_evict",			KSTAT_DATA_UINT64 },
63 	{ "dnode_alloc_next_chunk",		KSTAT_DATA_UINT64 },
64 	{ "dnode_alloc_race",			KSTAT_DATA_UINT64 },
65 	{ "dnode_alloc_next_block",		KSTAT_DATA_UINT64 },
66 	{ "dnode_move_invalid",			KSTAT_DATA_UINT64 },
67 	{ "dnode_move_recheck1",		KSTAT_DATA_UINT64 },
68 	{ "dnode_move_recheck2",		KSTAT_DATA_UINT64 },
69 	{ "dnode_move_special",			KSTAT_DATA_UINT64 },
70 	{ "dnode_move_handle",			KSTAT_DATA_UINT64 },
71 	{ "dnode_move_rwlock",			KSTAT_DATA_UINT64 },
72 	{ "dnode_move_active",			KSTAT_DATA_UINT64 },
73 };
74 
75 static kstat_t *dnode_ksp;
76 static kmem_cache_t *dnode_cache;
77 
78 static dnode_phys_t dnode_phys_zero;
79 
80 int zfs_default_bs = SPA_MINBLOCKSHIFT;
81 int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
82 
83 #ifdef	_KERNEL
84 static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
85 #endif	/* _KERNEL */
86 
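/*
 * Comparator for the per-dnode dn_dbufs AVL tree: dbufs sort by
 * (level, blkid).  DB_SEARCH is a sentinel state used only by lookup
 * keys, so a search key sorts immediately before any real dbuf with
 * the same level and blkid; the final pointer comparison gives
 * distinct dbufs with equal keys a total order.
 */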
87 static int
88 dbuf_compare(const void *x1, const void *x2)
89 {
90 	const dmu_buf_impl_t *d1 = x1;
91 	const dmu_buf_impl_t *d2 = x2;
92 
93 	if (d1->db_level < d2->db_level) {
94 		return (-1);
95 	}
96 	if (d1->db_level > d2->db_level) {
97 		return (1);
98 	}
99 
100 	if (d1->db_blkid < d2->db_blkid) {
101 		return (-1);
102 	}
103 	if (d1->db_blkid > d2->db_blkid) {
104 		return (1);
105 	}
106 
107 	if (d1->db_state == DB_SEARCH) {
108 		ASSERT3S(d2->db_state, !=, DB_SEARCH);
109 		return (-1);
110 	} else if (d2->db_state == DB_SEARCH) {
111 		ASSERT3S(d1->db_state, !=, DB_SEARCH);
112 		return (1);
113 	}
114 
115 	if ((uintptr_t)d1 < (uintptr_t)d2) {
116 		return (-1);
117 	}
118 	if ((uintptr_t)d1 > (uintptr_t)d2) {
119 		return (1);
120 	}
121 	return (0);
122 }
123 
124 /* ARGSUSED */
125 static int
126 dnode_cons(void *arg, void *unused, int kmflag)
127 {
128 	dnode_t *dn = arg;
129 	int i;
130 
131 	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
132 	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
133 	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
134 	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
135 
136 	/*
137 	 * Every dbuf has a reference, and dropping a tracked reference is
138 	 * O(number of references), so don't track dn_holds.
139 	 */
140 	zfs_refcount_create_untracked(&dn->dn_holds);
141 	zfs_refcount_create(&dn->dn_tx_holds);
142 	list_link_init(&dn->dn_link);
143 
144 	bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
145 	bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
146 	bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
147 	bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
148 	bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
149 	bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
150 	bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
151 
152 	for (i = 0; i < TXG_SIZE; i++) {
153 		multilist_link_init(&dn->dn_dirty_link[i]);
154 		dn->dn_free_ranges[i] = NULL;
155 		list_create(&dn->dn_dirty_records[i],
156 		    sizeof (dbuf_dirty_record_t),
157 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
158 	}
159 
160 	dn->dn_allocated_txg = 0;
161 	dn->dn_free_txg = 0;
162 	dn->dn_assigned_txg = 0;
163 	dn->dn_dirty_txg = 0;
164 	dn->dn_dirtyctx = 0;
165 	dn->dn_dirtyctx_firstset = NULL;
166 	dn->dn_bonus = NULL;
167 	dn->dn_have_spill = B_FALSE;
168 	dn->dn_zio = NULL;
169 	dn->dn_oldused = 0;
170 	dn->dn_oldflags = 0;
171 	dn->dn_olduid = 0;
172 	dn->dn_oldgid = 0;
173 	dn->dn_newuid = 0;
174 	dn->dn_newgid = 0;
175 	dn->dn_id_flags = 0;
176 
177 	dn->dn_dbufs_count = 0;
178 	avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
179 	    offsetof(dmu_buf_impl_t, db_link));
180 
181 	dn->dn_moved = 0;
182 	return (0);
183 }
184 
185 /* ARGSUSED */
186 static void
187 dnode_dest(void *arg, void *unused)
188 {
189 	int i;
190 	dnode_t *dn = arg;
191 
192 	rw_destroy(&dn->dn_struct_rwlock);
193 	mutex_destroy(&dn->dn_mtx);
194 	mutex_destroy(&dn->dn_dbufs_mtx);
195 	cv_destroy(&dn->dn_notxholds);
196 	zfs_refcount_destroy(&dn->dn_holds);
197 	zfs_refcount_destroy(&dn->dn_tx_holds);
198 	ASSERT(!list_link_active(&dn->dn_link));
199 
200 	for (i = 0; i < TXG_SIZE; i++) {
201 		ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
202 		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
203 		list_destroy(&dn->dn_dirty_records[i]);
204 		ASSERT0(dn->dn_next_nblkptr[i]);
205 		ASSERT0(dn->dn_next_nlevels[i]);
206 		ASSERT0(dn->dn_next_indblkshift[i]);
207 		ASSERT0(dn->dn_next_bonustype[i]);
208 		ASSERT0(dn->dn_rm_spillblk[i]);
209 		ASSERT0(dn->dn_next_bonuslen[i]);
210 		ASSERT0(dn->dn_next_blksz[i]);
211 	}
212 
213 	ASSERT0(dn->dn_allocated_txg);
214 	ASSERT0(dn->dn_free_txg);
215 	ASSERT0(dn->dn_assigned_txg);
216 	ASSERT0(dn->dn_dirty_txg);
217 	ASSERT0(dn->dn_dirtyctx);
218 	ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
219 	ASSERT3P(dn->dn_bonus, ==, NULL);
220 	ASSERT(!dn->dn_have_spill);
221 	ASSERT3P(dn->dn_zio, ==, NULL);
222 	ASSERT0(dn->dn_oldused);
223 	ASSERT0(dn->dn_oldflags);
224 	ASSERT0(dn->dn_olduid);
225 	ASSERT0(dn->dn_oldgid);
226 	ASSERT0(dn->dn_newuid);
227 	ASSERT0(dn->dn_newgid);
228 	ASSERT0(dn->dn_id_flags);
229 
230 	ASSERT0(dn->dn_dbufs_count);
231 	avl_destroy(&dn->dn_dbufs);
232 }
233 
234 void
235 dnode_init(void)
236 {
237 	ASSERT(dnode_cache == NULL);
238 	dnode_cache = kmem_cache_create("dnode_t",
239 	    sizeof (dnode_t),
240 	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
241 #ifdef	_KERNEL
242 	kmem_cache_set_move(dnode_cache, dnode_move);
243 
244 	dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
245 	    KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
246 	    KSTAT_FLAG_VIRTUAL);
247 	if (dnode_ksp != NULL) {
248 		dnode_ksp->ks_data = &dnode_stats;
249 		kstat_install(dnode_ksp);
250 	}
251 #endif	/* _KERNEL */
252 }
253 
254 void
255 dnode_fini(void)
256 {
257 	if (dnode_ksp != NULL) {
258 		kstat_delete(dnode_ksp);
259 		dnode_ksp = NULL;
260 	}
261 
262 	kmem_cache_destroy(dnode_cache);
263 	dnode_cache = NULL;
264 }
265 
266 
267 #ifdef ZFS_DEBUG
268 void
269 dnode_verify(dnode_t *dn)
270 {
271 	int drop_struct_lock = FALSE;
272 
273 	ASSERT(dn->dn_phys);
274 	ASSERT(dn->dn_objset);
275 	ASSERT(dn->dn_handle->dnh_dnode == dn);
276 
277 	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
278 
279 	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
280 		return;
281 
282 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
283 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
284 		drop_struct_lock = TRUE;
285 	}
286 	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
287 		int i;
288 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
289 		ASSERT3U(dn->dn_indblkshift, >=, 0);
290 		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
291 		if (dn->dn_datablkshift) {
292 			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
293 			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
294 			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
295 		}
296 		ASSERT3U(dn->dn_nlevels, <=, 30);
297 		ASSERT(DMU_OT_IS_VALID(dn->dn_type));
298 		ASSERT3U(dn->dn_nblkptr, >=, 1);
299 		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
300 		ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
301 		ASSERT3U(dn->dn_datablksz, ==,
302 		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
303 		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
304 		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
305 		    dn->dn_bonuslen, <=, max_bonuslen);
306 		for (i = 0; i < TXG_SIZE; i++) {
307 			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
308 		}
309 	}
310 	if (dn->dn_phys->dn_type != DMU_OT_NONE)
311 		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
312 	ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
313 	if (dn->dn_dbuf != NULL) {
314 		ASSERT3P(dn->dn_phys, ==,
315 		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
316 		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
317 	}
318 	if (drop_struct_lock)
319 		rw_exit(&dn->dn_struct_rwlock);
320 }
321 #endif
322 
323 void
324 dnode_byteswap(dnode_phys_t *dnp)
325 {
326 	uint64_t *buf64 = (void *)&dnp->dn_blkptr;
327 	int i;
328 
329 	if (dnp->dn_type == DMU_OT_NONE) {
330 		bzero(dnp, sizeof (dnode_phys_t));
331 		return;
332 	}
333 
334 	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
335 	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
336 	dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
337 	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
338 	dnp->dn_used = BSWAP_64(dnp->dn_used);
339 
340 	/*
341 	 * dn_nblkptr is only one byte, so it's OK to read it in either
342 	 * byte order.  We can't read dn_bonuslen.
343 	 */
344 	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
345 	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
346 	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
347 		buf64[i] = BSWAP_64(buf64[i]);
348 
349 	/*
350 	 * OK to check dn_bonuslen for zero, because it won't matter if
351 	 * we have the wrong byte order.  This is necessary because the
352 	 * dnode dnode (i.e. the meta dnode) is smaller than a regular dnode.
353 	 */
354 	if (dnp->dn_bonuslen != 0) {
355 		/*
356 		 * Note that the bonus length calculated here may be
357 		 * longer than the actual bonus buffer.  This is because
358 		 * we always put the bonus buffer after the last block
359 		 * pointer (instead of packing it against the end of the
360 		 * dnode buffer).
361 		 */
362 		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
363 		int slots = dnp->dn_extra_slots + 1;
364 		size_t len = DN_SLOTS_TO_BONUSLEN(slots) - off;
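		/*
		 * For example, assuming the classic 512-byte dnode (one
		 * slot, 64-byte core, 128-byte block pointers) with all
		 * three block pointers in use: off = 2 * 128 = 256 and
		 * DN_SLOTS_TO_BONUSLEN(1) = 512 - 64 - 128 = 320, so at
		 * most 320 - 256 = 64 bytes of bonus are swapped here.
		 */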
365 		ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
366 		dmu_object_byteswap_t byteswap =
367 		    DMU_OT_BYTESWAP(dnp->dn_bonustype);
368 		dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
369 	}
370 
371 	/* Swap SPILL block if we have one */
372 	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
373 		byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
374 
375 }
376 
377 void
378 dnode_buf_byteswap(void *vbuf, size_t size)
379 {
380 	int i = 0;
381 
382 	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
383 	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
384 
385 	while (i < size) {
386 		dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
387 		dnode_byteswap(dnp);
388 
389 		i += DNODE_MIN_SIZE;
390 		if (dnp->dn_type != DMU_OT_NONE)
391 			i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
392 	}
393 }
394 
395 void
396 dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
397 {
398 	ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
399 
400 	dnode_setdirty(dn, tx);
401 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
402 	ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
403 	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
404 	dn->dn_bonuslen = newsize;
405 	if (newsize == 0)
406 		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
407 	else
408 		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
409 	rw_exit(&dn->dn_struct_rwlock);
410 }
411 
412 void
413 dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
414 {
415 	ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
416 	dnode_setdirty(dn, tx);
417 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
418 	dn->dn_bonustype = newtype;
419 	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
420 	rw_exit(&dn->dn_struct_rwlock);
421 }
422 
423 void
424 dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
425 {
426 	ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
427 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
428 	dnode_setdirty(dn, tx);
429 	dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
430 	dn->dn_have_spill = B_FALSE;
431 }
432 
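/*
 * Record a new data block size on the in-core dnode.  dn_datablkshift
 * is only meaningful for power-of-two sizes; for other sizes it is left
 * at zero and callers must consult dn_datablksz instead.
 */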
433 static void
434 dnode_setdblksz(dnode_t *dn, int size)
435 {
436 	ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
437 	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
438 	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
439 	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
440 	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
441 	dn->dn_datablksz = size;
442 	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
443 	dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
444 }
445 
446 static dnode_t *
447 dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
448     uint64_t object, dnode_handle_t *dnh)
449 {
450 	dnode_t *dn;
451 
452 	dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
453 #ifdef _KERNEL
454 	ASSERT(!POINTER_IS_VALID(dn->dn_objset));
455 #endif /* _KERNEL */
456 	dn->dn_moved = 0;
457 
458 	/*
459 	 * Defer setting dn_objset until the dnode is ready to be a candidate
460 	 * for the dnode_move() callback.
461 	 */
462 	dn->dn_object = object;
463 	dn->dn_dbuf = db;
464 	dn->dn_handle = dnh;
465 	dn->dn_phys = dnp;
466 
467 	if (dnp->dn_datablkszsec) {
468 		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
469 	} else {
470 		dn->dn_datablksz = 0;
471 		dn->dn_datablkszsec = 0;
472 		dn->dn_datablkshift = 0;
473 	}
474 	dn->dn_indblkshift = dnp->dn_indblkshift;
475 	dn->dn_nlevels = dnp->dn_nlevels;
476 	dn->dn_type = dnp->dn_type;
477 	dn->dn_nblkptr = dnp->dn_nblkptr;
478 	dn->dn_checksum = dnp->dn_checksum;
479 	dn->dn_compress = dnp->dn_compress;
480 	dn->dn_bonustype = dnp->dn_bonustype;
481 	dn->dn_bonuslen = dnp->dn_bonuslen;
482 	dn->dn_num_slots = dnp->dn_extra_slots + 1;
483 	dn->dn_maxblkid = dnp->dn_maxblkid;
484 	dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
485 	dn->dn_id_flags = 0;
486 
487 	dmu_zfetch_init(&dn->dn_zfetch, dn);
488 
489 	ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
490 	ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
491 	ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
492 
493 	mutex_enter(&os->os_lock);
494 
495 	/*
496 	 * Exclude special dnodes from os_dnodes so an empty os_dnodes
497 	 * signifies that the special dnodes have no references from
498 	 * their children (the entries in os_dnodes).  This allows
499 	 * dnode_destroy() to easily determine if the last child has
500 	 * been removed and then complete eviction of the objset.
501 	 */
502 	if (!DMU_OBJECT_IS_SPECIAL(object))
503 		list_insert_head(&os->os_dnodes, dn);
504 	membar_producer();
505 
506 	/*
507 	 * Everything else must be valid before assigning dn_objset,
508 	 * which makes the dnode eligible for dnode_move().
509 	 */
510 	dn->dn_objset = os;
511 
512 	dnh->dnh_dnode = dn;
513 	mutex_exit(&os->os_lock);
514 
515 	arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
516 
517 	return (dn);
518 }
519 
520 /*
521  * Caller must be holding the dnode handle, which is released upon return.
522  */
523 static void
524 dnode_destroy(dnode_t *dn)
525 {
526 	objset_t *os = dn->dn_objset;
527 	boolean_t complete_os_eviction = B_FALSE;
528 
529 	ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
530 
531 	mutex_enter(&os->os_lock);
532 	POINTER_INVALIDATE(&dn->dn_objset);
533 	if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
534 		list_remove(&os->os_dnodes, dn);
535 		complete_os_eviction =
536 		    list_is_empty(&os->os_dnodes) &&
537 		    list_link_active(&os->os_evicting_node);
538 	}
539 	mutex_exit(&os->os_lock);
540 
541 	/* the dnode can no longer move, so we can release the handle */
542 	if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
543 		zrl_remove(&dn->dn_handle->dnh_zrlock);
544 
545 	dn->dn_allocated_txg = 0;
546 	dn->dn_free_txg = 0;
547 	dn->dn_assigned_txg = 0;
548 	dn->dn_dirty_txg = 0;
549 
550 	dn->dn_dirtyctx = 0;
551 	if (dn->dn_dirtyctx_firstset != NULL) {
552 		kmem_free(dn->dn_dirtyctx_firstset, 1);
553 		dn->dn_dirtyctx_firstset = NULL;
554 	}
555 	if (dn->dn_bonus != NULL) {
556 		mutex_enter(&dn->dn_bonus->db_mtx);
557 		dbuf_destroy(dn->dn_bonus);
558 		dn->dn_bonus = NULL;
559 	}
560 	dn->dn_zio = NULL;
561 
562 	dn->dn_have_spill = B_FALSE;
563 	dn->dn_oldused = 0;
564 	dn->dn_oldflags = 0;
565 	dn->dn_olduid = 0;
566 	dn->dn_oldgid = 0;
567 	dn->dn_newuid = 0;
568 	dn->dn_newgid = 0;
569 	dn->dn_id_flags = 0;
570 
571 	dmu_zfetch_fini(&dn->dn_zfetch);
572 	kmem_cache_free(dnode_cache, dn);
573 	arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
574 
575 	if (complete_os_eviction)
576 		dmu_objset_evict_done(os);
577 }
578 
579 void
580 dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
581     dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
582 {
583 	int i;
584 
585 	ASSERT3U(dn_slots, >, 0);
586 	ASSERT3U(dn_slots << DNODE_SHIFT, <=,
587 	    spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
588 	ASSERT3U(blocksize, <=,
589 	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
590 	if (blocksize == 0)
591 		blocksize = 1 << zfs_default_bs;
592 	else
593 		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
594 
595 	if (ibs == 0)
596 		ibs = zfs_default_ibs;
597 
598 	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
599 
600 	dprintf("os=%p obj=%" PRIu64 " txg=%" PRIu64
601 	    " blocksize=%d ibs=%d dn_slots=%d\n",
602 	    dn->dn_objset, dn->dn_object, tx->tx_txg, blocksize, ibs, dn_slots);
603 	DNODE_STAT_BUMP(dnode_allocate);
604 
605 	ASSERT(dn->dn_type == DMU_OT_NONE);
606 	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
607 	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
608 	ASSERT(ot != DMU_OT_NONE);
609 	ASSERT(DMU_OT_IS_VALID(ot));
610 	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
611 	    (bonustype == DMU_OT_SA && bonuslen == 0) ||
612 	    (bonustype != DMU_OT_NONE && bonuslen != 0));
613 	ASSERT(DMU_OT_IS_VALID(bonustype));
614 	ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
615 	ASSERT(dn->dn_type == DMU_OT_NONE);
616 	ASSERT0(dn->dn_maxblkid);
617 	ASSERT0(dn->dn_allocated_txg);
618 	ASSERT0(dn->dn_dirty_txg);
619 	ASSERT0(dn->dn_assigned_txg);
620 	ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
621 	ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
622 	ASSERT(avl_is_empty(&dn->dn_dbufs));
623 
624 	for (i = 0; i < TXG_SIZE; i++) {
625 		ASSERT0(dn->dn_next_nblkptr[i]);
626 		ASSERT0(dn->dn_next_nlevels[i]);
627 		ASSERT0(dn->dn_next_indblkshift[i]);
628 		ASSERT0(dn->dn_next_bonuslen[i]);
629 		ASSERT0(dn->dn_next_bonustype[i]);
630 		ASSERT0(dn->dn_rm_spillblk[i]);
631 		ASSERT0(dn->dn_next_blksz[i]);
632 		ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
633 		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
634 		ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
635 	}
636 
637 	dn->dn_type = ot;
638 	dnode_setdblksz(dn, blocksize);
639 	dn->dn_indblkshift = ibs;
640 	dn->dn_nlevels = 1;
641 	dn->dn_num_slots = dn_slots;
642 	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
643 		dn->dn_nblkptr = 1;
644 	else {
645 		dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
646 		    1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
647 		    SPA_BLKPTRSHIFT));
648 	}
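	/*
	 * For example, a classic 512-byte dnode with an empty bonus gets
	 * 1 + (320 >> SPA_BLKPTRSHIFT) = 3 block pointers (the
	 * DN_MAX_NBLKPTR cap); each additional 128 bytes of bonus space
	 * costs one block pointer.
	 */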
649 
650 	dn->dn_bonustype = bonustype;
651 	dn->dn_bonuslen = bonuslen;
652 	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
653 	dn->dn_compress = ZIO_COMPRESS_INHERIT;
654 	dn->dn_dirtyctx = 0;
655 
656 	dn->dn_free_txg = 0;
657 	if (dn->dn_dirtyctx_firstset) {
658 		kmem_free(dn->dn_dirtyctx_firstset, 1);
659 		dn->dn_dirtyctx_firstset = NULL;
660 	}
661 
662 	dn->dn_allocated_txg = tx->tx_txg;
663 	dn->dn_id_flags = 0;
664 
665 	dnode_setdirty(dn, tx);
666 	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
667 	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
668 	dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
669 	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
670 }
671 
672 void
673 dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
674     dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
675 {
676 	int nblkptr;
677 
678 	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
679 	ASSERT3U(blocksize, <=,
680 	    spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
681 	ASSERT0(blocksize % SPA_MINBLOCKSIZE);
682 	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
683 	ASSERT(tx->tx_txg != 0);
684 	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
685 	    (bonustype != DMU_OT_NONE && bonuslen != 0) ||
686 	    (bonustype == DMU_OT_SA && bonuslen == 0));
687 	ASSERT(DMU_OT_IS_VALID(bonustype));
688 	ASSERT3U(bonuslen, <=,
689 	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
690 	ASSERT3U(bonuslen, <=, DN_BONUS_SIZE(dn_slots << DNODE_SHIFT));
691 
692 	dnode_free_interior_slots(dn);
693 	DNODE_STAT_BUMP(dnode_reallocate);
694 
695 	/* clean up any unreferenced dbufs */
696 	dnode_evict_dbufs(dn);
697 
698 	dn->dn_id_flags = 0;
699 
700 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
701 	dnode_setdirty(dn, tx);
702 	if (dn->dn_datablksz != blocksize) {
703 		/* change blocksize */
704 		ASSERT(dn->dn_maxblkid == 0 &&
705 		    (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
706 		    dnode_block_freed(dn, 0)));
707 		dnode_setdblksz(dn, blocksize);
708 		dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
709 	}
710 	if (dn->dn_bonuslen != bonuslen)
711 		dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
712 
713 	if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
714 		nblkptr = 1;
715 	else
716 		nblkptr = MIN(DN_MAX_NBLKPTR,
717 		    1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
718 		    SPA_BLKPTRSHIFT));
719 	if (dn->dn_bonustype != bonustype)
720 		dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
721 	if (dn->dn_nblkptr != nblkptr)
722 		dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
723 	if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
724 		dbuf_rm_spill(dn, tx);
725 		dnode_rm_spill(dn, tx);
726 	}
727 	rw_exit(&dn->dn_struct_rwlock);
728 
729 	/* change type */
730 	dn->dn_type = ot;
731 
732 	/* change bonus size and type */
733 	mutex_enter(&dn->dn_mtx);
734 	dn->dn_bonustype = bonustype;
735 	dn->dn_bonuslen = bonuslen;
736 	dn->dn_num_slots = dn_slots;
737 	dn->dn_nblkptr = nblkptr;
738 	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
739 	dn->dn_compress = ZIO_COMPRESS_INHERIT;
740 	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
741 
742 	/* fix up the bonus db_size */
743 	if (dn->dn_bonus) {
744 		dn->dn_bonus->db.db_size =
745 		    DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
746 		    (dn->dn_nblkptr - 1) * sizeof (blkptr_t);
747 		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
748 	}
749 
750 	dn->dn_allocated_txg = tx->tx_txg;
751 	mutex_exit(&dn->dn_mtx);
752 }
753 
754 #ifdef	_KERNEL
755 static void
756 dnode_move_impl(dnode_t *odn, dnode_t *ndn)
757 {
758 	int i;
759 
760 	ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
761 	ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
762 	ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
763 	ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));
764 
765 	/* Copy fields. */
766 	ndn->dn_objset = odn->dn_objset;
767 	ndn->dn_object = odn->dn_object;
768 	ndn->dn_dbuf = odn->dn_dbuf;
769 	ndn->dn_handle = odn->dn_handle;
770 	ndn->dn_phys = odn->dn_phys;
771 	ndn->dn_type = odn->dn_type;
772 	ndn->dn_bonuslen = odn->dn_bonuslen;
773 	ndn->dn_bonustype = odn->dn_bonustype;
774 	ndn->dn_nblkptr = odn->dn_nblkptr;
775 	ndn->dn_checksum = odn->dn_checksum;
776 	ndn->dn_compress = odn->dn_compress;
777 	ndn->dn_nlevels = odn->dn_nlevels;
778 	ndn->dn_indblkshift = odn->dn_indblkshift;
779 	ndn->dn_datablkshift = odn->dn_datablkshift;
780 	ndn->dn_datablkszsec = odn->dn_datablkszsec;
781 	ndn->dn_datablksz = odn->dn_datablksz;
782 	ndn->dn_maxblkid = odn->dn_maxblkid;
783 	ndn->dn_num_slots = odn->dn_num_slots;
784 	bcopy(&odn->dn_next_type[0], &ndn->dn_next_type[0],
785 	    sizeof (odn->dn_next_type));
786 	bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
787 	    sizeof (odn->dn_next_nblkptr));
788 	bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
789 	    sizeof (odn->dn_next_nlevels));
790 	bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
791 	    sizeof (odn->dn_next_indblkshift));
792 	bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
793 	    sizeof (odn->dn_next_bonustype));
794 	bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
795 	    sizeof (odn->dn_rm_spillblk));
796 	bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
797 	    sizeof (odn->dn_next_bonuslen));
798 	bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
799 	    sizeof (odn->dn_next_blksz));
800 	for (i = 0; i < TXG_SIZE; i++) {
801 		list_move_tail(&ndn->dn_dirty_records[i],
802 		    &odn->dn_dirty_records[i]);
803 	}
804 	bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
805 	    sizeof (odn->dn_free_ranges));
806 	ndn->dn_allocated_txg = odn->dn_allocated_txg;
807 	ndn->dn_free_txg = odn->dn_free_txg;
808 	ndn->dn_assigned_txg = odn->dn_assigned_txg;
809 	ndn->dn_dirty_txg = odn->dn_dirty_txg;
810 	ndn->dn_dirtyctx = odn->dn_dirtyctx;
811 	ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
812 	ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
813 	zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
814 	ASSERT(avl_is_empty(&ndn->dn_dbufs));
815 	avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
816 	ndn->dn_dbufs_count = odn->dn_dbufs_count;
817 	ndn->dn_bonus = odn->dn_bonus;
818 	ndn->dn_have_spill = odn->dn_have_spill;
819 	ndn->dn_zio = odn->dn_zio;
820 	ndn->dn_oldused = odn->dn_oldused;
821 	ndn->dn_oldflags = odn->dn_oldflags;
822 	ndn->dn_olduid = odn->dn_olduid;
823 	ndn->dn_oldgid = odn->dn_oldgid;
824 	ndn->dn_newuid = odn->dn_newuid;
825 	ndn->dn_newgid = odn->dn_newgid;
826 	ndn->dn_id_flags = odn->dn_id_flags;
827 	dmu_zfetch_init(&ndn->dn_zfetch, NULL);
828 	list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
829 	ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
830 
831 	/*
832 	 * Update back pointers. Updating the handle fixes the back pointer of
833 	 * every descendant dbuf as well as the bonus dbuf.
834 	 */
835 	ASSERT(ndn->dn_handle->dnh_dnode == odn);
836 	ndn->dn_handle->dnh_dnode = ndn;
837 	if (ndn->dn_zfetch.zf_dnode == odn) {
838 		ndn->dn_zfetch.zf_dnode = ndn;
839 	}
840 
841 	/*
842 	 * Invalidate the original dnode by clearing all of its back pointers.
843 	 */
844 	odn->dn_dbuf = NULL;
845 	odn->dn_handle = NULL;
846 	avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
847 	    offsetof(dmu_buf_impl_t, db_link));
848 	odn->dn_dbufs_count = 0;
849 	odn->dn_bonus = NULL;
850 	odn->dn_zfetch.zf_dnode = NULL;
851 
852 	/*
853 	 * Set the low bit of the objset pointer to ensure that dnode_move()
854 	 * recognizes the dnode as invalid in any subsequent callback.
855 	 */
856 	POINTER_INVALIDATE(&odn->dn_objset);
857 
858 	/*
859 	 * Satisfy the destructor.
860 	 */
861 	for (i = 0; i < TXG_SIZE; i++) {
862 		list_create(&odn->dn_dirty_records[i],
863 		    sizeof (dbuf_dirty_record_t),
864 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
865 		odn->dn_free_ranges[i] = NULL;
866 		odn->dn_next_nlevels[i] = 0;
867 		odn->dn_next_indblkshift[i] = 0;
868 		odn->dn_next_bonustype[i] = 0;
869 		odn->dn_rm_spillblk[i] = 0;
870 		odn->dn_next_bonuslen[i] = 0;
871 		odn->dn_next_blksz[i] = 0;
872 	}
873 	odn->dn_allocated_txg = 0;
874 	odn->dn_free_txg = 0;
875 	odn->dn_assigned_txg = 0;
876 	odn->dn_dirty_txg = 0;
877 	odn->dn_dirtyctx = 0;
878 	odn->dn_dirtyctx_firstset = NULL;
879 	odn->dn_have_spill = B_FALSE;
880 	odn->dn_zio = NULL;
881 	odn->dn_oldused = 0;
882 	odn->dn_oldflags = 0;
883 	odn->dn_olduid = 0;
884 	odn->dn_oldgid = 0;
885 	odn->dn_newuid = 0;
886 	odn->dn_newgid = 0;
887 	odn->dn_id_flags = 0;
888 
889 	/*
890 	 * Mark the dnode.
891 	 */
892 	ndn->dn_moved = 1;
893 	odn->dn_moved = (uint8_t)-1;
894 }
895 
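/*
 * Kmem move callback for the dnode cache.  The protocol below is:
 * validate the objset back pointer, pin the objset (os_lock), lock the
 * dnode handle (dnh_zrlock) and dn_struct_rwlock, and move the dnode
 * only when every remaining hold is accounted for by a dbuf; anything
 * else defers (KMEM_CBRC_LATER) or refuses (KMEM_CBRC_NO) the move.
 */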
896 /*ARGSUSED*/
897 static kmem_cbrc_t
898 dnode_move(void *buf, void *newbuf, size_t size, void *arg)
899 {
900 	dnode_t *odn = buf, *ndn = newbuf;
901 	objset_t *os;
902 	int64_t refcount;
903 	uint32_t dbufs;
904 
905 	/*
906 	 * The dnode is on the objset's list of known dnodes if the objset
907 	 * pointer is valid. We set the low bit of the objset pointer when
908 	 * freeing the dnode to invalidate it, and the memory patterns written
909 	 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
910 	 * A newly created dnode sets the objset pointer last of all to indicate
911 	 * that the dnode is known and in a valid state to be moved by this
912 	 * function.
913 	 */
914 	os = odn->dn_objset;
915 	if (!POINTER_IS_VALID(os)) {
916 		DNODE_STAT_BUMP(dnode_move_invalid);
917 		return (KMEM_CBRC_DONT_KNOW);
918 	}
919 
920 	/*
921 	 * Ensure that the objset does not go away during the move.
922 	 */
923 	rw_enter(&os_lock, RW_WRITER);
924 	if (os != odn->dn_objset) {
925 		rw_exit(&os_lock);
926 		DNODE_STAT_BUMP(dnode_move_recheck1);
927 		return (KMEM_CBRC_DONT_KNOW);
928 	}
929 
930 	/*
931 	 * If the dnode is still valid, then so is the objset. We know that no
932 	 * valid objset can be freed while we hold os_lock, so we can safely
933 	 * ensure that the objset remains in use.
934 	 */
935 	mutex_enter(&os->os_lock);
936 
937 	/*
938 	 * Recheck the objset pointer in case the dnode was removed just before
939 	 * acquiring the lock.
940 	 */
941 	if (os != odn->dn_objset) {
942 		mutex_exit(&os->os_lock);
943 		rw_exit(&os_lock);
944 		DNODE_STAT_BUMP(dnode_move_recheck2);
945 		return (KMEM_CBRC_DONT_KNOW);
946 	}
947 
948 	/*
949 	 * At this point we know that as long as we hold os->os_lock, the dnode
950 	 * cannot be freed and fields within the dnode can be safely accessed.
951 	 * The objset listing this dnode cannot go away as long as this dnode is
952 	 * on its list.
953 	 */
954 	rw_exit(&os_lock);
955 	if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
956 		mutex_exit(&os->os_lock);
957 		DNODE_STAT_BUMP(dnode_move_special);
958 		return (KMEM_CBRC_NO);
959 	}
960 	ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
961 
962 	/*
963 	 * Lock the dnode handle to prevent the dnode from obtaining any new
964 	 * holds. This also prevents the descendant dbufs and the bonus dbuf
965 	 * from accessing the dnode, so that we can discount their holds. The
966 	 * handle is safe to access because we know that while the dnode cannot
967 	 * go away, neither can its handle. Once we hold dnh_zrlock, we can
968 	 * safely move any dnode referenced only by dbufs.
969 	 */
970 	if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
971 		mutex_exit(&os->os_lock);
972 		DNODE_STAT_BUMP(dnode_move_handle);
973 		return (KMEM_CBRC_LATER);
974 	}
975 
976 	/*
977 	 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
978 	 * We need to guarantee that there is a hold for every dbuf in order to
979 	 * determine whether the dnode is actively referenced. Falsely matching
980 	 * a dbuf to an active hold would lead to an unsafe move. It's possible
981 	 * that a thread already having an active dnode hold is about to add a
982 	 * dbuf, and we can't compare hold and dbuf counts while the add is in
983 	 * progress.
984 	 */
985 	if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
986 		zrl_exit(&odn->dn_handle->dnh_zrlock);
987 		mutex_exit(&os->os_lock);
988 		DNODE_STAT_BUMP(dnode_move_rwlock);
989 		return (KMEM_CBRC_LATER);
990 	}
991 
992 	/*
993 	 * A dbuf may be removed (evicted) without an active dnode hold. In that
994 	 * case, the dbuf count is decremented under the handle lock before the
995 	 * dbuf's hold is released. This order ensures that if we count the hold
996 	 * after the dbuf is removed but before its hold is released, we will
997 	 * treat the unmatched hold as active and exit safely. If we count the
998 	 * hold before the dbuf is removed, the hold is discounted, and the
999 	 * removal is blocked until the move completes.
1000 	 */
1001 	refcount = zfs_refcount_count(&odn->dn_holds);
1002 	ASSERT(refcount >= 0);
1003 	dbufs = odn->dn_dbufs_count;
1004 
1005 	/* We can't have more dbufs than dnode holds. */
1006 	ASSERT3U(dbufs, <=, refcount);
1007 	DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
1008 	    uint32_t, dbufs);
1009 
1010 	if (refcount > dbufs) {
1011 		rw_exit(&odn->dn_struct_rwlock);
1012 		zrl_exit(&odn->dn_handle->dnh_zrlock);
1013 		mutex_exit(&os->os_lock);
1014 		DNODE_STAT_BUMP(dnode_move_active);
1015 		return (KMEM_CBRC_LATER);
1016 	}
1017 
1018 	rw_exit(&odn->dn_struct_rwlock);
1019 
1020 	/*
1021 	 * At this point we know that anyone with a hold on the dnode is not
1022 	 * actively referencing it. The dnode is known and in a valid state to
1023 	 * move. We're holding the locks needed to execute the critical section.
1024 	 */
1025 	dnode_move_impl(odn, ndn);
1026 
1027 	list_link_replace(&odn->dn_link, &ndn->dn_link);
1028 	/* If the dnode was safe to move, the refcount cannot have changed. */
1029 	ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
1030 	ASSERT(dbufs == ndn->dn_dbufs_count);
1031 	zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
1032 	mutex_exit(&os->os_lock);
1033 
1034 	return (KMEM_CBRC_YES);
1035 }
1036 #endif	/* _KERNEL */
1037 
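/*
 * Helpers for the child-handle array (dnode_children_t) attached to a
 * dnode block's dbuf.  Each 512-byte slot in the block has a handle
 * whose dnh_dnode is either a real dnode pointer or one of the
 * DN_SLOT_* tokens (UNINIT, FREE, ALLOCATED, INTERIOR); the per-handle
 * zrlocks serialize slot state transitions.
 */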
1038 static void
1039 dnode_slots_hold(dnode_children_t *children, int idx, int slots)
1040 {
1041 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1042 
1043 	for (int i = idx; i < idx + slots; i++) {
1044 		dnode_handle_t *dnh = &children->dnc_children[i];
1045 		zrl_add(&dnh->dnh_zrlock);
1046 	}
1047 }
1048 
1049 static void
1050 dnode_slots_rele(dnode_children_t *children, int idx, int slots)
1051 {
1052 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1053 
1054 	for (int i = idx; i < idx + slots; i++) {
1055 		dnode_handle_t *dnh = &children->dnc_children[i];
1056 
1057 		if (zrl_is_locked(&dnh->dnh_zrlock))
1058 			zrl_exit(&dnh->dnh_zrlock);
1059 		else
1060 			zrl_remove(&dnh->dnh_zrlock);
1061 	}
1062 }
1063 
1064 static int
1065 dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
1066 {
1067 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1068 
1069 	for (int i = idx; i < idx + slots; i++) {
1070 		dnode_handle_t *dnh = &children->dnc_children[i];
1071 
1072 		if (!zrl_tryenter(&dnh->dnh_zrlock)) {
1073 			for (int j = idx; j < i; j++) {
1074 				dnh = &children->dnc_children[j];
1075 				zrl_exit(&dnh->dnh_zrlock);
1076 			}
1077 
1078 			return (0);
1079 		}
1080 	}
1081 
1082 	return (1);
1083 }
1084 
1085 static void
1086 dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
1087 {
1088 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1089 
1090 	for (int i = idx; i < idx + slots; i++) {
1091 		dnode_handle_t *dnh = &children->dnc_children[i];
1092 		dnh->dnh_dnode = ptr;
1093 	}
1094 }
1095 
1096 static boolean_t
1097 dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
1098 {
1099 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1100 
1101 	/*
1102 	 * If all dnode slots are either already free or
1103 	 * evictable, return B_TRUE.
1104 	 */
1105 	for (int i = idx; i < idx + slots; i++) {
1106 		dnode_handle_t *dnh = &children->dnc_children[i];
1107 		dnode_t *dn = dnh->dnh_dnode;
1108 
1109 		if (dn == DN_SLOT_FREE) {
1110 			continue;
1111 		} else if (DN_SLOT_IS_PTR(dn)) {
1112 			mutex_enter(&dn->dn_mtx);
1113 			boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
1114 			    zfs_refcount_is_zero(&dn->dn_holds) &&
1115 			    !DNODE_IS_DIRTY(dn));
1116 			mutex_exit(&dn->dn_mtx);
1117 
1118 			if (!can_free)
1119 				return (B_FALSE);
1120 			else
1121 				continue;
1122 		} else {
1123 			return (B_FALSE);
1124 		}
1125 	}
1126 
1127 	return (B_TRUE);
1128 }
1129 
1130 static void
1131 dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
1132 {
1133 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1134 
1135 	for (int i = idx; i < idx + slots; i++) {
1136 		dnode_handle_t *dnh = &children->dnc_children[i];
1137 
1138 		ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
1139 
1140 		if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1141 			ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
1142 			dnode_destroy(dnh->dnh_dnode);
1143 			dnh->dnh_dnode = DN_SLOT_FREE;
1144 		}
1145 	}
1146 }
1147 
1148 void
1149 dnode_free_interior_slots(dnode_t *dn)
1150 {
1151 	dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
1152 	int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
1153 	int idx = (dn->dn_object & (epb - 1)) + 1;
1154 	int slots = dn->dn_num_slots - 1;
1155 
1156 	if (slots == 0)
1157 		return;
1158 
1159 	ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
1160 
1161 	while (!dnode_slots_tryenter(children, idx, slots))
1162 		DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
1163 
1164 	dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
1165 	dnode_slots_rele(children, idx, slots);
1166 }
1167 
1168 void
1169 dnode_special_close(dnode_handle_t *dnh)
1170 {
1171 	dnode_t *dn = dnh->dnh_dnode;
1172 
1173 	/*
1174 	 * Wait for final references to the dnode to clear.  This can
1175 	 * only happen if the arc is asynchronously evicting state that
1176 	 * has a hold on this dnode while we are trying to evict this
1177 	 * dnode.
1178 	 */
1179 	while (zfs_refcount_count(&dn->dn_holds) > 0)
1180 		delay(1);
1181 	ASSERT(dn->dn_dbuf == NULL ||
1182 	    dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
1183 	zrl_add(&dnh->dnh_zrlock);
1184 	dnode_destroy(dn); /* implicit zrl_remove() */
1185 	zrl_destroy(&dnh->dnh_zrlock);
1186 	dnh->dnh_dnode = NULL;
1187 }
1188 
1189 void
1190 dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
1191     dnode_handle_t *dnh)
1192 {
1193 	dnode_t *dn;
1194 
1195 	zrl_init(&dnh->dnh_zrlock);
1196 	zrl_tryenter(&dnh->dnh_zrlock);
1197 
1198 	dn = dnode_create(os, dnp, NULL, object, dnh);
1199 	DNODE_VERIFY(dn);
1200 
1201 	zrl_exit(&dnh->dnh_zrlock);
1202 }
1203 
1204 static void
1205 dnode_buf_evict_async(void *dbu)
1206 {
1207 	dnode_children_t *dnc = dbu;
1208 
1209 	DNODE_STAT_BUMP(dnode_buf_evict);
1210 
1211 	for (int i = 0; i < dnc->dnc_count; i++) {
1212 		dnode_handle_t *dnh = &dnc->dnc_children[i];
1213 		dnode_t *dn;
1214 
1215 		/*
1216 		 * The dnode handle lock guards against the dnode moving to
1217 		 * another valid address, so there is no need here to guard
1218 		 * against changes to or from NULL.
1219 		 */
1220 		if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1221 			zrl_destroy(&dnh->dnh_zrlock);
1222 			dnh->dnh_dnode = DN_SLOT_UNINIT;
1223 			continue;
1224 		}
1225 
1226 		zrl_add(&dnh->dnh_zrlock);
1227 		dn = dnh->dnh_dnode;
1228 		/*
1229 		 * If there are holds on this dnode, then there should
1230 		 * be holds on the dnode's containing dbuf as well; thus
1231 		 * it wouldn't be eligible for eviction and this function
1232 		 * would not have been called.
1233 		 */
1234 		ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
1235 		ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
1236 
1237 		dnode_destroy(dn); /* implicit zrl_remove() for first slot */
1238 		zrl_destroy(&dnh->dnh_zrlock);
1239 		dnh->dnh_dnode = DN_SLOT_UNINIT;
1240 	}
1241 	kmem_free(dnc, sizeof (dnode_children_t) +
1242 	    dnc->dnc_count * sizeof (dnode_handle_t));
1243 }
1244 
1245 /*
1246  * When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
1247  * to ensure the hole at the specified object offset is large enough to
1248  * hold the dnode being created. The slots parameter is also used to ensure
1249  * a dnode does not span multiple dnode blocks. In both of these cases, if
1250  * a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
1251  * are only possible when using DNODE_MUST_BE_FREE.
1252  *
1253  * If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
1254  * dnode_hold_impl() will check if the requested dnode is already consumed
1255  * as an extra dnode slot by a large dnode, in which case it returns
1256  * ENOENT.
1257  *
1258  * errors:
1259  * EINVAL - invalid object number or flags.
1260  * ENOSPC - hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
1261  * EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
1262  *        - Refers to a freeing dnode (DNODE_MUST_BE_FREE)
1263  *        - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
1264  * ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
1265  *        - The requested dnode is being freed (DNODE_MUST_BE_ALLOCATED)
1266  * EIO    - i/o error when reading the meta dnode dbuf.
1267  * Note: with DNODE_MUST_BE_FREE the hold succeeds even for free dnodes.
1268  */
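/*
 * Illustrative caller sketch (not code from this file): the object
 * allocator probes candidate object numbers with DNODE_MUST_BE_FREE
 * and moves on when the hole is too small:
 *
 *	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, slots,
 *	    FTAG, &dn);
 *	if (err == ENOSPC)
 *		... advance to the next candidate object and retry ...
 */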
1269 int
1270 dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
1271     void *tag, dnode_t **dnp)
1272 {
1273 	int epb, idx, err;
1274 	int drop_struct_lock = FALSE;
1275 	int type;
1276 	uint64_t blk;
1277 	dnode_t *mdn, *dn;
1278 	dmu_buf_impl_t *db;
1279 	dnode_children_t *dnc;
1280 	dnode_phys_t *dn_block;
1281 	dnode_handle_t *dnh;
1282 
1283 	ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
1284 	ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
1285 
1286 	/*
1287 	 * If you are holding the spa config lock as writer, you shouldn't
1288 	 * be asking the DMU to do *anything* unless it's the root pool,
1289 	 * which may require us to read from the root filesystem while
1290 	 * holding some (not all) of the locks as writer.
1291 	 */
1292 	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
1293 	    (spa_is_root(os->os_spa) &&
1294 	    spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
1295 
1296 	ASSERT((flag & DNODE_MUST_BE_ALLOCATED) || (flag & DNODE_MUST_BE_FREE));
1297 
1298 	if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
1299 		dn = (object == DMU_USERUSED_OBJECT) ?
1300 		    DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
1301 		if (dn == NULL)
1302 			return (SET_ERROR(ENOENT));
1303 		type = dn->dn_type;
1304 		if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
1305 			return (SET_ERROR(ENOENT));
1306 		if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
1307 			return (SET_ERROR(EEXIST));
1308 		DNODE_VERIFY(dn);
1309 		(void) zfs_refcount_add(&dn->dn_holds, tag);
1310 		*dnp = dn;
1311 		return (0);
1312 	}
1313 
1314 	if (object == 0 || object >= DN_MAX_OBJECT)
1315 		return (SET_ERROR(EINVAL));
1316 
1317 	mdn = DMU_META_DNODE(os);
1318 	ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
1319 
1320 	DNODE_VERIFY(mdn);
1321 
1322 	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
1323 		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
1324 		drop_struct_lock = TRUE;
1325 	}
1326 
1327 	blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
1328 
1329 	db = dbuf_hold(mdn, blk, FTAG);
1330 	if (drop_struct_lock)
1331 		rw_exit(&mdn->dn_struct_rwlock);
1332 	if (db == NULL) {
1333 		DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
1334 		return (SET_ERROR(EIO));
1335 	}
1336 	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
1337 	if (err) {
1338 		DNODE_STAT_BUMP(dnode_hold_dbuf_read);
1339 		dbuf_rele(db, FTAG);
1340 		return (err);
1341 	}
1342 
1343 	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
1344 	epb = db->db.db_size >> DNODE_SHIFT;
1345 
1346 	idx = object & (epb - 1);
1347 	dn_block = (dnode_phys_t *)db->db.db_data;
1348 
1349 	ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
1350 	dnc = dmu_buf_get_user(&db->db);
1351 	dnh = NULL;
1352 	if (dnc == NULL) {
1353 		dnode_children_t *winner;
1354 		int skip = 0;
1355 
1356 		dnc = kmem_zalloc(sizeof (dnode_children_t) +
1357 		    epb * sizeof (dnode_handle_t), KM_SLEEP);
1358 		dnc->dnc_count = epb;
1359 		dnh = &dnc->dnc_children[0];
1360 
1361 		/* Initialize dnode slot status from dnode_phys_t */
1362 		for (int i = 0; i < epb; i++) {
1363 			zrl_init(&dnh[i].dnh_zrlock);
1364 
1365 			if (skip) {
1366 				skip--;
1367 				continue;
1368 			}
1369 
1370 			if (dn_block[i].dn_type != DMU_OT_NONE) {
1371 				int interior = dn_block[i].dn_extra_slots;
1372 
1373 				dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
1374 				dnode_set_slots(dnc, i + 1, interior,
1375 				    DN_SLOT_INTERIOR);
1376 				skip = interior;
1377 			} else {
1378 				dnh[i].dnh_dnode = DN_SLOT_FREE;
1379 				skip = 0;
1380 			}
1381 		}
1382 
1383 		dmu_buf_init_user(&dnc->dnc_dbu, NULL,
1384 		    dnode_buf_evict_async, NULL);
1385 		winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
1386 		if (winner != NULL) {
1387 
1388 			for (int i = 0; i < epb; i++)
1389 				zrl_destroy(&dnh[i].dnh_zrlock);
1390 
1391 			kmem_free(dnc, sizeof (dnode_children_t) +
1392 			    epb * sizeof (dnode_handle_t));
1393 			dnc = winner;
1394 		}
1395 	}
1396 
1397 	ASSERT(dnc->dnc_count == epb);
1398 	dn = DN_SLOT_UNINIT;
1399 
1400 	if (flag & DNODE_MUST_BE_ALLOCATED) {
1401 		slots = 1;
1402 
1403 		while (dn == DN_SLOT_UNINIT) {
1404 			dnode_slots_hold(dnc, idx, slots);
1405 			dnh = &dnc->dnc_children[idx];
1406 
1407 			if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1408 				dn = dnh->dnh_dnode;
1409 				break;
1410 			} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
1411 				DNODE_STAT_BUMP(dnode_hold_alloc_interior);
1412 				dnode_slots_rele(dnc, idx, slots);
1413 				dbuf_rele(db, FTAG);
1414 				return (SET_ERROR(EEXIST));
1415 			} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
1416 				DNODE_STAT_BUMP(dnode_hold_alloc_misses);
1417 				dnode_slots_rele(dnc, idx, slots);
1418 				dbuf_rele(db, FTAG);
1419 				return (SET_ERROR(ENOENT));
1420 			}
1421 
1422 			dnode_slots_rele(dnc, idx, slots);
1423 			if (!dnode_slots_tryenter(dnc, idx, slots)) {
1424 				DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
1425 				continue;
1426 			}
1427 
1428 			/*
1429 			 * Someone else won the race and called dnode_create()
1430 			 * after we checked DN_SLOT_IS_PTR() above but before
1431 			 * we acquired the lock.
1432 			 */
1433 			if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1434 				DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
1435 				dn = dnh->dnh_dnode;
1436 			} else {
1437 				dn = dnode_create(os, dn_block + idx, db,
1438 				    object, dnh);
1439 			}
1440 		}
1441 
1442 		mutex_enter(&dn->dn_mtx);
1443 		if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg != 0) {
1444 			DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
1445 			mutex_exit(&dn->dn_mtx);
1446 			dnode_slots_rele(dnc, idx, slots);
1447 			dbuf_rele(db, FTAG);
1448 			return (SET_ERROR(ENOENT));
1449 		}
1450 
1451 		DNODE_STAT_BUMP(dnode_hold_alloc_hits);
1452 	} else if (flag & DNODE_MUST_BE_FREE) {
1453 
1454 		if (idx + slots - 1 >= DNODES_PER_BLOCK) {
1455 			DNODE_STAT_BUMP(dnode_hold_free_overflow);
1456 			dbuf_rele(db, FTAG);
1457 			return (SET_ERROR(ENOSPC));
1458 		}
1459 
1460 		while (dn == DN_SLOT_UNINIT) {
1461 			dnode_slots_hold(dnc, idx, slots);
1462 
1463 			if (!dnode_check_slots_free(dnc, idx, slots)) {
1464 				DNODE_STAT_BUMP(dnode_hold_free_misses);
1465 				dnode_slots_rele(dnc, idx, slots);
1466 				dbuf_rele(db, FTAG);
1467 				return (SET_ERROR(ENOSPC));
1468 			}
1469 
1470 			dnode_slots_rele(dnc, idx, slots);
1471 			if (!dnode_slots_tryenter(dnc, idx, slots)) {
1472 				DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
1473 				continue;
1474 			}
1475 
1476 			if (!dnode_check_slots_free(dnc, idx, slots)) {
1477 				DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
1478 				dnode_slots_rele(dnc, idx, slots);
1479 				dbuf_rele(db, FTAG);
1480 				return (SET_ERROR(ENOSPC));
1481 			}
1482 
1483 			/*
1484 			 * Allocated but otherwise free dnodes which would
1485 			 * be in the interior of a multi-slot dnode need
1486 			 * to be freed.  Single-slot dnodes can be safely
1487 			 * re-purposed as a performance optimization.
1488 			 */
1489 			if (slots > 1)
1490 				dnode_reclaim_slots(dnc, idx + 1, slots - 1);
1491 
1492 			dnh = &dnc->dnc_children[idx];
1493 			if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
1494 				dn = dnh->dnh_dnode;
1495 			} else {
1496 				dn = dnode_create(os, dn_block + idx, db,
1497 				    object, dnh);
1498 			}
1499 		}
1500 
1501 		mutex_enter(&dn->dn_mtx);
1502 		if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
1503 			DNODE_STAT_BUMP(dnode_hold_free_refcount);
1504 			mutex_exit(&dn->dn_mtx);
1505 			dnode_slots_rele(dnc, idx, slots);
1506 			dbuf_rele(db, FTAG);
1507 			return (SET_ERROR(EEXIST));
1508 		}
1509 
1510 		dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
1511 		DNODE_STAT_BUMP(dnode_hold_free_hits);
1512 	} else {
1513 		dbuf_rele(db, FTAG);
1514 		return (SET_ERROR(EINVAL));
1515 	}
1516 
1517 	if (dn->dn_free_txg) {
1518 		DNODE_STAT_BUMP(dnode_hold_free_txg);
1519 		type = dn->dn_type;
1520 		mutex_exit(&dn->dn_mtx);
1521 		dnode_slots_rele(dnc, idx, slots);
1522 		dbuf_rele(db, FTAG);
1523 		return (SET_ERROR((flag & DNODE_MUST_BE_ALLOCATED) ?
1524 		    ENOENT : EEXIST));
1525 	}
1526 
1527 	if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
1528 		dbuf_add_ref(db, dnh);
1529 
1530 	mutex_exit(&dn->dn_mtx);
1531 
1532 	/* Now we can rely on the hold to prevent the dnode from moving. */
1533 	dnode_slots_rele(dnc, idx, slots);
1534 
1535 	DNODE_VERIFY(dn);
1536 	ASSERT3P(dn->dn_dbuf, ==, db);
1537 	ASSERT3U(dn->dn_object, ==, object);
1538 	dbuf_rele(db, FTAG);
1539 
1540 	*dnp = dn;
1541 	return (0);
1542 }
1543 
1544 /*
1545  * Return a held dnode in *dnp if the object is allocated, or ENOENT if not.
1546  */
1547 int
1548 dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
1549 {
1550 	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
1551 	    dnp));
1552 }
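/*
 * Illustrative hold/release pattern for callers (a sketch, not code
 * from this file):
 *
 *	dnode_t *dn;
 *	int err = dnode_hold(os, object, FTAG, &dn);
 *	if (err != 0)
 *		return (err);
 *	... read dn fields or hold dbufs under dn_struct_rwlock ...
 *	dnode_rele(dn, FTAG);
 */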
1553 
1554 /*
1555  * Can only add a reference if there is already at least one
1556  * reference on the dnode.  Returns FALSE if unable to add a
1557  * new reference.
1558  */
1559 boolean_t
1560 dnode_add_ref(dnode_t *dn, void *tag)
1561 {
1562 	mutex_enter(&dn->dn_mtx);
1563 	if (zfs_refcount_is_zero(&dn->dn_holds)) {
1564 		mutex_exit(&dn->dn_mtx);
1565 		return (FALSE);
1566 	}
1567 	VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
1568 	mutex_exit(&dn->dn_mtx);
1569 	return (TRUE);
1570 }
1571 
1572 void
1573 dnode_rele(dnode_t *dn, void *tag)
1574 {
1575 	mutex_enter(&dn->dn_mtx);
1576 	dnode_rele_and_unlock(dn, tag, B_FALSE);
1577 }
1578 
1579 void
1580 dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
1581 {
1582 	uint64_t refs;
1583 	/* Get while the hold prevents the dnode from moving. */
1584 	dmu_buf_impl_t *db = dn->dn_dbuf;
1585 	dnode_handle_t *dnh = dn->dn_handle;
1586 
1587 	refs = zfs_refcount_remove(&dn->dn_holds, tag);
1588 	mutex_exit(&dn->dn_mtx);
1589 
1590 	/*
1591 	 * It's unsafe to release the last hold on a dnode by dnode_rele() or
1592 	 * indirectly by dbuf_rele() while relying on the dnode handle to
1593 	 * prevent the dnode from moving, since releasing the last hold could
1594 	 * result in the dnode's parent dbuf evicting its dnode handles. For
1595 	 * that reason anyone calling dnode_rele() or dbuf_rele() without some
1596 	 * other direct or indirect hold on the dnode must first drop the dnode
1597 	 * handle.
1598 	 */
1599 	ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
1600 
1601 	/* NOTE: special dnodes (e.g. the meta dnode) have no dn_dbuf */
1602 	if (refs == 0 && db != NULL) {
1603 		/*
1604 		 * Another thread could add a hold to the dnode handle in
1605 		 * dnode_hold_impl() while holding the parent dbuf. Since the
1606 		 * hold on the parent dbuf prevents the handle from being
1607 		 * destroyed, the hold on the handle is OK. We can't yet assert
1608 		 * that the handle has zero references, but that will be
1609 		 * asserted anyway when the handle gets destroyed.
1610 		 */
1611 		mutex_enter(&db->db_mtx);
1612 		dbuf_rele_and_unlock(db, dnh, evicting);
1613 	}
1614 }
1615 
1616 void
1617 dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
1618 {
1619 	objset_t *os = dn->dn_objset;
1620 	uint64_t txg = tx->tx_txg;
1621 
1622 	if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
1623 		dsl_dataset_dirty(os->os_dsl_dataset, tx);
1624 		return;
1625 	}
1626 
1627 	DNODE_VERIFY(dn);
1628 
1629 #ifdef ZFS_DEBUG
1630 	mutex_enter(&dn->dn_mtx);
1631 	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
1632 	ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
1633 	mutex_exit(&dn->dn_mtx);
1634 #endif
1635 
1636 	/*
1637 	 * Determine old uid/gid when necessary
1638 	 */
1639 	dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
1640 
1641 	multilist_t *dirtylist = os->os_dirty_dnodes[txg & TXG_MASK];
1642 	multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
1643 
1644 	/*
1645 	 * If we are already marked dirty, we're done.
1646 	 */
1647 	if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
1648 		multilist_sublist_unlock(mls);
1649 		return;
1650 	}
1651 
1652 	ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
1653 	    !avl_is_empty(&dn->dn_dbufs));
1654 	ASSERT(dn->dn_datablksz != 0);
1655 	ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
1656 	ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
1657 	ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);
1658 
1659 	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
1660 	    dn->dn_object, txg);
1661 
1662 	multilist_sublist_insert_head(mls, dn);
1663 
1664 	multilist_sublist_unlock(mls);
1665 
1666 	/*
1667 	 * The dnode maintains a hold on its containing dbuf as
1668 	 * long as there are holds on it.  Each instantiated child
1669 	 * dbuf maintains a hold on the dnode.  When the last child
1670 	 * drops its hold, the dnode will drop its hold on the
1671 	 * containing dbuf. We add a "dirty hold" here so that the
1672 	 * dnode will hang around after we finish processing its
1673 	 * children.
1674 	 */
1675 	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
1676 
1677 	(void) dbuf_dirty(dn->dn_dbuf, tx);
1678 
1679 	dsl_dataset_dirty(os->os_dsl_dataset, tx);
1680 }
1681 
1682 void
1683 dnode_free(dnode_t *dn, dmu_tx_t *tx)
1684 {
1685 	mutex_enter(&dn->dn_mtx);
1686 	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
1687 		mutex_exit(&dn->dn_mtx);
1688 		return;
1689 	}
1690 	dn->dn_free_txg = tx->tx_txg;
1691 	mutex_exit(&dn->dn_mtx);
1692 
1693 	dnode_setdirty(dn, tx);
1694 }
1695 
1696 /*
1697  * Try to change the block size for the indicated dnode.  This can only
1698  * succeed if there are no blocks allocated or dirty beyond the first block.
1699  */
1700 int
1701 dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
1702 {
1703 	dmu_buf_impl_t *db;
1704 	int err;
1705 
1706 	ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
1707 	if (size == 0)
1708 		size = SPA_MINBLOCKSIZE;
1709 	else
1710 		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
1711 
1712 	if (ibs == dn->dn_indblkshift)
1713 		ibs = 0;
1714 
1715 	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
1716 		return (0);
1717 
1718 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1719 
1720 	/* Check for any allocated blocks beyond the first */
1721 	if (dn->dn_maxblkid != 0)
1722 		goto fail;
1723 
1724 	mutex_enter(&dn->dn_dbufs_mtx);
1725 	for (db = avl_first(&dn->dn_dbufs); db != NULL;
1726 	    db = AVL_NEXT(&dn->dn_dbufs, db)) {
1727 		if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
1728 		    db->db_blkid != DMU_SPILL_BLKID) {
1729 			mutex_exit(&dn->dn_dbufs_mtx);
1730 			goto fail;
1731 		}
1732 	}
1733 	mutex_exit(&dn->dn_dbufs_mtx);
1734 
1735 	if (ibs && dn->dn_nlevels != 1)
1736 		goto fail;
1737 
1738 	/* resize the old block */
1739 	err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
1740 	if (err == 0)
1741 		dbuf_new_size(db, size, tx);
1742 	else if (err != ENOENT)
1743 		goto fail;
1744 
1745 	dnode_setdblksz(dn, size);
1746 	dnode_setdirty(dn, tx);
1747 	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
1748 	if (ibs) {
1749 		dn->dn_indblkshift = ibs;
1750 		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
1751 	}
1752 	/* rele after we have fixed the blocksize in the dnode */
1753 	if (db)
1754 		dbuf_rele(db, FTAG);
1755 
1756 	rw_exit(&dn->dn_struct_rwlock);
1757 	return (0);
1758 
1759 fail:
1760 	rw_exit(&dn->dn_struct_rwlock);
1761 	return (SET_ERROR(ENOTSUP));
1762 }
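
/*
 * Illustrative usage sketch for dnode_set_blksz() (hypothetical caller,
 * not part of this file): with a transaction already assigned, a caller
 * growing a fresh object's block size might do
 *
 *	err = dnode_set_blksz(dn, 128 * 1024, 0, tx);
 *
 * which fails with ENOTSUP if data already exists beyond the first
 * block.  Passing ibs == 0 leaves dn_indblkshift unchanged; size is
 * rounded up to a multiple of SPA_MINBLOCKSIZE.
 */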
1763 
1764 /* read-holding callers must not rely on the lock being continuously held */
1765 void
1766 dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
1767 {
1768 	uint64_t txgoff = tx->tx_txg & TXG_MASK;
1769 	int epbs, new_nlevels;
1770 	uint64_t sz;
1771 
1772 	ASSERT(blkid != DMU_BONUS_BLKID);
1773 
1774 	ASSERT(have_read ?
1775 	    RW_READ_HELD(&dn->dn_struct_rwlock) :
1776 	    RW_WRITE_HELD(&dn->dn_struct_rwlock));
1777 
1778 	/*
1779 	 * if we have a read-lock, check to see if we need to do any work
1780 	 * before upgrading to a write-lock.
1781 	 */
1782 	if (have_read) {
1783 		if (blkid <= dn->dn_maxblkid)
1784 			return;
1785 
1786 		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
1787 			rw_exit(&dn->dn_struct_rwlock);
1788 			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1789 		}
1790 	}
1791 
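	/*
	 * Recheck now that the write lock is held: the rw_exit()/rw_enter()
	 * above may have let another thread grow dn_maxblkid past blkid.
	 */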
1792 	if (blkid <= dn->dn_maxblkid)
1793 		goto out;
1794 
1795 	dn->dn_maxblkid = blkid;
1796 
1797 	/*
1798 	 * Compute the number of levels necessary to support the new maxblkid.
1799 	 */
1800 	new_nlevels = 1;
1801 	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1802 	for (sz = dn->dn_nblkptr;
1803 	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
1804 		new_nlevels++;
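	/*
	 * Worked example (hypothetical geometry): with dn_nblkptr = 3 and
	 * 16K indirect blocks (epbs = 14 - SPA_BLKPTRSHIFT = 7), blkid =
	 * 1000 walks sz = 3 -> 384 -> 49152, giving new_nlevels = 3, since
	 * 3 << (2 * 7) = 49152 block pointers more than cover blkid 1000.
	 */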
1805 
1806 	if (new_nlevels > dn->dn_nlevels) {
1807 		int old_nlevels = dn->dn_nlevels;
1808 		dmu_buf_impl_t *db;
1809 		list_t *list;
1810 		dbuf_dirty_record_t *new, *dr, *dr_next;
1811 
1812 		dn->dn_nlevels = new_nlevels;
1813 
1814 		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
1815 		dn->dn_next_nlevels[txgoff] = new_nlevels;
1816 
1817 		/* dirty the leftmost path of indirects (blkid 0, old top level) */
1818 		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
1819 		ASSERT(db != NULL);
1820 		new = dbuf_dirty(db, tx);
1821 		dbuf_rele(db, FTAG);
1822 
1823 		/* transfer the dirty records to the new indirect */
1824 		mutex_enter(&dn->dn_mtx);
1825 		mutex_enter(&new->dt.di.dr_mtx);
1826 		list = &dn->dn_dirty_records[txgoff];
1827 		for (dr = list_head(list); dr; dr = dr_next) {
1828 			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
1829 			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
1830 			    dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
1831 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
1832 				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
1833 				list_remove(&dn->dn_dirty_records[txgoff], dr);
1834 				list_insert_tail(&new->dt.di.dr_children, dr);
1835 				dr->dr_parent = new;
1836 			}
1837 		}
1838 		mutex_exit(&new->dt.di.dr_mtx);
1839 		mutex_exit(&dn->dn_mtx);
1840 	}
1841 
1842 out:
1843 	if (have_read)
1844 		rw_downgrade(&dn->dn_struct_rwlock);
1845 }
1846 
1847 static void
1848 dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
1849 {
1850 	dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
1851 	if (db != NULL) {
1852 		dmu_buf_will_dirty(&db->db, tx);
1853 		dbuf_rele(db, FTAG);
1854 	}
1855 }
1856 
1857 /*
1858  * Dirty all the in-core level-1 dbufs in the range specified by start_blkid
1859  * and end_blkid.
1860  */
1861 static void
1862 dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1863     dmu_tx_t *tx)
1864 {
1865 	dmu_buf_impl_t db_search;
1866 	dmu_buf_impl_t *db;
1867 	avl_index_t where;
1868 
1869 	mutex_enter(&dn->dn_dbufs_mtx);
1870 
1871 	db_search.db_level = 1;
1872 	db_search.db_blkid = start_blkid + 1;
1873 	db_search.db_state = DB_SEARCH;
1874 	for (;;) {
1875 
1876 		db = avl_find(&dn->dn_dbufs, &db_search, &where);
1877 		if (db == NULL)
1878 			db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1879 
1880 		if (db == NULL || db->db_level != 1 ||
1881 		    db->db_blkid >= end_blkid) {
1882 			break;
1883 		}
1884 
1885 		/*
1886 		 * Set up the next blkid we want to search for.
1887 		 */
1888 		db_search.db_blkid = db->db_blkid + 1;
1889 		ASSERT3U(db->db_blkid, >=, start_blkid);
1890 
1891 		/*
1892 		 * If the dbuf transitions to DB_EVICTING while we're trying
1893 		 * to dirty it, then we will be unable to discover it in
1894 		 * the dbuf hash table. This will result in a call to
1895 		 * dbuf_create() which needs to acquire the dn_dbufs_mtx
1896 		 * lock. To avoid a deadlock, we drop the lock before
1897 		 * dirtying the level-1 dbuf.
1898 		 */
1899 		mutex_exit(&dn->dn_dbufs_mtx);
1900 		dnode_dirty_l1(dn, db->db_blkid, tx);
1901 		mutex_enter(&dn->dn_dbufs_mtx);
1902 	}
1903 
1904 #ifdef ZFS_DEBUG
1905 	/*
1906 	 * Walk all the in-core level-1 dbufs and verify they have been dirtied.
1907 	 */
1908 	db_search.db_level = 1;
1909 	db_search.db_blkid = start_blkid + 1;
1910 	db_search.db_state = DB_SEARCH;
1911 	db = avl_find(&dn->dn_dbufs, &db_search, &where);
1912 	if (db == NULL)
1913 		db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1914 	for (; db != NULL; db = AVL_NEXT(&dn->dn_dbufs, db)) {
1915 		if (db->db_level != 1 || db->db_blkid >= end_blkid)
1916 			break;
1917 		ASSERT(db->db_dirtycnt > 0);
1918 	}
1919 #endif
1920 	mutex_exit(&dn->dn_dbufs_mtx);
1921 }
1922 
1923 void
1924 dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
1925 {
1926 	dmu_buf_impl_t *db;
1927 	uint64_t blkoff, blkid, nblks;
1928 	int blksz, blkshift, head, tail;
1929 	int trunc = FALSE;
1930 	int epbs;
1931 
1932 	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1933 	blksz = dn->dn_datablksz;
1934 	blkshift = dn->dn_datablkshift;
1935 	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
1936 
1937 	if (len == DMU_OBJECT_END) {
1938 		len = UINT64_MAX - off;
1939 		trunc = TRUE;
1940 	}
1941 
1942 	/*
1943 	 * First, block align the region to free:
1944 	 */
1945 	if (ISP2(blksz)) {
1946 		head = P2NPHASE(off, blksz);
1947 		blkoff = P2PHASE(off, blksz);
1948 		if ((off >> blkshift) > dn->dn_maxblkid)
1949 			goto out;
1950 	} else {
1951 		ASSERT(dn->dn_maxblkid == 0);
1952 		if (off == 0 && len >= blksz) {
1953 			/*
1954 			 * Freeing the whole block; fast-track this request.
1955 			 */
1956 			blkid = 0;
1957 			nblks = 1;
1958 			if (dn->dn_nlevels > 1)
1959 				dnode_dirty_l1(dn, 0, tx);
1960 			goto done;
1961 		} else if (off >= blksz) {
1962 			/* Freeing past end-of-data */
1963 			goto out;
1964 		} else {
1965 			/* Freeing part of the block. */
1966 			head = blksz - off;
1967 			ASSERT3U(head, >, 0);
1968 		}
1969 		blkoff = off;
1970 	}
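	/*
	 * Worked example (hypothetical numbers, power-of-2 case): with
	 * blksz = 8K, off = 6K, and len = 20K, the code above and below
	 * yields head = 2K (bytes 6K-8K of block 0 are zeroed), tail = 2K
	 * (bytes 0K-2K of block 3 are zeroed), and blkid = 1, nblks = 2
	 * (blocks 1 and 2 are freed outright).
	 */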
1971 	/* zero out any partial block data at the start of the range */
1972 	if (head) {
1973 		ASSERT3U(blkoff + head, ==, blksz);
1974 		if (len < head)
1975 			head = len;
1976 		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
1977 		    TRUE, FALSE, FTAG, &db) == 0) {
1978 			caddr_t data;
1979 
1980 			/* don't dirty if it isn't on disk and isn't dirty */
1981 			if (db->db_last_dirty ||
1982 			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
1983 				rw_exit(&dn->dn_struct_rwlock);
1984 				dmu_buf_will_dirty(&db->db, tx);
1985 				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
1986 				data = db->db.db_data;
1987 				bzero(data + blkoff, head);
1988 			}
1989 			dbuf_rele(db, FTAG);
1990 		}
1991 		off += head;
1992 		len -= head;
1993 	}
1994 
1995 	/* If the range was less than one block, we're done */
1996 	if (len == 0)
1997 		goto out;
1998 
1999 	/* If the remaining range is past end of file, we're done */
2000 	if ((off >> blkshift) > dn->dn_maxblkid)
2001 		goto out;
2002 
2003 	ASSERT(ISP2(blksz));
2004 	if (trunc)
2005 		tail = 0;
2006 	else
2007 		tail = P2PHASE(len, blksz);
2008 
2009 	ASSERT0(P2PHASE(off, blksz));
2010 	/* zero out any partial block data at the end of the range */
2011 	if (tail) {
2012 		if (len < tail)
2013 			tail = len;
2014 		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
2015 		    TRUE, FALSE, FTAG, &db) == 0) {
2016 			/* don't dirty if not on disk and not dirty */
2017 			if (db->db_last_dirty ||
2018 			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
2019 				rw_exit(&dn->dn_struct_rwlock);
2020 				dmu_buf_will_dirty(&db->db, tx);
2021 				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2022 				bzero(db->db.db_data, tail);
2023 			}
2024 			dbuf_rele(db, FTAG);
2025 		}
2026 		len -= tail;
2027 	}
2028 
2029 	/* If the range did not include a full block, we are done */
2030 	if (len == 0)
2031 		goto out;
2032 
2033 	ASSERT(IS_P2ALIGNED(off, blksz));
2034 	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
2035 	blkid = off >> blkshift;
2036 	nblks = len >> blkshift;
2037 	if (trunc)
2038 		nblks += 1;
2039 
2040 	/*
2041 	 * Dirty all the indirect blocks in this range.  Note that only
2042 	 * the first and last indirect blocks can actually be written
2043 	 * (if they were partially freed) -- they must be dirtied, even if
2044 	 * they do not exist on disk yet.  The interior blocks will
2045 	 * be freed by free_children(), so they will not actually be written.
2046 	 * Even though these interior blocks will not be written, we
2047 	 * dirty them for two reasons:
2048 	 *
2049 	 *  - It ensures that the indirect blocks remain in memory until
2050 	 *    syncing context.  (They have already been prefetched by
2051 	 *    dmu_tx_hold_free(), so we don't have to worry about reading
2052 	 *    them serially here.)
2053 	 *
2054 	 *  - The dirty space accounting will put pressure on the txg sync
2055 	 *    mechanism to begin syncing, and to delay transactions if there
2056 	 *    is a large amount of freeing.  Even though these indirect
2057 	 *    blocks will not be written, we could need to write the same
2058 	 *    amount of space if we copy the freed BPs into deadlists.
2059 	 */
2060 	if (dn->dn_nlevels > 1) {
2061 		uint64_t first, last;
2062 
2063 		first = blkid >> epbs;
2064 		dnode_dirty_l1(dn, first, tx);
2065 		if (trunc)
2066 			last = dn->dn_maxblkid >> epbs;
2067 		else
2068 			last = (blkid + nblks - 1) >> epbs;
2069 		if (last != first)
2070 			dnode_dirty_l1(dn, last, tx);
2071 
2072 		dnode_dirty_l1range(dn, first, last, tx);
2073 
2074 		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
2075 		    SPA_BLKPTRSHIFT;
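		/*
		 * shift converts an L1 blkid into a byte offset.  For
		 * example, with 128K data and indirect blocks (hypothetical
		 * but common geometry), shift = 17 + 17 - 7 = 27, so each
		 * L1 indirect block covers 2^27 bytes (128MB) of the file.
		 */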
2076 		for (uint64_t i = first + 1; i < last; i++) {
2077 			/*
2078 			 * Set i to the blockid of the next non-hole
2079 			 * level-1 indirect block at or after i.  Note
2080 			 * that dnode_next_offset() operates in terms of
2081 			 * level-0-equivalent bytes.
2082 			 */
2083 			uint64_t ibyte = i << shift;
2084 			int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
2085 			    &ibyte, 2, 1, 0);
2086 			i = ibyte >> shift;
2087 			if (i >= last)
2088 				break;
2089 
2090 			/*
2091 			 * Normally we should not see an error, either
2092 			 * from dnode_next_offset() or dbuf_hold_level()
2093 			 * (except for ESRCH from dnode_next_offset).
2094 			 * If there is an i/o error, then when we read
2095 			 * this block in syncing context, it will use
2096 			 * ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
2097 			 * to the "failmode" property.  dnode_next_offset()
2098 			 * doesn't have a flag to indicate MUSTSUCCEED.
2099 			 */
2100 			if (err != 0)
2101 				break;
2102 
2103 			dnode_dirty_l1(dn, i, tx);
2104 		}
2105 	}
2106 
2107 done:
2108 	/*
2109 	 * Add this range to the dnode's free range tree.
2110 	 * We will finish up this free operation in the syncing phase.
2111 	 */
2112 	mutex_enter(&dn->dn_mtx);
2113 	int txgoff = tx->tx_txg & TXG_MASK;
2114 	if (dn->dn_free_ranges[txgoff] == NULL) {
2115 		dn->dn_free_ranges[txgoff] = range_tree_create(NULL, NULL);
2116 	}
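	/*
	 * Clear before adding so that a range overlapping blocks already
	 * freed in this txg is not double-inserted into the tree.
	 */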
2117 	range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
2118 	range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
2119 	dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
2120 	    blkid, nblks, tx->tx_txg);
2121 	mutex_exit(&dn->dn_mtx);
2122 
2123 	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
2124 	dnode_setdirty(dn, tx);
2125 out:
2126 
2127 	rw_exit(&dn->dn_struct_rwlock);
2128 }
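
/*
 * Note on dnode_free_range() above: passing len == DMU_OBJECT_END frees
 * from 'off' to the end of the object, so (as a hypothetical example)
 *
 *	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
 *
 * frees every block of the object in the given transaction.
 */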
2129 
2130 static boolean_t
2131 dnode_spill_freed(dnode_t *dn)
2132 {
2133 	int i;
2134 
2135 	mutex_enter(&dn->dn_mtx);
2136 	for (i = 0; i < TXG_SIZE; i++) {
2137 		if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
2138 			break;
2139 	}
2140 	mutex_exit(&dn->dn_mtx);
2141 	return (i < TXG_SIZE);
2142 }
2143 
2144 /* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
2145 uint64_t
2146 dnode_block_freed(dnode_t *dn, uint64_t blkid)
2147 {
2148 	void *dp = spa_get_dsl(dn->dn_objset->os_spa);
2149 	int i;
2150 
2151 	if (blkid == DMU_BONUS_BLKID)
2152 		return (FALSE);
2153 
2154 	/*
2155 	 * If we're in the process of opening the pool, dp will not be
2156 	 * set yet, but there shouldn't be anything dirty.
2157 	 */
2158 	if (dp == NULL)
2159 		return (FALSE);
2160 
2161 	if (dn->dn_free_txg)
2162 		return (TRUE);
2163 
2164 	if (blkid == DMU_SPILL_BLKID)
2165 		return (dnode_spill_freed(dn));
2166 
2167 	mutex_enter(&dn->dn_mtx);
2168 	for (i = 0; i < TXG_SIZE; i++) {
2169 		if (dn->dn_free_ranges[i] != NULL &&
2170 		    range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
2171 			break;
2172 	}
2173 	mutex_exit(&dn->dn_mtx);
2174 	return (i < TXG_SIZE);
2175 }
2176 
2177 /* call from syncing context when we actually write/free space for this dnode */
2178 void
2179 dnode_diduse_space(dnode_t *dn, int64_t delta)
2180 {
2181 	uint64_t space;
2182 	dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
2183 	    dn, dn->dn_phys,
2184 	    (u_longlong_t)dn->dn_phys->dn_used,
2185 	    (longlong_t)delta);
2186 
2187 	mutex_enter(&dn->dn_mtx);
2188 	space = DN_USED_BYTES(dn->dn_phys);
2189 	if (delta > 0) {
2190 		ASSERT3U(space + delta, >=, space); /* no overflow */
2191 	} else {
2192 		ASSERT3U(space, >=, -delta); /* no underflow */
2193 	}
2194 	space += delta;
2195 	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
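		/*
		 * Old-format pools record dn_used in 512-byte sectors:
		 * e.g. space = 3072 bytes is stored as dn_used = 6.
		 */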
2196 		ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
2197 		ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
2198 		dn->dn_phys->dn_used = space >> DEV_BSHIFT;
2199 	} else {
2200 		dn->dn_phys->dn_used = space;
2201 		dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
2202 	}
2203 	mutex_exit(&dn->dn_mtx);
2204 }
2205 
2206 /*
2207  * Scans a block at the indicated "level" looking for a hole or data,
2208  * depending on 'flags'.
2209  *
2210  * If level > 0, then we are scanning an indirect block looking at its
2211  * pointers.  If level == 0, then we are looking at a block of dnodes.
2212  *
2213  * If we don't find what we are looking for in the block, we return ESRCH.
2214  * Otherwise, return with *offset pointing to the beginning (if searching
2215  * forwards) or end (if searching backwards) of the range covered by the
2216  * block pointer we matched on (or dnode).
2217  *
2218  * The basic search algorithm used below by dnode_next_offset() is to
2219  * use this function to search up the block tree (widen the search) until
2220  * we find something (i.e., we don't return ESRCH) and then search back
2221  * down the tree (narrow the search) until we reach our original search
2222  * level.
2223  */
2224 static int
2225 dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
2226     int lvl, uint64_t blkfill, uint64_t txg)
2227 {
2228 	dmu_buf_impl_t *db = NULL;
2229 	void *data = NULL;
2230 	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2231 	uint64_t epb = 1ULL << epbs;
2232 	uint64_t minfill, maxfill;
2233 	boolean_t hole;
2234 	int i, inc, error, span;
2235 
2236 	dprintf("probing object %llu offset %llx level %d of %u\n",
2237 	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);
2238 
2239 	hole = ((flags & DNODE_FIND_HOLE) != 0);
2240 	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
2241 	ASSERT(txg == 0 || !hole);
2242 
2243 	if (lvl == dn->dn_phys->dn_nlevels) {
2244 		error = 0;
2245 		epb = dn->dn_phys->dn_nblkptr;
2246 		data = dn->dn_phys->dn_blkptr;
2247 	} else {
2248 		uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
2249 		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
2250 		if (error) {
2251 			if (error != ENOENT)
2252 				return (error);
2253 			if (hole)
2254 				return (0);
2255 			/*
2256 			 * This can only happen when we are searching up
2257 			 * the block tree for data.  We don't really need to
2258 			 * adjust the offset, as we will just end up looking
2259 			 * at the pointer to this block in its parent, and it's
2260 			 * going to be unallocated, so we will skip over it.
2261 			 */
2262 			return (SET_ERROR(ESRCH));
2263 		}
2264 		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
2265 		if (error) {
2266 			dbuf_rele(db, FTAG);
2267 			return (error);
2268 		}
2269 		data = db->db.db_data;
2270 	}
2271 
2273 	if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
2274 	    db->db_blkptr->blk_birth <= txg ||
2275 	    BP_IS_HOLE(db->db_blkptr))) {
2276 		/*
2277 		 * This can only happen when we are searching up the tree
2278 		 * and these conditions mean that we need to keep climbing.
2279 		 */
2280 		error = SET_ERROR(ESRCH);
2281 	} else if (lvl == 0) {
2282 		dnode_phys_t *dnp = data;
2283 
2284 		ASSERT(dn->dn_type == DMU_OT_DNODE);
2285 		ASSERT(!(flags & DNODE_FIND_BACKWARDS));
2286 
2287 		for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
2288 		    i < blkfill; i += dnp[i].dn_extra_slots + 1) {
2289 			if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
2290 				break;
2291 		}
2292 
2293 		if (i == blkfill)
2294 			error = SET_ERROR(ESRCH);
2295 
2296 		*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
2297 		    (i << DNODE_SHIFT);
2298 	} else {
2299 		blkptr_t *bp = data;
2300 		uint64_t start = *offset;
2301 		span = (lvl - 1) * epbs + dn->dn_datablkshift;
2302 		minfill = 0;
2303 		maxfill = blkfill << ((lvl - 1) * epbs);
2304 
2305 		if (hole)
2306 			maxfill--;
2307 		else
2308 			minfill++;
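		/*
		 * Example: scanning an L2 block of a normal object
		 * (blkfill = 1, epbs = 7), each bp below covers up to
		 * 1 << 7 = 128 L0 blocks, so maxfill = 128.  A hole search
		 * matches fill <= 127 (not completely full); a data search
		 * matches fill >= 1.
		 */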
2309 
2310 		*offset = *offset >> span;
2311 		for (i = BF64_GET(*offset, 0, epbs);
2312 		    i >= 0 && i < epb; i += inc) {
2313 			if (BP_GET_FILL(&bp[i]) >= minfill &&
2314 			    BP_GET_FILL(&bp[i]) <= maxfill &&
2315 			    (hole || bp[i].blk_birth > txg))
2316 				break;
2317 			if (inc > 0 || *offset > 0)
2318 				*offset += inc;
2319 		}
2320 		*offset = *offset << span;
2321 		if (inc < 0) {
2322 			/* traversing backwards; position offset at the end */
2323 			ASSERT3U(*offset, <=, start);
2324 			*offset = MIN(*offset + (1ULL << span) - 1, start);
2325 		} else if (*offset < start) {
2326 			*offset = start;
2327 		}
2328 		if (i < 0 || i >= epb)
2329 			error = SET_ERROR(ESRCH);
2330 	}
2331 
2332 	if (db)
2333 		dbuf_rele(db, FTAG);
2334 
2335 	return (error);
2336 }
2337 
2338 /*
2339  * Find the next hole, data, or sparse region at or after *offset.
2340  * The value 'blkfill' tells us how many items we expect to find
2341  * in an L0 data block; this value is 1 for normal objects,
2342  * DNODES_PER_BLOCK for the meta dnode, and some fraction of
2343  * DNODES_PER_BLOCK when searching for sparse regions thereof.
2344  *
2345  * Examples:
2346  *
2347  * dnode_next_offset(dn, flags, offset, 1, 1, 0);
2348  *	Finds the next/previous hole/data in a file.
2349  *	Used in dmu_offset_next().
2350  *
2351  * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
2352  *	Finds the next free/allocated dnode in an objset's meta-dnode.
2353  *	Only finds objects that have new contents since txg (i.e.
2354  *	bonus buffer changes and content removal are ignored).
2355  *	Used in dmu_object_next().
2356  *
2357  * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
2358  *	Finds the next L2 meta-dnode bp that's at most 1/4 full.
2359  *	Used in dmu_object_alloc().
2360  */
2361 int
2362 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
2363     int minlvl, uint64_t blkfill, uint64_t txg)
2364 {
2365 	uint64_t initial_offset = *offset;
2366 	int lvl, maxlvl;
2367 	int error = 0;
2368 
2369 	if (!(flags & DNODE_FIND_HAVELOCK))
2370 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2371 
2372 	if (dn->dn_phys->dn_nlevels == 0) {
2373 		error = SET_ERROR(ESRCH);
2374 		goto out;
2375 	}
2376 
2377 	if (dn->dn_datablkshift == 0) {
2378 		if (*offset < dn->dn_datablksz) {
2379 			if (flags & DNODE_FIND_HOLE)
2380 				*offset = dn->dn_datablksz;
2381 		} else {
2382 			error = SET_ERROR(ESRCH);
2383 		}
2384 		goto out;
2385 	}
2386 
2387 	maxlvl = dn->dn_phys->dn_nlevels;
2388 
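	/*
	 * Search up the tree (widening the search) until some level finds
	 * a match; see the algorithm note above dnode_next_offset_level().
	 */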
2389 	for (lvl = minlvl; lvl <= maxlvl; lvl++) {
2390 		error = dnode_next_offset_level(dn,
2391 		    flags, offset, lvl, blkfill, txg);
2392 		if (error != ESRCH)
2393 			break;
2394 	}
2395 
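	/*
	 * Then search back down the tree (narrowing the search) until we
	 * return to the caller's requested minimum level.
	 */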
2396 	while (error == 0 && --lvl >= minlvl) {
2397 		error = dnode_next_offset_level(dn,
2398 		    flags, offset, lvl, blkfill, txg);
2399 	}
2400 
2401 	/*
2402 	 * There's always a "virtual hole" at the end of the object, even
2403 	 * if all BPs that physically exist are non-holes.
2404 	 */
2405 	if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
2406 	    minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
2407 		error = 0;
2408 	}
2409 
2410 	if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
2411 	    initial_offset < *offset : initial_offset > *offset))
2412 		error = SET_ERROR(ESRCH);
2413 out:
2414 	if (!(flags & DNODE_FIND_HAVELOCK))
2415 		rw_exit(&dn->dn_struct_rwlock);
2416 
2417 	return (error);
2418 }
2419