/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>

static int free_range_compar(const void *node1, const void *node2);

static kmem_cache_t *dnode_cache;

static dnode_phys_t dnode_phys_zero;

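/*
 * Default shifts for newly allocated dnodes.  On this vintage of the
 * code, SPA_MINBLOCKSHIFT is 9 (512-byte data blocks) and
 * DN_MAX_INDBLKSHIFT is 14 (16K indirect blocks); both defaults are
 * tunable.
 */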
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

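/*
 * Constructor/destructor pair for the dnode_t kmem cache.  The
 * constructor zeroes the dnode and sets up its locks, refcounts, and
 * per-txg lists; note that dnode_create() below also re-runs it by
 * hand on every allocation.
 */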
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	int i;
	dnode_t *dn = arg;
	bzero(dn, sizeof (dnode_t));

	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	refcount_create(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_create(&dn->dn_ranges[i], free_range_compar,
		    sizeof (free_range_t),
		    offsetof(struct free_range, fr_node));
		list_create(&dn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}

	list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	return (0);
}

/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
	int i;
	dnode_t *dn = arg;

	rw_destroy(&dn->dn_struct_rwlock);
	mutex_destroy(&dn->dn_mtx);
	mutex_destroy(&dn->dn_dbufs_mtx);
	refcount_destroy(&dn->dn_holds);
	refcount_destroy(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_destroy(&dn->dn_ranges[i]);
		list_destroy(&dn->dn_dirty_records[i]);
	}

	list_destroy(&dn->dn_dbufs);
}

void
dnode_init(void)
{
	dnode_cache = kmem_cache_create("dnode_t",
	    sizeof (dnode_t),
	    0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
}

void
dnode_fini(void)
{
	kmem_cache_destroy(dnode_cache);
}


#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
	int drop_struct_lock = FALSE;

	ASSERT(dn->dn_phys);
	ASSERT(dn->dn_objset);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);

	if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
		return;

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
		int i;
		ASSERT3U(dn->dn_indblkshift, >=, 0);
		ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
		if (dn->dn_datablkshift) {
			ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
			ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
		}
		ASSERT3U(dn->dn_nlevels, <=, 30);
		ASSERT3U(dn->dn_type, <=, DMU_OT_NUMTYPES);
		ASSERT3U(dn->dn_nblkptr, >=, 1);
		ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
		ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		ASSERT3U(dn->dn_datablksz, ==,
		    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
		ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
		    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		for (i = 0; i < TXG_SIZE; i++) {
			ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
		}
	}
	if (dn->dn_phys->dn_type != DMU_OT_NONE)
		ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || dn->dn_dbuf != NULL);
	if (dn->dn_dbuf != NULL) {
		ASSERT3P(dn->dn_phys, ==,
		    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
		    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
	}
	if (drop_struct_lock)
		rw_exit(&dn->dn_struct_rwlock);
}
#endif

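/*
 * Byteswap one on-disk dnode in place.  This runs when a pool written
 * on an opposite-endian machine is imported: fixed-width fields are
 * swapped individually, and the embedded block pointer array is
 * treated as a run of 64-bit words.
 */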
void
dnode_byteswap(dnode_phys_t *dnp)
{
	uint64_t *buf64 = (void*)&dnp->dn_blkptr;
	int i;

	if (dnp->dn_type == DMU_OT_NONE) {
		bzero(dnp, sizeof (dnode_phys_t));
		return;
	}

	dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
	dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
	dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
	dnp->dn_used = BSWAP_64(dnp->dn_used);

	/*
	 * dn_nblkptr is only one byte, so it's OK to read it in either
	 * byte order.  We can't read dn_bonuslen.
	 */
	ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
	ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
	for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
		buf64[i] = BSWAP_64(buf64[i]);

	/*
	 * OK to check dn_bonuslen for zero, because it won't matter if
	 * we have the wrong byte order.  This is necessary because the
	 * dnode dnode is smaller than a regular dnode.
	 */
	if (dnp->dn_bonuslen != 0) {
		/*
		 * Note that the bonus length calculated here may be
		 * longer than the actual bonus buffer.  This is because
		 * we always put the bonus buffer after the last block
		 * pointer (instead of packing it against the end of the
		 * dnode buffer).
		 */
		int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
		size_t len = DN_MAX_BONUSLEN - off;
		ASSERT3U(dnp->dn_bonustype, <, DMU_OT_NUMTYPES);
		dmu_ot[dnp->dn_bonustype].ot_byteswap(dnp->dn_bonus + off, len);
	}
}

void
dnode_buf_byteswap(void *vbuf, size_t size)
{
	dnode_phys_t *buf = vbuf;
	int i;

	ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
	ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);

	size >>= DNODE_SHIFT;
	for (i = 0; i < size; i++) {
		dnode_byteswap(buf);
		buf++;
	}
}

static int
free_range_compar(const void *node1, const void *node2)
{
	const free_range_t *rp1 = node1;
	const free_range_t *rp2 = node2;

	if (rp1->fr_blkid < rp2->fr_blkid)
		return (-1);
	else if (rp1->fr_blkid > rp2->fr_blkid)
		return (1);
	else
		return (0);
}

void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
	ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

	dnode_setdirty(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
	    (dn->dn_nblkptr-1) * sizeof (blkptr_t));
	dn->dn_bonuslen = newsize;
	if (newsize == 0)
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
	else
		dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	rw_exit(&dn->dn_struct_rwlock);
}

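/*
 * Cache a new data block size in all three of its forms.  For a
 * power-of-2 size, highbit(size - 1) equals log2(size) (e.g. 512 ->
 * highbit(511) == 9); non-power-of-2 sizes get dn_datablkshift == 0,
 * which the rest of the file uses to detect the single-block case.
 */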
static void
dnode_setdblksz(dnode_t *dn, int size)
{
	ASSERT3U(P2PHASE(size, SPA_MINBLOCKSIZE), ==, 0);
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
	    1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
	dn->dn_datablksz = size;
	dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
	dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}

static dnode_t *
dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object)
{
	dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
	(void) dnode_cons(dn, NULL, 0); /* XXX */

	dn->dn_objset = os;
	dn->dn_object = object;
	dn->dn_dbuf = db;
	dn->dn_phys = dnp;

	if (dnp->dn_datablkszsec)
		dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
	dn->dn_indblkshift = dnp->dn_indblkshift;
	dn->dn_nlevels = dnp->dn_nlevels;
	dn->dn_type = dnp->dn_type;
	dn->dn_nblkptr = dnp->dn_nblkptr;
	dn->dn_checksum = dnp->dn_checksum;
	dn->dn_compress = dnp->dn_compress;
	dn->dn_bonustype = dnp->dn_bonustype;
	dn->dn_bonuslen = dnp->dn_bonuslen;
	dn->dn_maxblkid = dnp->dn_maxblkid;

	dmu_zfetch_init(&dn->dn_zfetch, dn);

	ASSERT(dn->dn_phys->dn_type < DMU_OT_NUMTYPES);
	mutex_enter(&os->os_lock);
	list_insert_head(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	arc_space_consume(sizeof (dnode_t));
	return (dn);
}

static void
dnode_destroy(dnode_t *dn)
{
	objset_impl_t *os = dn->dn_objset;

#ifdef ZFS_DEBUG
	int i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT(NULL == list_head(&dn->dn_dirty_records[i]));
		ASSERT(0 == avl_numnodes(&dn->dn_ranges[i]));
	}
	ASSERT(NULL == list_head(&dn->dn_dbufs));
#endif

	mutex_enter(&os->os_lock);
	list_remove(&os->os_dnodes, dn);
	mutex_exit(&os->os_lock);

	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}
	dmu_zfetch_rele(&dn->dn_zfetch);
	if (dn->dn_bonus) {
		mutex_enter(&dn->dn_bonus->db_mtx);
		dbuf_evict(dn->dn_bonus);
		dn->dn_bonus = NULL;
	}
	kmem_cache_free(dnode_cache, dn);
	arc_space_return(sizeof (dnode_t));
}

void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i;

	if (blocksize == 0)
		blocksize = 1 << zfs_default_bs;
	else if (blocksize > SPA_MAXBLOCKSIZE)
		blocksize = SPA_MAXBLOCKSIZE;
	else
		blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

	if (ibs == 0)
		ibs = zfs_default_ibs;

	ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

	dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
	    dn->dn_object, tx->tx_txg, blocksize, ibs);

	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
	ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
	ASSERT(ot != DMU_OT_NONE);
	ASSERT3U(ot, <, DMU_OT_NUMTYPES);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
	ASSERT(dn->dn_type == DMU_OT_NONE);
	ASSERT3U(dn->dn_maxblkid, ==, 0);
	ASSERT3U(dn->dn_allocated_txg, ==, 0);
	ASSERT3U(dn->dn_assigned_txg, ==, 0);
	ASSERT(refcount_is_zero(&dn->dn_tx_holds));
	ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT3U(dn->dn_next_nlevels[i], ==, 0);
		ASSERT3U(dn->dn_next_indblkshift[i], ==, 0);
		ASSERT3U(dn->dn_next_bonuslen[i], ==, 0);
		ASSERT3U(dn->dn_next_blksz[i], ==, 0);
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
		ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
		ASSERT3U(avl_numnodes(&dn->dn_ranges[i]), ==, 0);
	}

	dn->dn_type = ot;
	dnode_setdblksz(dn, blocksize);
	dn->dn_indblkshift = ibs;
	dn->dn_nlevels = 1;
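	/*
	 * The block pointer array and the bonus buffer share the tail
	 * of the dnode.  With 128-byte block pointers
	 * (SPA_BLKPTRSHIFT == 7) and DN_MAX_BONUSLEN == 320 on this
	 * on-disk format, bonuslen 0 yields 3 block pointers, while a
	 * full 320-byte bonus buffer leaves only 1.
	 */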
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	dn->dn_dirtyctx = 0;

	dn->dn_free_txg = 0;
	if (dn->dn_dirtyctx_firstset) {
		kmem_free(dn->dn_dirtyctx_firstset, 1);
		dn->dn_dirtyctx_firstset = NULL;
	}

	dn->dn_allocated_txg = tx->tx_txg;

	dnode_setdirty(dn, tx);
	dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
	dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
	dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}

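/*
 * Re-initialize an already-allocated object in place, e.g. when a
 * freed object number is recycled with a different type, block size,
 * or bonus layout.  Any old data is freed first.
 */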
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	int i, old_nblkptr;
	dmu_buf_impl_t *db = NULL;

	ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
	ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(blocksize % SPA_MINBLOCKSIZE, ==, 0);
	ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
	ASSERT(tx->tx_txg != 0);
	ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
	    (bonustype != DMU_OT_NONE && bonuslen != 0));
	ASSERT3U(bonustype, <, DMU_OT_NUMTYPES);
	ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

	for (i = 0; i < TXG_SIZE; i++)
		ASSERT(!list_link_active(&dn->dn_dirty_link[i]));

	/* clean up any unreferenced dbufs */
	dnode_evict_dbufs(dn);
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/*
	 * XXX I should really have a generation number to tell if we
	 * need to do this...
	 */
	if (blocksize != dn->dn_datablksz ||
	    dn->dn_bonustype != bonustype || dn->dn_bonuslen != bonuslen) {
		/* free all old data */
		dnode_free_range(dn, 0, -1ULL, tx);
	}

	/* change blocksize */
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	if (blocksize != dn->dn_datablksz &&
	    (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
	    list_head(&dn->dn_dbufs) != NULL)) {
		db = dbuf_hold(dn, 0, FTAG);
		dbuf_new_size(db, blocksize, tx);
	}
	dnode_setdblksz(dn, blocksize);
	dnode_setdirty(dn, tx);
	dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
	rw_exit(&dn->dn_struct_rwlock);
	if (db)
		dbuf_rele(db, FTAG);

	/* change type */
	dn->dn_type = ot;

	/* change bonus size and type */
	mutex_enter(&dn->dn_mtx);
	old_nblkptr = dn->dn_nblkptr;
	dn->dn_bonustype = bonustype;
	dn->dn_bonuslen = bonuslen;
	dn->dn_nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
	dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
	dn->dn_compress = ZIO_COMPRESS_INHERIT;
	ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

	/* XXX - for now, we can't make nblkptr smaller */
	ASSERT3U(dn->dn_nblkptr, >=, old_nblkptr);

	/* fix up the bonus db_size if dn_nblkptr has changed */
	if (dn->dn_bonus && dn->dn_nblkptr != old_nblkptr) {
		dn->dn_bonus->db.db_size =
		    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
	}

	dn->dn_allocated_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);
}

void
dnode_special_close(dnode_t *dn)
{
	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	dnode_destroy(dn);
}

dnode_t *
dnode_special_open(objset_impl_t *os, dnode_phys_t *dnp, uint64_t object)
{
	dnode_t *dn = dnode_create(os, dnp, NULL, object);
	DNODE_VERIFY(dn);
	return (dn);
}

static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
	dnode_t **children_dnodes = arg;
	int i;
	int epb = db->db_size >> DNODE_SHIFT;

	for (i = 0; i < epb; i++) {
		dnode_t *dn = children_dnodes[i];
		int n;

		if (dn == NULL)
			continue;
#ifdef ZFS_DEBUG
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(list_head(&dn->dn_dbufs) == NULL);
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		for (n = 0; n < TXG_SIZE; n++)
			ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
#endif
		children_dnodes[i] = NULL;
		dnode_destroy(dn);
	}
	kmem_free(children_dnodes, epb * sizeof (dnode_t *));
}

/*
 * errors:
 * EINVAL - invalid object number.
 * EIO - i/o error.
 *
 * Note: this function succeeds even for free dnodes.
 */
int
dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
    void *tag, dnode_t **dnp)
{
	int epb, idx, err;
	int drop_struct_lock = FALSE;
	int type;
	uint64_t blk;
	dnode_t *mdn, *dn;
	dmu_buf_impl_t *db;
	dnode_t **children_dnodes;

	/*
	 * If you are holding the spa config lock as writer, you shouldn't
	 * be asking the DMU to do *anything*.
	 */
	ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0);

	if (object == 0 || object >= DN_MAX_OBJECT)
		return (EINVAL);

	mdn = os->os_meta_dnode;

	DNODE_VERIFY(mdn);

	if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
		rw_enter(&mdn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));

	db = dbuf_hold(mdn, blk, FTAG);
	if (drop_struct_lock)
		rw_exit(&mdn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, NULL, DB_RF_CANFAIL);
	if (err) {
		dbuf_rele(db, FTAG);
		return (err);
	}

	ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
	epb = db->db.db_size >> DNODE_SHIFT;

	idx = object & (epb-1);

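	/*
	 * Each dnode buffer carries an array of dnode_t pointers as its
	 * user data, one slot per on-disk dnode.  Both the array and
	 * the individual slots are installed lock-free below: losers of
	 * the dmu_buf_set_user() and atomic_cas_ptr() races free their
	 * copy and adopt the winner's.
	 */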
	children_dnodes = dmu_buf_get_user(&db->db);
	if (children_dnodes == NULL) {
		dnode_t **winner;
		children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
		    KM_SLEEP);
		if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
		    dnode_buf_pageout)) {
			kmem_free(children_dnodes, epb * sizeof (dnode_t *));
			children_dnodes = winner;
		}
	}

	if ((dn = children_dnodes[idx]) == NULL) {
		dnode_phys_t *dnp = (dnode_phys_t *)db->db.db_data+idx;
		dnode_t *winner;

		dn = dnode_create(os, dnp, db, object);
		winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn);
		if (winner != NULL) {
			dnode_destroy(dn);
			dn = winner;
		}
	}

	mutex_enter(&dn->dn_mtx);
	type = dn->dn_type;
	if (dn->dn_free_txg ||
	    ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
	    ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)) {
		mutex_exit(&dn->dn_mtx);
		dbuf_rele(db, FTAG);
		return (type == DMU_OT_NONE ? ENOENT : EEXIST);
	}
	mutex_exit(&dn->dn_mtx);

	if (refcount_add(&dn->dn_holds, tag) == 1)
		dbuf_add_ref(db, dn);

	DNODE_VERIFY(dn);
	ASSERT3P(dn->dn_dbuf, ==, db);
	ASSERT3U(dn->dn_object, ==, object);
	dbuf_rele(db, FTAG);

	*dnp = dn;
	return (0);
}

/*
 * Return a held dnode in *dnp if the object is allocated,
 * or an error if not.
 */
int
dnode_hold(objset_impl_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}

/*
 * Can only add a reference if there is already at least one
 * reference on the dnode.  Returns FALSE if unable to add a
 * new reference.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	if (refcount_is_zero(&dn->dn_holds)) {
		mutex_exit(&dn->dn_mtx);
		return (FALSE);
	}
	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
	mutex_exit(&dn->dn_mtx);
	return (TRUE);
}

void
dnode_rele(dnode_t *dn, void *tag)
{
	uint64_t refs;

	mutex_enter(&dn->dn_mtx);
	refs = refcount_remove(&dn->dn_holds, tag);
	mutex_exit(&dn->dn_mtx);
	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && dn->dn_dbuf)
		dbuf_rele(dn->dn_dbuf, dn);
}

void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	if (dn->dn_object == DMU_META_DNODE_OBJECT)
		return;

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	/* ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg); */
	mutex_exit(&dn->dn_mtx);
#endif

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT3U(dn->dn_next_bonuslen[txg&TXG_MASK], ==, 0);
	ASSERT3U(dn->dn_next_blksz[txg&TXG_MASK], ==, 0);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf. We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

	(void) dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}

void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		dnode_setdirty(dn, tx);
	}
}

/*
 * Try to change the block size for the indicated dnode.  This can only
 * succeed if there are no blocks allocated or dirty beyond the first block.
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	int err;

	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	if (size > SPA_MAXBLOCKSIZE)
		size = SPA_MAXBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == dn->dn_indblkshift)
		ibs = 0;

	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_phys->dn_maxblkid != 0)
		goto fail;

	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);

		if (db->db_blkid != 0 && db->db_blkid != DB_BONUS_BLKID) {
			mutex_exit(&dn->dn_dbufs_mtx);
			goto fail;
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	if (ibs && dn->dn_nlevels != 1)
		goto fail;

	/* resize the old block */
	err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
	if (err == 0)
		dbuf_new_size(db, size, tx);
	else if (err != ENOENT)
		goto fail;

	dnode_setdblksz(dn, size);
	dnode_setdirty(dn, tx);
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
	if (ibs) {
		dn->dn_indblkshift = ibs;
		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
	}
	/* rele after we have fixed the blocksize in the dnode */
	if (db)
		dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
	return (0);

fail:
	rw_exit(&dn->dn_struct_rwlock);
	return (ENOTSUP);
}

/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
	uint64_t txgoff = tx->tx_txg & TXG_MASK;
	int epbs, new_nlevels;
	uint64_t sz;

	ASSERT(blkid != DB_BONUS_BLKID);

	ASSERT(have_read ?
	    RW_READ_HELD(&dn->dn_struct_rwlock) :
	    RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * if we have a read-lock, check to see if we need to do any work
	 * before upgrading to a write-lock.
	 */
	if (have_read) {
		if (blkid <= dn->dn_maxblkid)
			return;

		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
	}

	if (blkid <= dn->dn_maxblkid)
		goto out;

	dn->dn_maxblkid = blkid;

	/*
	 * Compute the number of levels necessary to support the new maxblkid.
	 */
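	/*
	 * Illustrative arithmetic (a sketch, not code from this file):
	 * with 16K indirect blocks and 128-byte block pointers, epbs is
	 * 7 (128 pointers per indirect), so a dnode with 3 block
	 * pointers covers blkids 0-2 with one level, 0-383 with two,
	 * and 0-49151 with three.
	 */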
	new_nlevels = 1;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (sz = dn->dn_nblkptr;
	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
		new_nlevels++;

	if (new_nlevels > dn->dn_nlevels) {
		int old_nlevels = dn->dn_nlevels;
		dmu_buf_impl_t *db;
		list_t *list;
		dbuf_dirty_record_t *new, *dr, *dr_next;

		dn->dn_nlevels = new_nlevels;

		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
		dn->dn_next_nlevels[txgoff] = new_nlevels;

		/* dirty the left indirects */
		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
		new = dbuf_dirty(db, tx);
		dbuf_rele(db, FTAG);

		/* transfer the dirty records to the new indirect */
		mutex_enter(&dn->dn_mtx);
		mutex_enter(&new->dt.di.dr_mtx);
		list = &dn->dn_dirty_records[txgoff];
		for (dr = list_head(list); dr; dr = dr_next) {
			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
			    dr->dr_dbuf->db_blkid != DB_BONUS_BLKID) {
				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
				list_remove(&dn->dn_dirty_records[txgoff], dr);
				list_insert_tail(&new->dt.di.dr_children, dr);
				dr->dr_parent = new;
			}
		}
		mutex_exit(&new->dt.di.dr_mtx);
		mutex_exit(&dn->dn_mtx);
	}

out:
	if (have_read)
		rw_downgrade(&dn->dn_struct_rwlock);
}

void
dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
	avl_index_t where;
	free_range_t *rp;
	free_range_t rp_tofind;
	uint64_t endblk = blkid + nblks;

	ASSERT(MUTEX_HELD(&dn->dn_mtx));
	ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */

	dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
	    blkid, nblks, tx->tx_txg);
	rp_tofind.fr_blkid = blkid;
	rp = avl_find(tree, &rp_tofind, &where);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_BEFORE);
	if (rp == NULL)
		rp = avl_nearest(tree, where, AVL_AFTER);

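	/*
	 * Walk each existing free range that may overlap [blkid, endblk)
	 * and trim it: full overlap removes the node, overlap at either
	 * edge shrinks it, and a strictly interior clear splits it in
	 * two.
	 */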
	while (rp && (rp->fr_blkid <= blkid + nblks)) {
		uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks;
		free_range_t *nrp = AVL_NEXT(tree, rp);

		if (blkid <= rp->fr_blkid && endblk >= fr_endblk) {
			/* clear this entire range */
			avl_remove(tree, rp);
			kmem_free(rp, sizeof (free_range_t));
		} else if (blkid <= rp->fr_blkid &&
		    endblk > rp->fr_blkid && endblk < fr_endblk) {
			/* clear the beginning of this range */
			rp->fr_blkid = endblk;
			rp->fr_nblks = fr_endblk - endblk;
		} else if (blkid > rp->fr_blkid && blkid < fr_endblk &&
		    endblk >= fr_endblk) {
			/* clear the end of this range */
			rp->fr_nblks = blkid - rp->fr_blkid;
		} else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
			/* clear a chunk out of this range */
			free_range_t *new_rp =
			    kmem_alloc(sizeof (free_range_t), KM_SLEEP);

			new_rp->fr_blkid = endblk;
			new_rp->fr_nblks = fr_endblk - endblk;
			avl_insert_here(tree, new_rp, rp, AVL_AFTER);
			rp->fr_nblks = blkid - rp->fr_blkid;
		}
		/* there may be no overlap */
		rp = nrp;
	}
}

void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	uint64_t blkoff, blkid, nblks;
	int blksz, blkshift, head, tail;
	int trunc = FALSE;
	int epbs;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	blksz = dn->dn_datablksz;
	blkshift = dn->dn_datablkshift;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	if (len == -1ULL) {
		len = UINT64_MAX - off;
		trunc = TRUE;
	}

	/*
	 * First, block align the region to free:
	 */
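	/*
	 * A worked example with hypothetical sizes: assuming 4K blocks
	 * and dn_maxblkid >= 2, off 1000 and len 10000 zero the last
	 * 3096 bytes of block 0, zero the first 2808 bytes of block 2,
	 * and hand block 1 to the syncing phase to be freed whole.
	 */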
	if (ISP2(blksz)) {
		head = P2NPHASE(off, blksz);
		blkoff = P2PHASE(off, blksz);
		if ((off >> blkshift) > dn->dn_maxblkid)
			goto out;
	} else {
		ASSERT(dn->dn_maxblkid == 0);
		if (off == 0 && len >= blksz) {
			/* Freeing the whole block; fast-track this request */
			blkid = 0;
			nblks = 1;
			goto done;
		} else if (off >= blksz) {
			/* Freeing past end-of-data */
			goto out;
		} else {
			/* Freeing part of the block. */
			head = blksz - off;
			ASSERT3U(head, >, 0);
		}
		blkoff = off;
	}
	/* zero out any partial block data at the start of the range */
	if (head) {
		ASSERT3U(blkoff + head, ==, blksz);
		if (len < head)
			head = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
		    FTAG, &db) == 0) {
			caddr_t data;

			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				data = db->db.db_data;
				bzero(data + blkoff, head);
			}
			dbuf_rele(db, FTAG);
		}
		off += head;
		len -= head;
	}

	/* If the range was less than one block, we're done */
	if (len == 0)
		goto out;

	/* If the remaining range is past end of file, we're done */
	if ((off >> blkshift) > dn->dn_maxblkid)
		goto out;

	ASSERT(ISP2(blksz));
	if (trunc)
		tail = 0;
	else
		tail = P2PHASE(len, blksz);

	ASSERT3U(P2PHASE(off, blksz), ==, 0);
	/* zero out any partial block data at the end of the range */
	if (tail) {
		if (len < tail)
			tail = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
		    TRUE, FTAG, &db) == 0) {
			/* don't dirty if not on disk and not dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				bzero(db->db.db_data, tail);
			}
			dbuf_rele(db, FTAG);
		}
		len -= tail;
	}

	/* If the range did not include a full block, we are done */
	if (len == 0)
		goto out;

	ASSERT(IS_P2ALIGNED(off, blksz));
	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
	blkid = off >> blkshift;
	nblks = len >> blkshift;
	if (trunc)
		nblks += 1;

	/*
	 * Read in and mark all the level-1 indirects dirty,
	 * so that they will stay in memory until syncing phase.
	 * Always dirty the first and last indirect to make sure
	 * we dirty all the partial indirects.
	 */
	if (dn->dn_nlevels > 1) {
		uint64_t i, first, last;
		int shift = epbs + dn->dn_datablkshift;

		first = blkid >> epbs;
		if (db = dbuf_hold_level(dn, 1, first, FTAG)) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		if (trunc)
			last = dn->dn_maxblkid >> epbs;
		else
			last = (blkid + nblks - 1) >> epbs;
		if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		for (i = first + 1; i < last; i++) {
			uint64_t ibyte = i << shift;
			int err;

			err = dnode_next_offset(dn,
			    DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i >= last)
				break;
			ASSERT(err == 0);
			db = dbuf_hold_level(dn, 1, i, FTAG);
			if (db) {
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			}
		}
	}
done:
	/*
	 * Add this range to the dnode range list.
	 * We will finish up this free operation in the syncing phase.
	 */
	mutex_enter(&dn->dn_mtx);
	dnode_clear_range(dn, blkid, nblks, tx);
	{
		free_range_t *rp, *found;
		avl_index_t where;
		avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

		/* Add new range to dn_ranges */
		rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
		rp->fr_blkid = blkid;
		rp->fr_nblks = nblks;
		found = avl_find(tree, rp, &where);
		ASSERT(found == NULL);
		avl_insert(tree, rp, where);
		dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
		    blkid, nblks, tx->tx_txg);
	}
	mutex_exit(&dn->dn_mtx);

	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
	dnode_setdirty(dn, tx);
out:
	if (trunc && dn->dn_maxblkid >= (off >> blkshift))
		dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0);

	rw_exit(&dn->dn_struct_rwlock);
}

/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
	free_range_t range_tofind;
	void *dp = spa_get_dsl(dn->dn_objset->os_spa);
	int i;

	if (blkid == DB_BONUS_BLKID)
		return (FALSE);

	/*
	 * If we're in the process of opening the pool, dp will not be
	 * set yet, but there shouldn't be anything dirty.
	 */
	if (dp == NULL)
		return (FALSE);

	if (dn->dn_free_txg)
		return (TRUE);

	/*
	 * If dn_datablkshift is not set, then there's only a single
	 * block, in which case there will never be a free range so it
	 * won't matter.
	 */
	range_tofind.fr_blkid = blkid;
	mutex_enter(&dn->dn_mtx);
	for (i = 0; i < TXG_SIZE; i++) {
		free_range_t *range_found;
		avl_index_t idx;

		range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx);
		if (range_found) {
			ASSERT(range_found->fr_nblks > 0);
			break;
		}
		range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE);
		if (range_found &&
		    range_found->fr_blkid + range_found->fr_nblks > blkid)
			break;
	}
	mutex_exit(&dn->dn_mtx);
	return (i < TXG_SIZE);
}

/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
	uint64_t space;
	dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
	    dn, dn->dn_phys,
	    (u_longlong_t)dn->dn_phys->dn_used,
	    (longlong_t)delta);

	mutex_enter(&dn->dn_mtx);
	space = DN_USED_BYTES(dn->dn_phys);
	if (delta > 0) {
		ASSERT3U(space + delta, >=, space); /* no overflow */
	} else {
		ASSERT3U(space, >=, -delta); /* no underflow */
	}
	space += delta;
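	/*
	 * Pools older than SPA_VERSION_DNODE_BYTES store usage in
	 * 512-byte sectors (DEV_BSHIFT == 9) rather than bytes, and
	 * leave DNODE_FLAG_USED_BYTES clear to record which unit
	 * dn_used is in.
	 */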
	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
		ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
		ASSERT3U(P2PHASE(space, 1<<DEV_BSHIFT), ==, 0);
		dn->dn_phys->dn_used = space >> DEV_BSHIFT;
	} else {
		dn->dn_phys->dn_used = space;
		dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
	}
	mutex_exit(&dn->dn_mtx);
}

/*
 * Call when we think we're going to write/free space in open context.
 * Be conservative (ie. OK to write less than this or free more than
 * this, but don't write more or free less).
 */
void
dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;

	if (space > 0)
		space = spa_get_asize(os->os_spa, space);

	if (ds)
		dsl_dir_willuse_space(ds->ds_dir, space, tx);

	dmu_tx_willuse_space(tx, space);
}

static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
	int lvl, uint64_t blkfill, uint64_t txg)
{
	dmu_buf_impl_t *db = NULL;
	void *data = NULL;
	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t epb = 1ULL << epbs;
	uint64_t minfill, maxfill;
	boolean_t hole;
	int i, inc, error, span;

	dprintf("probing object %llu offset %llx level %d of %u\n",
	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

	hole = flags & DNODE_FIND_HOLE;
	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
	ASSERT(txg == 0 || !hole);

	if (lvl == dn->dn_phys->dn_nlevels) {
		error = 0;
		epb = dn->dn_phys->dn_nblkptr;
		data = dn->dn_phys->dn_blkptr;
	} else {
		uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
		if (error) {
			if (error != ENOENT)
				return (error);
			if (hole)
				return (0);
			/*
			 * This can only happen when we are searching up
			 * the block tree for data.  We don't really need to
			 * adjust the offset, as we will just end up looking
			 * at the pointer to this block in its parent, and it's
			 * going to be unallocated, so we will skip over it.
			 */
			return (ESRCH);
		}
		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
		if (error) {
			dbuf_rele(db, FTAG);
			return (error);
		}
		data = db->db.db_data;
	}

	if (db && txg &&
	    (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) {
		/*
		 * This can only happen when we are searching up the tree
		 * and these conditions mean that we need to keep climbing.
		 */
		error = ESRCH;
	} else if (lvl == 0) {
		dnode_phys_t *dnp = data;
		span = DNODE_SHIFT;
		ASSERT(dn->dn_type == DMU_OT_DNODE);

		for (i = (*offset >> span) & (blkfill - 1);
		    i >= 0 && i < blkfill; i += inc) {
			boolean_t newcontents = B_TRUE;
			if (txg) {
				int j;
				newcontents = B_FALSE;
				for (j = 0; j < dnp[i].dn_nblkptr; j++) {
					if (dnp[i].dn_blkptr[j].blk_birth > txg)
						newcontents = B_TRUE;
				}
			}
			if ((dnp[i].dn_type == DMU_OT_NONE) == hole &&
			    newcontents)
				break;
			*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == blkfill)
			error = ESRCH;
	} else {
		blkptr_t *bp = data;
		span = (lvl - 1) * epbs + dn->dn_datablkshift;
		minfill = 0;
		maxfill = blkfill << ((lvl - 1) * epbs);

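		/*
		 * blk_fill counts the allocated entries beneath a
		 * block pointer.  A hole search accepts anything not
		 * completely full; a data search accepts anything not
		 * completely empty.
		 */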
		if (hole)
			maxfill--;
		else
			minfill++;

		for (i = (*offset >> span) & ((1ULL << epbs) - 1);
		    i >= 0 && i < epb; i += inc) {
			if (bp[i].blk_fill >= minfill &&
			    bp[i].blk_fill <= maxfill &&
			    (hole || bp[i].blk_birth > txg))
				break;
			if (inc < 0 && *offset < (1ULL << span))
				*offset = 0;
			else
				*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == epb)
			error = ESRCH;
	}

	if (db)
		dbuf_rele(db, FTAG);

	return (error);
}

1371 
1372 /*
1373  * Find the next hole, data, or sparse region at or after *offset.
1374  * The value 'blkfill' tells us how many items we expect to find
1375  * in an L0 data block; this value is 1 for normal objects,
1376  * DNODES_PER_BLOCK for the meta dnode, and some fraction of
1377  * DNODES_PER_BLOCK when searching for sparse regions thereof.
1378  *
1379  * Examples:
1380  *
1381  * dnode_next_offset(dn, flags, offset, 1, 1, 0);
1382  *	Finds the next/previous hole/data in a file.
1383  *	Used in dmu_offset_next().
1384  *
1385  * dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
1386  *	Finds the next free/allocated dnode an objset's meta-dnode.
1387  *	Only finds objects that have new contents since txg (ie.
1388  *	bonus buffer changes and content removal are ignored).
1389  *	Used in dmu_object_next().
1390  *
1391  * dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
1392  *	Finds the next L2 meta-dnode bp that's at most 1/4 full.
1393  *	Used in dmu_object_alloc().
1394  */
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
    int minlvl, uint64_t blkfill, uint64_t txg)
{
	uint64_t initial_offset = *offset;
	int lvl, maxlvl;
	int error = 0;

	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dn->dn_phys->dn_nlevels == 0) {
		error = ESRCH;
		goto out;
	}

	if (dn->dn_datablkshift == 0) {
		if (*offset < dn->dn_datablksz) {
			if (flags & DNODE_FIND_HOLE)
				*offset = dn->dn_datablksz;
		} else {
			error = ESRCH;
		}
		goto out;
	}

	maxlvl = dn->dn_phys->dn_nlevels;

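	/*
	 * Search upward from minlvl until some level reports a match,
	 * then walk back down so that *offset settles on an exact
	 * level-minlvl boundary rather than on an indirect-block
	 * boundary.
	 */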
	for (lvl = minlvl; lvl <= maxlvl; lvl++) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
		if (error != ESRCH)
			break;
	}

	while (error == 0 && --lvl >= minlvl) {
		error = dnode_next_offset_level(dn,
		    flags, offset, lvl, blkfill, txg);
	}

	if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
	    initial_offset < *offset : initial_offset > *offset))
		error = ESRCH;
out:
	if (!(flags & DNODE_FIND_HAVELOCK))
		rw_exit(&dn->dn_struct_rwlock);

	return (error);
}