xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision d6bb6a8465e557cb946ef49d56ed3202f6218652)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dnode.h>
35 #include <sys/dbuf.h>
36 #include <sys/dmu_tx.h>
37 #include <sys/zio_checksum.h>
38 #include <sys/zap.h>
39 #include <sys/zil.h>
40 #include <sys/dmu_impl.h>
41 
42 
43 spa_t *
44 dmu_objset_spa(objset_t *os)
45 {
46 	return (os->os->os_spa);
47 }
48 
49 zilog_t *
50 dmu_objset_zil(objset_t *os)
51 {
52 	return (os->os->os_zil);
53 }
54 
55 dsl_pool_t *
56 dmu_objset_pool(objset_t *os)
57 {
58 	dsl_dataset_t *ds;
59 
60 	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
61 		return (ds->ds_dir->dd_pool);
62 	else
63 		return (spa_get_dsl(os->os->os_spa));
64 }
65 
66 dsl_dataset_t *
67 dmu_objset_ds(objset_t *os)
68 {
69 	return (os->os->os_dsl_dataset);
70 }
71 
72 dmu_objset_type_t
73 dmu_objset_type(objset_t *os)
74 {
75 	return (os->os->os_phys->os_type);
76 }
77 
78 void
79 dmu_objset_name(objset_t *os, char *buf)
80 {
81 	dsl_dataset_name(os->os->os_dsl_dataset, buf);
82 }
83 
84 uint64_t
85 dmu_objset_id(objset_t *os)
86 {
87 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
88 
89 	return (ds ? ds->ds_object : 0);
90 }
91 
/*
 * dsl_prop callback: the "checksum" property changed (or was first
 * read at registration time); record the effective checksum function.
 */
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	/* Map "on" to the pool's default checksum; store the result. */
	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
104 
/*
 * dsl_prop callback: the "compression" property changed (or was first
 * read at registration time); record the effective compression function.
 */
static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	/* Map "on" to the default compression; store the result. */
	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}
117 
/*
 * Byteswap an on-disk objset block in place: the embedded meta-dnode,
 * the ZIL header, and the objset type field.
 */
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}
128 
129 int
130 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
131     objset_impl_t **osip)
132 {
133 	objset_impl_t *winner, *osi;
134 	int i, err, checksum;
135 
136 	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
137 	osi->os.os = osi;
138 	osi->os_dsl_dataset = ds;
139 	osi->os_spa = spa;
140 	if (bp)
141 		osi->os_rootbp = *bp;
142 	osi->os_phys = zio_buf_alloc(sizeof (objset_phys_t));
143 	if (!BP_IS_HOLE(&osi->os_rootbp)) {
144 		zbookmark_t zb;
145 		zb.zb_objset = ds ? ds->ds_object : 0;
146 		zb.zb_object = 0;
147 		zb.zb_level = -1;
148 		zb.zb_blkid = 0;
149 
150 		dprintf_bp(&osi->os_rootbp, "reading %s", "");
151 		err = arc_read(NULL, spa, &osi->os_rootbp,
152 		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
153 		    arc_bcopy_func, osi->os_phys,
154 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, ARC_WAIT, &zb);
155 		if (err) {
156 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
157 			kmem_free(osi, sizeof (objset_impl_t));
158 			return (err);
159 		}
160 	} else {
161 		bzero(osi->os_phys, sizeof (objset_phys_t));
162 	}
163 
164 	/*
165 	 * Note: the changed_cb will be called once before the register
166 	 * func returns, thus changing the checksum/compression from the
167 	 * default (fletcher2/off).
168 	 */
169 	if (ds) {
170 		err = dsl_prop_register(ds, "checksum",
171 		    checksum_changed_cb, osi);
172 		if (err == 0)
173 			err = dsl_prop_register(ds, "compression",
174 			    compression_changed_cb, osi);
175 		if (err) {
176 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
177 			kmem_free(osi, sizeof (objset_impl_t));
178 			return (err);
179 		}
180 	} else {
181 		/* It's the meta-objset. */
182 		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
183 		osi->os_compress = ZIO_COMPRESS_LZJB;
184 	}
185 
186 	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);
187 
188 	/*
189 	 * Metadata always gets compressed and checksummed.
190 	 * If the data checksum is multi-bit correctable, and it's not
191 	 * a ZBT-style checksum, then it's suitable for metadata as well.
192 	 * Otherwise, the metadata checksum defaults to fletcher4.
193 	 */
194 	checksum = osi->os_checksum;
195 
196 	if (zio_checksum_table[checksum].ci_correctable &&
197 	    !zio_checksum_table[checksum].ci_zbt)
198 		osi->os_md_checksum = checksum;
199 	else
200 		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
201 	osi->os_md_compress = ZIO_COMPRESS_LZJB;
202 
203 	for (i = 0; i < TXG_SIZE; i++) {
204 		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
205 		    offsetof(dnode_t, dn_dirty_link[i]));
206 		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
207 		    offsetof(dnode_t, dn_dirty_link[i]));
208 	}
209 	list_create(&osi->os_dnodes, sizeof (dnode_t),
210 	    offsetof(dnode_t, dn_link));
211 	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
212 	    offsetof(dmu_buf_impl_t, db_link));
213 
214 	osi->os_meta_dnode = dnode_special_open(osi,
215 	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
216 
217 	if (ds != NULL) {
218 		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
219 		if (winner) {
220 			dmu_objset_evict(ds, osi);
221 			osi = winner;
222 		}
223 	}
224 
225 	*osip = osi;
226 	return (0);
227 }
228 
/*
 * Open the objset named 'name' (called from the ZPL and other
 * consumers).  'mode' is a DS_MODE_* value passed to the dataset layer.
 * If 'type' is not DMU_OST_ANY, the open fails with EINVAL when the
 * on-disk objset type doesn't match.  On success *osp is set; release
 * it with dmu_objset_close().
 */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	/*
	 * Reuse the shared objset_impl_t if another opener already
	 * created it; otherwise build one from the dataset's root bp.
	 */
	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		blkptr_t bp;

		dsl_dataset_get_blkptr(ds, &bp);
		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
		    ds, &bp, &osi);
		if (err) {
			dsl_dataset_close(ds, mode, os);
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	}

	os->os = osi;
	os->os_mode = mode;

	/* Enforce the requested objset type, if any. */
	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}
	*osp = os;
	return (0);
}
270 
271 void
272 dmu_objset_close(objset_t *os)
273 {
274 	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
275 	kmem_free(os, sizeof (objset_t));
276 }
277 
/*
 * Evict the dbufs of every dnode in this objset, passing 'try' through
 * to dnode_evict_dbufs().  Returns 1 if dnode_evict_dbufs() fails for
 * some dnode (eviction abandoned), 0 on success.  os_lock is dropped
 * around each dnode_evict_dbufs() call; holds on the current and next
 * dnodes keep them alive across the drop.
 */
int
dmu_objset_evict_dbufs(objset_t *os, int try)
{
	objset_impl_t *osi = os->os;
	dnode_t *dn;

	mutex_enter(&osi->os_lock);

	/* process the mdn last, since the other dnodes have holds on it */
	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);

	/*
	 * Find the first dnode with holds.  We have to do this dance
	 * because dnode_add_ref() only works if you already have a
	 * hold.  If there are no holds then it has no dbufs so OK to
	 * skip.
	 */
	for (dn = list_head(&osi->os_dnodes);
	    dn && refcount_is_zero(&dn->dn_holds);
	    dn = list_next(&osi->os_dnodes, dn))
		continue;
	if (dn)
		dnode_add_ref(dn, FTAG);

	while (dn) {
		dnode_t *next_dn = dn;

		/* Advance to the next dnode that has holds (see above). */
		do {
			next_dn = list_next(&osi->os_dnodes, next_dn);
		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
		if (next_dn)
			dnode_add_ref(next_dn, FTAG);

		/* Drop os_lock while evicting; our holds keep dn valid. */
		mutex_exit(&osi->os_lock);
		if (dnode_evict_dbufs(dn, try)) {
			dnode_rele(dn, FTAG);
			if (next_dn)
				dnode_rele(next_dn, FTAG);
			return (1);
		}
		dnode_rele(dn, FTAG);
		mutex_enter(&osi->os_lock);
		dn = next_dn;
	}
	mutex_exit(&osi->os_lock);
	return (0);
}
326 
/*
 * Tear down the in-core state of an objset (registered as the dataset
 * user-pointer eviction callback): unregister property callbacks,
 * evict all dbufs, close the meta-dnode, free the ZIL, and free the
 * objset_impl_t itself.  Assumes no dnodes are dirty in any txg.
 */
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	objset_t os;
	int err, i;

	/* Nothing may be dirty or freeing at eviction time. */
	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	/* Undo the registrations made in dmu_objset_open_impl(). */
	if (ds) {
		err = dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi);
		ASSERT(err == 0);

		err = dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi);
		ASSERT(err == 0);
	}

	/*
	 * We should need only a single pass over the dnode list, since
	 * nothing can be added to the list at this point.
	 */
	os.os = osi;
	(void) dmu_objset_evict_dbufs(&os, 0);

	/* Only the meta-dnode should remain, and it should be dbuf-free. */
	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
	kmem_free(osi, sizeof (objset_impl_t));
}
366 
/*
 * Create the in-core and on-disk state for a brand-new objset of the
 * given type (also called from the DSL for the meta-objset, with
 * ds == NULL).  Must be called in syncing context.  Returns the new
 * objset_impl_t.
 */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, dmu_objset_type_t type,
    dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	/* bp == NULL: start from an empty (hole) root block pointer. */
	VERIFY(0 == dmu_objset_open_impl(spa, ds, NULL, &osi));
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL) {
		int levels = 1;

		/*
		 * Determine the number of levels necessary for the meta-dnode
		 * to contain DN_MAX_OBJECT dnodes.
		 */
		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
			levels++;

		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = levels;
	}

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}
418 
/*
 * Argument bundle passed from dmu_objset_create() to the syncing-context
 * callback dmu_objset_create_sync().
 */
struct oscarg {
	/* optional callback to initialize the newly-created objset */
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	/* opaque argument passed through to userfunc */
	void *userarg;
	/* origin dataset when creating a clone, or NULL */
	dsl_dataset_t *clone_parent;
	/* full name of the dataset being created */
	const char *fullname;
	/* last component of fullname (relative to the parent dir) */
	const char *lastname;
	/* DMU_OST_* type for the new objset */
	dmu_objset_type_t type;
};
427 
/*
 * Syncing-context callback (run via dsl_dir_sync_task) that actually
 * creates the dataset named in the oscarg, then — if it is an empty
 * objset rather than a clone — creates the objset contents and invokes
 * the caller's initialization function.
 */
static int
dmu_objset_create_sync(dsl_dir_t *dd, void *arg, dmu_tx_t *tx)
{
	struct oscarg *oa = arg;
	dsl_dataset_t *ds;
	int err;
	blkptr_t bp;

	ASSERT(dmu_tx_is_syncing(tx));

	err = dsl_dataset_create_sync(dd, oa->fullname, oa->lastname,
	    oa->clone_parent, tx);
	dprintf_dd(dd, "fn=%s ln=%s err=%d\n",
	    oa->fullname, oa->lastname, err);
	if (err)
		return (err);

	VERIFY(0 == dsl_dataset_open_spa(dd->dd_pool->dp_spa, oa->fullname,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
	dsl_dataset_get_blkptr(ds, &bp);
	/* A clone starts with a non-hole bp; only build an empty objset. */
	if (BP_IS_HOLE(&bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);

	return (0);
}
462 
/*
 * Create a new objset (dataset or snapshot) named 'name'.  If the name
 * contains '@' this is a snapshot request: the origin's ZIL is
 * suspended and the snapshot is taken via a sync task.  Otherwise a
 * new dataset of 'type' is created, optionally cloning 'clone_parent'
 * and optionally running 'func(os, arg, tx)' to initialize it.
 * Returns 0 or an errno.
 */
int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent,
    void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pds;
	const char *tail;
	int err = 0;

	err = dsl_dir_open(name, FTAG, &pds, &tail);
	if (err)
		return (err);
	/* No tail means 'name' is an existing dsl_dir. */
	if (tail == NULL) {
		dsl_dir_close(pds, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	if (tail[0] == '@') {
		/*
		 * If we're creating a snapshot, make sure everything
		 * they might want is on disk.  XXX Sketchy to know
		 * about snapshots here, better to put in DSL.
		 */
		objset_t *os;
		/* plen = length of the dataset name up to the '@', plus NUL */
		size_t plen = strchr(name, '@') - name + 1;
		char *pbuf = kmem_alloc(plen, KM_SLEEP);
		bcopy(name, pbuf, plen - 1);
		pbuf[plen - 1] = '\0';

		err = dmu_objset_open(pbuf, DMU_OST_ANY, DS_MODE_STANDARD, &os);
		if (err == 0) {
			/* Suspend the ZIL around the snapshot sync task. */
			err = zil_suspend(dmu_objset_zil(os));
			if (err == 0) {
				err = dsl_dir_sync_task(pds,
				    dsl_dataset_snapshot_sync,
				    (void*)(tail+1), 16*1024);
				zil_resume(dmu_objset_zil(os));
			}
			dmu_objset_close(os);
		}
		kmem_free(pbuf, plen);
	} else {
		struct oscarg oa = { 0 };
		oa.userfunc = func;
		oa.userarg = arg;
		oa.fullname = name;
		oa.lastname = tail;
		oa.type = type;
		if (clone_parent != NULL) {
			/*
			 * You can't clone to a different type.
			 */
			if (clone_parent->os->os_phys->os_type != type) {
				dsl_dir_close(pds, FTAG);
				return (EINVAL);
			}
			oa.clone_parent = clone_parent->os->os_dsl_dataset;
		}
		err = dsl_dir_sync_task(pds, dmu_objset_create_sync, &oa,
		    256*1024);
	}
	dsl_dir_close(pds, FTAG);
	return (err);
}
529 
530 int
531 dmu_objset_destroy(const char *name)
532 {
533 	objset_t *os;
534 	int error;
535 
536 	/*
537 	 * If it looks like we'll be able to destroy it, and there's
538 	 * an unplayed replay log sitting around, destroy the log.
539 	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
540 	 * but the replay log objset is modified in open context.
541 	 */
542 	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
543 	if (error == 0) {
544 		zil_destroy(dmu_objset_zil(os));
545 		dmu_objset_close(os);
546 	}
547 
548 	/* XXX uncache everything? */
549 	return (dsl_dataset_destroy(name));
550 }
551 
552 int
553 dmu_objset_rollback(const char *name)
554 {
555 	int err;
556 	objset_t *os;
557 
558 	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
559 	if (err == 0) {
560 		err = zil_suspend(dmu_objset_zil(os));
561 		if (err == 0)
562 			zil_resume(dmu_objset_zil(os));
563 		dmu_objset_close(os);
564 		if (err == 0) {
565 			/* XXX uncache everything? */
566 			err = dsl_dataset_rollback(name);
567 		}
568 	}
569 	return (err);
570 }
571 
/*
 * Sync every dnode on 'list', one indirection level at a time.  A
 * dnode that still needs syncing at a higher level (dnode_sync()
 * returned 0) is put back on the list in its original position for the
 * next pass; each pass issues its I/O under one root zio and waits for
 * it before moving to the next level.
 */
static void
dmu_objset_sync_dnodes(objset_impl_t *os, list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn = list_head(list);
	int level, err;

	/* One pass per level; stop when the list drains. */
	for (level = 0; dn = list_head(list); level++) {
		zio_t *zio;
		zio = zio_root(os->os_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);

		ASSERT3U(level, <=, DN_MAX_LEVELS);

		while (dn) {
			dnode_t *next = list_next(list, dn);

			list_remove(list, dn);
			if (dnode_sync(dn, level, zio, tx) == 0) {
				/*
				 * This dnode requires syncing at higher
				 * levels; put it back onto the list.
				 */
				if (next)
					list_insert_before(list, next, dn);
				else
					list_insert_tail(list, dn);
			}
			dn = next;
		}
		err = zio_wait(zio);
		ASSERT(err == 0);
	}
}
604 
/*
 * arc_write() done callback for the objset root block (see
 * dmu_objset_sync()): fix up the root bp's fill count and type/level,
 * and if the block moved, account the old block as killed and the new
 * one as born in the dataset.
 */
/* ARGSUSED */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	objset_phys_t *osphys = zio->io_data;
	dnode_phys_t *dnp = &osphys->os_meta_dnode;
	int i;

	ASSERT3U(zio->io_error, ==, 0);

	/*
	 * Update rootbp fill count.
	 */
	os->os_rootbp.blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		os->os_rootbp.blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	/* If the block was rewritten in place, no accounting is needed. */
	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		dsl_dataset_block_kill(os->os_dsl_dataset, &zio->io_bp_orig,
		    os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
}
634 

/*
 * Write out this objset's dirty state for the given txg (called from
 * the DSL in syncing context): sync the freeing and dirty dnode lists,
 * let the ZIL free intent-log blocks, sync the meta-dnode, then write
 * the root objset block via the ARC and record the new root bp in the
 * dataset.
 */
void
dmu_objset_sync(objset_impl_t *os, dmu_tx_t *tx)
{
	extern taskq_t *dbuf_tq;
	int txgoff;
	list_t *dirty_list;
	int err;
	zbookmark_t zb;
	arc_buf_t *abuf =
	    arc_buf_alloc(os->os_spa, sizeof (objset_phys_t), FTAG);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(os->os_synctx == NULL);
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(os, &os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(os, &os->os_dirty_dnodes[txgoff], tx);

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);

	/*
	 * Sync meta-dnode
	 */
	dirty_list = &os->os_dirty_dnodes[txgoff];
	ASSERT(list_head(dirty_list) == NULL);
	list_insert_tail(dirty_list, os->os_meta_dnode);
	dmu_objset_sync_dnodes(os, dirty_list, tx);

	/*
	 * Sync the root block.
	 */
	bcopy(os->os_phys, abuf->b_data, sizeof (objset_phys_t));
	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = 0;
	/* 'killer' fixes up the bp and does the block accounting. */
	err = arc_write(NULL, os->os_spa, os->os_md_checksum,
	    os->os_md_compress, tx->tx_txg, &os->os_rootbp, abuf, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT, &zb);
	ASSERT(err == 0);
	VERIFY(arc_buf_remove_ref(abuf, FTAG) == 1);

	dsl_dataset_set_blkptr(os->os_dsl_dataset, &os->os_rootbp, tx);

	ASSERT3P(os->os_synctx, ==, tx);
	/* Wait for any outstanding dbuf taskq work before finishing. */
	taskq_wait(dbuf_tq);
	os->os_synctx = NULL;
}
693 
694 void
695 dmu_objset_stats(objset_t *os, dmu_objset_stats_t *dds)
696 {
697 	if (os->os->os_dsl_dataset != NULL) {
698 		dsl_dataset_stats(os->os->os_dsl_dataset, dds);
699 	} else {
700 		ASSERT(os->os->os_phys->os_type == DMU_OST_META);
701 		bzero(dds, sizeof (*dds));
702 	}
703 	dds->dds_type = os->os->os_phys->os_type;
704 }
705 
706 int
707 dmu_objset_is_snapshot(objset_t *os)
708 {
709 	if (os->os->os_dsl_dataset != NULL)
710 		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
711 	else
712 		return (B_FALSE);
713 }
714 
/*
 * Iterate over this dataset's snapshots: *offp is a serialized zap
 * cursor position (start at 0).  On success, copies the next snapshot
 * name into 'name' (up to namelen bytes), optionally returns its
 * object id via *idp, and advances *offp.  Returns ENOENT at the end
 * of the list or ENAMETOOLONG if 'name' is too small.
 */
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	/* No snapshot zap object means no snapshots yet. */
	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
749 
/*
 * Iterate over this dataset's child directories: *offp is a serialized
 * zap cursor position (start at 0).  On success, copies the next child
 * name into 'name' (up to namelen bytes), optionally returns its
 * object id via *idp, and advances *offp.  Returns ENOENT at the end
 * of the list (or for a snapshot, which has no children) or
 * ENAMETOOLONG if 'name' is too small.
 */
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp)
{
	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (dd->dd_phys->dd_child_dir_zapobj == 0)
		return (ENOENT);

	/* there is no next dir on a snapshot! */
	if (os->os->os_dsl_dataset->ds_object !=
	    dd->dd_phys->dd_head_dataset_obj)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    dd->dd_pool->dp_meta_objset,
	    dd->dd_phys->dd_child_dir_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
		zap_cursor_fini(&cursor);
		return (ENOENT);
	}

	if (strlen(attr.za_name) + 1 > namelen) {
		zap_cursor_fini(&cursor);
		return (ENAMETOOLONG);
	}

	(void) strcpy(name, attr.za_name);
	if (idp)
		*idp = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);
	zap_cursor_fini(&cursor);

	return (0);
}
789 
790 /*
791  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
792  */
793 void
794 dmu_objset_find(char *name, void func(char *, void *), void *arg, int flags)
795 {
796 	dsl_dir_t *dd;
797 	objset_t *os;
798 	uint64_t snapobj;
799 	zap_cursor_t zc;
800 	zap_attribute_t attr;
801 	char *child;
802 	int do_self, err;
803 
804 	err = dsl_dir_open(name, FTAG, &dd, NULL);
805 	if (err)
806 		return;
807 
808 	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);
809 
810 	/*
811 	 * Iterate over all children.
812 	 */
813 	if (dd->dd_phys->dd_child_dir_zapobj != 0) {
814 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
815 		    dd->dd_phys->dd_child_dir_zapobj);
816 		    zap_cursor_retrieve(&zc, &attr) == 0;
817 		    (void) zap_cursor_advance(&zc)) {
818 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
819 			ASSERT(attr.za_num_integers == 1);
820 
821 			/*
822 			 * No separating '/' because parent's name ends in /.
823 			 */
824 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
825 			/* XXX could probably just use name here */
826 			dsl_dir_name(dd, child);
827 			(void) strcat(child, "/");
828 			(void) strcat(child, attr.za_name);
829 			dmu_objset_find(child, func, arg, flags);
830 			kmem_free(child, MAXPATHLEN);
831 		}
832 		zap_cursor_fini(&zc);
833 	}
834 
835 	/*
836 	 * Iterate over all snapshots.
837 	 */
838 	if ((flags & DS_FIND_SNAPSHOTS) &&
839 	    dmu_objset_open(name, DMU_OST_ANY,
840 	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {
841 
842 		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
843 		dmu_objset_close(os);
844 
845 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
846 		    zap_cursor_retrieve(&zc, &attr) == 0;
847 		    (void) zap_cursor_advance(&zc)) {
848 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
849 			ASSERT(attr.za_num_integers == 1);
850 
851 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
852 			/* XXX could probably just use name here */
853 			dsl_dir_name(dd, child);
854 			(void) strcat(child, "@");
855 			(void) strcat(child, attr.za_name);
856 			func(child, arg);
857 			kmem_free(child, MAXPATHLEN);
858 		}
859 		zap_cursor_fini(&zc);
860 	}
861 
862 	dsl_dir_close(dd, FTAG);
863 
864 	/*
865 	 * Apply to self if appropriate.
866 	 */
867 	if (do_self)
868 		func(name, arg);
869 }
870