xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision 98677c366f39bc9e671513615d9b1a2c6f15621d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu_objset.h>
30 #include <sys/dsl_dir.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_prop.h>
33 #include <sys/dsl_pool.h>
34 #include <sys/dsl_synctask.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/dmu_tx.h>
38 #include <sys/zio_checksum.h>
39 #include <sys/zap.h>
40 #include <sys/zil.h>
41 #include <sys/dmu_impl.h>
42 
43 
44 spa_t *
45 dmu_objset_spa(objset_t *os)
46 {
47 	return (os->os->os_spa);
48 }
49 
50 zilog_t *
51 dmu_objset_zil(objset_t *os)
52 {
53 	return (os->os->os_zil);
54 }
55 
56 dsl_pool_t *
57 dmu_objset_pool(objset_t *os)
58 {
59 	dsl_dataset_t *ds;
60 
61 	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
62 		return (ds->ds_dir->dd_pool);
63 	else
64 		return (spa_get_dsl(os->os->os_spa));
65 }
66 
67 dsl_dataset_t *
68 dmu_objset_ds(objset_t *os)
69 {
70 	return (os->os->os_dsl_dataset);
71 }
72 
73 dmu_objset_type_t
74 dmu_objset_type(objset_t *os)
75 {
76 	return (os->os->os_phys->os_type);
77 }
78 
79 void
80 dmu_objset_name(objset_t *os, char *buf)
81 {
82 	dsl_dataset_name(os->os->os_dsl_dataset, buf);
83 }
84 
85 uint64_t
86 dmu_objset_id(objset_t *os)
87 {
88 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
89 
90 	return (ds ? ds->ds_object : 0);
91 }
92 
93 static void
94 checksum_changed_cb(void *arg, uint64_t newval)
95 {
96 	objset_impl_t *osi = arg;
97 
98 	/*
99 	 * Inheritance should have been done by now.
100 	 */
101 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
102 
103 	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
104 }
105 
106 static void
107 compression_changed_cb(void *arg, uint64_t newval)
108 {
109 	objset_impl_t *osi = arg;
110 
111 	/*
112 	 * Inheritance and range checking should have been done by now.
113 	 */
114 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
115 
116 	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
117 }
118 
119 void
120 dmu_objset_byteswap(void *buf, size_t size)
121 {
122 	objset_phys_t *osp = buf;
123 
124 	ASSERT(size == sizeof (objset_phys_t));
125 	dnode_byteswap(&osp->os_meta_dnode);
126 	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
127 	osp->os_type = BSWAP_64(osp->os_type);
128 }
129 
130 int
131 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
132     objset_impl_t **osip)
133 {
134 	objset_impl_t *winner, *osi;
135 	int i, err, checksum;
136 
137 	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
138 	osi->os.os = osi;
139 	osi->os_dsl_dataset = ds;
140 	osi->os_spa = spa;
141 	if (bp)
142 		osi->os_rootbp = *bp;
143 	osi->os_phys = zio_buf_alloc(sizeof (objset_phys_t));
144 	if (!BP_IS_HOLE(&osi->os_rootbp)) {
145 		uint32_t aflags = ARC_WAIT;
146 		zbookmark_t zb;
147 		zb.zb_objset = ds ? ds->ds_object : 0;
148 		zb.zb_object = 0;
149 		zb.zb_level = -1;
150 		zb.zb_blkid = 0;
151 
152 		dprintf_bp(&osi->os_rootbp, "reading %s", "");
153 		err = arc_read(NULL, spa, &osi->os_rootbp,
154 		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
155 		    arc_bcopy_func, osi->os_phys,
156 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
157 		if (err) {
158 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
159 			kmem_free(osi, sizeof (objset_impl_t));
160 			return (err);
161 		}
162 	} else {
163 		bzero(osi->os_phys, sizeof (objset_phys_t));
164 	}
165 
166 	/*
167 	 * Note: the changed_cb will be called once before the register
168 	 * func returns, thus changing the checksum/compression from the
169 	 * default (fletcher2/off).  Snapshots don't need to know, and
170 	 * registering would complicate clone promotion.
171 	 */
172 	if (ds && ds->ds_phys->ds_num_children == 0) {
173 		err = dsl_prop_register(ds, "checksum",
174 		    checksum_changed_cb, osi);
175 		if (err == 0)
176 			err = dsl_prop_register(ds, "compression",
177 			    compression_changed_cb, osi);
178 		if (err) {
179 			zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
180 			kmem_free(osi, sizeof (objset_impl_t));
181 			return (err);
182 		}
183 	} else if (ds == NULL) {
184 		/* It's the meta-objset. */
185 		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
186 		osi->os_compress = ZIO_COMPRESS_LZJB;
187 	}
188 
189 	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);
190 
191 	/*
192 	 * Metadata always gets compressed and checksummed.
193 	 * If the data checksum is multi-bit correctable, and it's not
194 	 * a ZBT-style checksum, then it's suitable for metadata as well.
195 	 * Otherwise, the metadata checksum defaults to fletcher4.
196 	 */
197 	checksum = osi->os_checksum;
198 
199 	if (zio_checksum_table[checksum].ci_correctable &&
200 	    !zio_checksum_table[checksum].ci_zbt)
201 		osi->os_md_checksum = checksum;
202 	else
203 		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;
204 	osi->os_md_compress = ZIO_COMPRESS_LZJB;
205 
206 	for (i = 0; i < TXG_SIZE; i++) {
207 		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
208 		    offsetof(dnode_t, dn_dirty_link[i]));
209 		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
210 		    offsetof(dnode_t, dn_dirty_link[i]));
211 	}
212 	list_create(&osi->os_dnodes, sizeof (dnode_t),
213 	    offsetof(dnode_t, dn_link));
214 	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
215 	    offsetof(dmu_buf_impl_t, db_link));
216 
217 	osi->os_meta_dnode = dnode_special_open(osi,
218 	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
219 
220 	if (ds != NULL) {
221 		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
222 		if (winner) {
223 			dmu_objset_evict(ds, osi);
224 			osi = winner;
225 		}
226 	}
227 
228 	*osip = osi;
229 	return (0);
230 }
231 
232 /* called from zpl */
233 int
234 dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
235     objset_t **osp)
236 {
237 	dsl_dataset_t *ds;
238 	int err;
239 	objset_t *os;
240 	objset_impl_t *osi;
241 
242 	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
243 	err = dsl_dataset_open(name, mode, os, &ds);
244 	if (err) {
245 		kmem_free(os, sizeof (objset_t));
246 		return (err);
247 	}
248 
249 	osi = dsl_dataset_get_user_ptr(ds);
250 	if (osi == NULL) {
251 		blkptr_t bp;
252 
253 		dsl_dataset_get_blkptr(ds, &bp);
254 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
255 		    ds, &bp, &osi);
256 		if (err) {
257 			dsl_dataset_close(ds, mode, os);
258 			kmem_free(os, sizeof (objset_t));
259 			return (err);
260 		}
261 	}
262 
263 	os->os = osi;
264 	os->os_mode = mode;
265 
266 	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
267 		dmu_objset_close(os);
268 		return (EINVAL);
269 	}
270 	*osp = os;
271 	return (0);
272 }
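
/*
 * Illustrative caller sketch (the dataset name "tank/fs" and the error
 * handling are placeholders, not taken from this file): a consumer pairs
 * dmu_objset_open() with dmu_objset_close(), e.g.
 *
 *	objset_t *os;
 *	int err = dmu_objset_open("tank/fs", DMU_OST_ZFS,
 *	    DS_MODE_STANDARD, &os);
 *	if (err == 0) {
 *		... operate on os (dmu_objset_zil(os), dmu_objset_spa(os)) ...
 *		dmu_objset_close(os);
 *	}
 */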
273 
274 void
275 dmu_objset_close(objset_t *os)
276 {
277 	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
278 	kmem_free(os, sizeof (objset_t));
279 }
280 
281 int
282 dmu_objset_evict_dbufs(objset_t *os, int try)
283 {
284 	objset_impl_t *osi = os->os;
285 	dnode_t *dn;
286 
287 	mutex_enter(&osi->os_lock);
288 
289 	/* process the mdn last, since the other dnodes have holds on it */
290 	list_remove(&osi->os_dnodes, osi->os_meta_dnode);
291 	list_insert_tail(&osi->os_dnodes, osi->os_meta_dnode);
292 
293 	/*
294 	 * Find the first dnode with holds.  We have to do this dance
295 	 * because dnode_add_ref() only works if you already have a
296 	 * hold.  If a dnode has no holds, it also has no dbufs, so it
297 	 * is safe to skip.
298 	 */
299 	for (dn = list_head(&osi->os_dnodes);
300 	    dn && refcount_is_zero(&dn->dn_holds);
301 	    dn = list_next(&osi->os_dnodes, dn))
302 		continue;
303 	if (dn)
304 		dnode_add_ref(dn, FTAG);
305 
306 	while (dn) {
307 		dnode_t *next_dn = dn;
308 
309 		do {
310 			next_dn = list_next(&osi->os_dnodes, next_dn);
311 		} while (next_dn && refcount_is_zero(&next_dn->dn_holds));
312 		if (next_dn)
313 			dnode_add_ref(next_dn, FTAG);
314 
315 		mutex_exit(&osi->os_lock);
316 		if (dnode_evict_dbufs(dn, try)) {
317 			dnode_rele(dn, FTAG);
318 			if (next_dn)
319 				dnode_rele(next_dn, FTAG);
320 			return (1);
321 		}
322 		dnode_rele(dn, FTAG);
323 		mutex_enter(&osi->os_lock);
324 		dn = next_dn;
325 	}
326 	mutex_exit(&osi->os_lock);
327 	return (0);
328 }
329 
330 void
331 dmu_objset_evict(dsl_dataset_t *ds, void *arg)
332 {
333 	objset_impl_t *osi = arg;
334 	objset_t os;
335 	int i;
336 
337 	for (i = 0; i < TXG_SIZE; i++) {
338 		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
339 		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
340 	}
341 
342 	if (ds && ds->ds_phys->ds_num_children == 0) {
343 		VERIFY(0 == dsl_prop_unregister(ds, "checksum",
344 		    checksum_changed_cb, osi));
345 		VERIFY(0 == dsl_prop_unregister(ds, "compression",
346 		    compression_changed_cb, osi));
347 	}
348 
349 	/*
350 	 * We should need only a single pass over the dnode list, since
351 	 * nothing can be added to the list at this point.
352 	 */
353 	os.os = osi;
354 	(void) dmu_objset_evict_dbufs(&os, 0);
355 
356 	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
357 	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
358 	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);
359 
360 	dnode_special_close(osi->os_meta_dnode);
361 	zil_free(osi->os_zil);
362 
363 	zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
364 	kmem_free(osi, sizeof (objset_impl_t));
365 }
366 
367 /* called from dsl for meta-objset */
368 objset_impl_t *
369 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, dmu_objset_type_t type,
370     dmu_tx_t *tx)
371 {
372 	objset_impl_t *osi;
373 	dnode_t *mdn;
374 
375 	ASSERT(dmu_tx_is_syncing(tx));
376 	VERIFY(0 == dmu_objset_open_impl(spa, ds, NULL, &osi));
377 	mdn = osi->os_meta_dnode;
378 
379 	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
380 	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
381 
382 	/*
383 	 * We don't want to have to increase the meta-dnode's nlevels
384 	 * later, because then we could do it in quiescing context while
385 	 * we are also accessing it in open context.
386 	 *
387 	 * This precaution is not necessary for the MOS (ds == NULL),
388 	 * because the MOS is only updated in syncing context.
389 	 * This is most fortunate: the MOS is the only objset that
390 	 * needs to be synced multiple times as spa_sync() iterates
391 	 * to convergence, so minimizing its dn_nlevels matters.
392 	 */
393 	if (ds != NULL) {
394 		int levels = 1;
395 
396 		/*
397 		 * Determine the number of levels necessary for the meta-dnode
398 		 * to contain DN_MAX_OBJECT dnodes.
399 		 */
400 		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
401 		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
402 		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
403 			levels++;
404 
405 		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
406 		    mdn->dn_nlevels = levels;
407 	}
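
	/*
	 * Worked example (illustrative only; the constants below are
	 * assumptions about the usual tunings, not asserted by this code):
	 * with 3 block pointers in the meta-dnode, 16K data and indirect
	 * blocks (each indirect block holding 128 128-byte blkptrs),
	 * 512-byte dnode_phys_t entries, and a 2^48 object limit, the loop
	 * above requires
	 *	3 << (14 + (levels - 1) * 7) >= 2^48 * 512 = 2^57,
	 * which is first satisfied at levels = 7.
	 */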
408 
409 	ASSERT(type != DMU_OST_NONE);
410 	ASSERT(type != DMU_OST_ANY);
411 	ASSERT(type < DMU_OST_NUMTYPES);
412 	osi->os_phys->os_type = type;
413 
414 	dsl_dataset_dirty(ds, tx);
415 
416 	return (osi);
417 }
418 
419 struct oscarg {
420 	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
421 	void *userarg;
422 	dsl_dataset_t *clone_parent;
423 	const char *lastname;
424 	dmu_objset_type_t type;
425 };
426 
427 /* ARGSUSED */
428 static int
429 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
430 {
431 	dsl_dir_t *dd = arg1;
432 	struct oscarg *oa = arg2;
433 	objset_t *mos = dd->dd_pool->dp_meta_objset;
434 	int err;
435 	uint64_t ddobj;
436 
437 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
438 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
439 	if (err != ENOENT)
440 		return (err ? err : EEXIST);
441 
442 	if (oa->clone_parent != NULL) {
443 		/*
444 		 * You can't clone across pools.
445 		 */
446 		if (oa->clone_parent->ds_dir->dd_pool != dd->dd_pool)
447 			return (EXDEV);
448 
449 		/*
450 		 * You can only clone snapshots, not the head datasets.
451 		 */
452 		if (oa->clone_parent->ds_phys->ds_num_children == 0)
453 			return (EINVAL);
454 	}
455 	return (0);
456 }
457 
458 static void
459 dmu_objset_create_sync(void *arg1, void *arg2, dmu_tx_t *tx)
460 {
461 	dsl_dir_t *dd = arg1;
462 	struct oscarg *oa = arg2;
463 	dsl_dataset_t *ds;
464 	blkptr_t bp;
465 	uint64_t dsobj;
466 
467 	ASSERT(dmu_tx_is_syncing(tx));
468 
469 	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
470 	    oa->clone_parent, tx);
471 
472 	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
473 	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds));
474 	dsl_dataset_get_blkptr(ds, &bp);
475 	if (BP_IS_HOLE(&bp)) {
476 		objset_impl_t *osi;
477 
478 		/* This is an empty dmu_objset; not a clone. */
479 		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
480 		    ds, oa->type, tx);
481 
482 		if (oa->userfunc)
483 			oa->userfunc(&osi->os, oa->userarg, tx);
484 	}
485 	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);
486 }
487 
488 int
489 dmu_objset_create(const char *name, dmu_objset_type_t type,
490     objset_t *clone_parent,
491     void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
492 {
493 	dsl_dir_t *pdd;
494 	const char *tail;
495 	int err = 0;
496 	struct oscarg oa = { 0 };
497 
498 	ASSERT(strchr(name, '@') == NULL);
499 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
500 	if (err)
501 		return (err);
502 	if (tail == NULL) {
503 		dsl_dir_close(pdd, FTAG);
504 		return (EEXIST);
505 	}
506 
507 	dprintf("name=%s\n", name);
508 
509 	oa.userfunc = func;
510 	oa.userarg = arg;
511 	oa.lastname = tail;
512 	oa.type = type;
513 	if (clone_parent != NULL) {
514 		/*
515 		 * You can't clone to a different type.
516 		 */
517 		if (clone_parent->os->os_phys->os_type != type) {
518 			dsl_dir_close(pdd, FTAG);
519 			return (EINVAL);
520 		}
521 		oa.clone_parent = clone_parent->os->os_dsl_dataset;
522 	}
523 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
524 	    dmu_objset_create_sync, pdd, &oa, 5);
525 	dsl_dir_close(pdd, FTAG);
526 	return (err);
527 }
528 
529 int
530 dmu_objset_destroy(const char *name)
531 {
532 	objset_t *os;
533 	int error;
534 
535 	/*
536 	 * If it looks like we'll be able to destroy it, and there's
537 	 * an unplayed replay log sitting around, destroy the log.
538 	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
539 	 * but the replay log objset is modified in open context.
540 	 */
541 	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
542 	if (error == 0) {
543 		zil_destroy(dmu_objset_zil(os), B_FALSE);
544 		dmu_objset_close(os);
545 	}
546 
547 	return (dsl_dataset_destroy(name));
548 }
549 
550 int
551 dmu_objset_rollback(const char *name)
552 {
553 	int err;
554 	objset_t *os;
555 
556 	err = dmu_objset_open(name, DMU_OST_ANY,
557 	    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, &os);
558 	if (err == 0) {
559 		err = zil_suspend(dmu_objset_zil(os));
560 		if (err == 0)
561 			zil_resume(dmu_objset_zil(os));
562 		if (err == 0) {
563 			/* XXX uncache everything? */
564 			err = dsl_dataset_rollback(os->os->os_dsl_dataset);
565 		}
566 		dmu_objset_close(os);
567 	}
568 	return (err);
569 }
570 
571 struct snaparg {
572 	dsl_sync_task_group_t *dstg;
573 	char *snapname;
574 	char failed[MAXPATHLEN];
575 };
576 
577 static int
578 dmu_objset_snapshot_one(char *name, void *arg)
579 {
580 	struct snaparg *sn = arg;
581 	objset_t *os;
582 	int err;
583 
584 	(void) strcpy(sn->failed, name);
585 
586 	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_STANDARD, &os);
587 	if (err != 0)
588 		return (err);
589 
590 	/*
591 	 * NB: we need to wait for all in-flight changes to get to disk,
592 	 * so that we snapshot those changes.  zil_suspend does this as
593 	 * a side effect.
594 	 */
595 	err = zil_suspend(dmu_objset_zil(os));
596 	if (err == 0) {
597 		dsl_sync_task_create(sn->dstg, dsl_dataset_snapshot_check,
598 		    dsl_dataset_snapshot_sync, os, sn->snapname, 3);
599 	}
600 	return (err);
601 }
602 
603 int
604 dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
605 {
606 	dsl_sync_task_t *dst;
607 	struct snaparg sn = { 0 };
608 	char *cp;
609 	spa_t *spa;
610 	int err;
611 
612 	(void) strcpy(sn.failed, fsname);
613 
614 	cp = strchr(fsname, '/');
615 	if (cp) {
616 		*cp = '\0';
617 		err = spa_open(fsname, &spa, FTAG);
618 		*cp = '/';
619 	} else {
620 		err = spa_open(fsname, &spa, FTAG);
621 	}
622 	if (err)
623 		return (err);
624 
625 	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
626 	sn.snapname = snapname;
627 
628 	if (recursive) {
629 		err = dmu_objset_find(fsname,
630 		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
631 	} else {
632 		err = dmu_objset_snapshot_one(fsname, &sn);
633 	}
634 
635 	if (err)
636 		goto out;
637 
638 	err = dsl_sync_task_group_wait(sn.dstg);
639 
640 	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
641 	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
642 		objset_t *os = dst->dst_arg1;
643 		if (dst->dst_err)
644 			dmu_objset_name(os, sn.failed);
645 		zil_resume(dmu_objset_zil(os));
646 		dmu_objset_close(os);
647 	}
648 out:
649 	if (err)
650 		(void) strcpy(fsname, sn.failed);
651 	dsl_sync_task_group_destroy(sn.dstg);
652 	spa_close(spa, FTAG);
653 	return (err);
654 }
655 
656 static void
657 dmu_objset_sync_dnodes(objset_impl_t *os, list_t *list, dmu_tx_t *tx)
658 {
659 	dnode_t *dn = list_head(list);
660 	int level, err;
661 
662 	for (level = 0; dn = list_head(list); level++) {
663 		zio_t *zio;
664 		zio = zio_root(os->os_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
665 
666 		ASSERT3U(level, <=, DN_MAX_LEVELS);
667 
668 		while (dn) {
669 			dnode_t *next = list_next(list, dn);
670 
671 			list_remove(list, dn);
672 			if (dnode_sync(dn, level, zio, tx) == 0) {
673 				/*
674 				 * This dnode requires syncing at higher
675 				 * levels; put it back onto the list.
676 				 */
677 				if (next)
678 					list_insert_before(list, next, dn);
679 				else
680 					list_insert_tail(list, dn);
681 			}
682 			dn = next;
683 		}
684 		err = zio_wait(zio);
685 		ASSERT(err == 0);
686 	}
687 }
688 
689 /* ARGSUSED */
690 static void
691 killer(zio_t *zio, arc_buf_t *abuf, void *arg)
692 {
693 	objset_impl_t *os = arg;
694 	objset_phys_t *osphys = zio->io_data;
695 	dnode_phys_t *dnp = &osphys->os_meta_dnode;
696 	int i;
697 
698 	ASSERT3U(zio->io_error, ==, 0);
699 
700 	/*
701 	 * Update rootbp fill count.
702 	 */
703 	os->os_rootbp.blk_fill = 1;	/* count the meta-dnode */
704 	for (i = 0; i < dnp->dn_nblkptr; i++)
705 		os->os_rootbp.blk_fill += dnp->dn_blkptr[i].blk_fill;
706 
707 	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
708 	BP_SET_LEVEL(zio->io_bp, 0);
709 
710 	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
711 	    BP_IDENTITY(&zio->io_bp_orig))) {
712 		dsl_dataset_block_kill(os->os_dsl_dataset, &zio->io_bp_orig,
713 		    os->os_synctx);
714 		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
715 		    os->os_synctx);
716 	}
717 }
718 
719 
720 /* called from dsl */
721 void
722 dmu_objset_sync(objset_impl_t *os, dmu_tx_t *tx)
723 {
724 	extern taskq_t *dbuf_tq;
725 	int txgoff;
726 	list_t *dirty_list;
727 	int err;
728 	zbookmark_t zb;
729 	arc_buf_t *abuf =
730 	    arc_buf_alloc(os->os_spa, sizeof (objset_phys_t), FTAG);
731 
732 	ASSERT(dmu_tx_is_syncing(tx));
733 	ASSERT(os->os_synctx == NULL);
734 	/* XXX the write_done callback should really give us the tx... */
735 	os->os_synctx = tx;
736 
737 	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
738 
739 	txgoff = tx->tx_txg & TXG_MASK;
740 
741 	dmu_objset_sync_dnodes(os, &os->os_free_dnodes[txgoff], tx);
742 	dmu_objset_sync_dnodes(os, &os->os_dirty_dnodes[txgoff], tx);
743 
744 	/*
745 	 * Free intent log blocks up to this tx.
746 	 */
747 	zil_sync(os->os_zil, tx);
748 
749 	/*
750 	 * Sync meta-dnode
751 	 */
752 	dirty_list = &os->os_dirty_dnodes[txgoff];
753 	ASSERT(list_head(dirty_list) == NULL);
754 	list_insert_tail(dirty_list, os->os_meta_dnode);
755 	dmu_objset_sync_dnodes(os, dirty_list, tx);
756 
757 	/*
758 	 * Sync the root block.
759 	 */
760 	bcopy(os->os_phys, abuf->b_data, sizeof (objset_phys_t));
761 	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
762 	zb.zb_object = 0;
763 	zb.zb_level = -1;
764 	zb.zb_blkid = 0;
765 	err = arc_write(NULL, os->os_spa, os->os_md_checksum,
766 	    os->os_md_compress,
767 	    dmu_get_replication_level(os->os_spa, &zb, DMU_OT_OBJSET),
768 	    tx->tx_txg, &os->os_rootbp, abuf, killer, os,
769 	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT, &zb);
770 	ASSERT(err == 0);
771 	VERIFY(arc_buf_remove_ref(abuf, FTAG) == 1);
772 
773 	dsl_dataset_set_blkptr(os->os_dsl_dataset, &os->os_rootbp, tx);
774 
775 	ASSERT3P(os->os_synctx, ==, tx);
776 	taskq_wait(dbuf_tq);
777 	os->os_synctx = NULL;
778 }
779 
780 void
781 dmu_objset_stats(objset_t *os, dmu_objset_stats_t *dds)
782 {
783 	if (os->os->os_dsl_dataset != NULL) {
784 		dsl_dataset_stats(os->os->os_dsl_dataset, dds);
785 	} else {
786 		ASSERT(os->os->os_phys->os_type == DMU_OST_META);
787 		bzero(dds, sizeof (*dds));
788 	}
789 	dds->dds_type = os->os->os_phys->os_type;
790 }
791 
792 int
793 dmu_objset_is_snapshot(objset_t *os)
794 {
795 	if (os->os->os_dsl_dataset != NULL)
796 		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
797 	else
798 		return (B_FALSE);
799 }
800 
801 int
802 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
803     uint64_t *idp, uint64_t *offp)
804 {
805 	dsl_dataset_t *ds = os->os->os_dsl_dataset;
806 	zap_cursor_t cursor;
807 	zap_attribute_t attr;
808 
809 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
810 		return (ENOENT);
811 
812 	zap_cursor_init_serialized(&cursor,
813 	    ds->ds_dir->dd_pool->dp_meta_objset,
814 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
815 
816 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
817 		zap_cursor_fini(&cursor);
818 		return (ENOENT);
819 	}
820 
821 	if (strlen(attr.za_name) + 1 > namelen) {
822 		zap_cursor_fini(&cursor);
823 		return (ENAMETOOLONG);
824 	}
825 
826 	(void) strcpy(name, attr.za_name);
827 	if (idp)
828 		*idp = attr.za_first_integer;
829 	zap_cursor_advance(&cursor);
830 	*offp = zap_cursor_serialize(&cursor);
831 	zap_cursor_fini(&cursor);
832 
833 	return (0);
834 }
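
/*
 * Illustrative iteration pattern for dmu_snapshot_list_next() (a sketch;
 * buffer sizing and error handling are up to the caller): *offp acts as a
 * resumable cursor, so listing every snapshot of an open objset looks
 * roughly like
 *
 *	char name[MAXNAMELEN];
 *	uint64_t id, off = 0;
 *	while (dmu_snapshot_list_next(os, sizeof (name), name,
 *	    &id, &off) == 0) {
 *		... visit snapshot "name" with object number "id" ...
 *	}
 */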
835 
836 int
837 dmu_dir_list_next(objset_t *os, int namelen, char *name,
838     uint64_t *idp, uint64_t *offp)
839 {
840 	dsl_dir_t *dd = os->os->os_dsl_dataset->ds_dir;
841 	zap_cursor_t cursor;
842 	zap_attribute_t attr;
843 
844 	/* there is no next dir on a snapshot! */
845 	if (os->os->os_dsl_dataset->ds_object !=
846 	    dd->dd_phys->dd_head_dataset_obj)
847 		return (ENOENT);
848 
849 	zap_cursor_init_serialized(&cursor,
850 	    dd->dd_pool->dp_meta_objset,
851 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
852 
853 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
854 		zap_cursor_fini(&cursor);
855 		return (ENOENT);
856 	}
857 
858 	if (strlen(attr.za_name) + 1 > namelen) {
859 		zap_cursor_fini(&cursor);
860 		return (ENAMETOOLONG);
861 	}
862 
863 	(void) strcpy(name, attr.za_name);
864 	if (idp)
865 		*idp = attr.za_first_integer;
866 	zap_cursor_advance(&cursor);
867 	*offp = zap_cursor_serialize(&cursor);
868 	zap_cursor_fini(&cursor);
869 
870 	return (0);
871 }
872 
873 /*
874  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
875  */
876 int
877 dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
878 {
879 	dsl_dir_t *dd;
880 	objset_t *os;
881 	uint64_t snapobj;
882 	zap_cursor_t zc;
883 	zap_attribute_t attr;
884 	char *child;
885 	int do_self, err;
886 
887 	err = dsl_dir_open(name, FTAG, &dd, NULL);
888 	if (err)
889 		return (err);
890 
891 	/* NB: the $MOS dir doesn't have a head dataset */
892 	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);
893 
894 	/*
895 	 * Iterate over all children.
896 	 */
897 	if (flags & DS_FIND_CHILDREN) {
898 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
899 		    dd->dd_phys->dd_child_dir_zapobj);
900 		    zap_cursor_retrieve(&zc, &attr) == 0;
901 		    (void) zap_cursor_advance(&zc)) {
902 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
903 			ASSERT(attr.za_num_integers == 1);
904 
905 			/*
906 			 * Build the child's full name: parent's name + '/' + entry name.
907 			 */
908 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
909 			/* XXX could probably just use name here */
910 			dsl_dir_name(dd, child);
911 			(void) strcat(child, "/");
912 			(void) strcat(child, attr.za_name);
913 			err = dmu_objset_find(child, func, arg, flags);
914 			kmem_free(child, MAXPATHLEN);
915 			if (err)
916 				break;
917 		}
918 		zap_cursor_fini(&zc);
919 
920 		if (err) {
921 			dsl_dir_close(dd, FTAG);
922 			return (err);
923 		}
924 	}
925 
926 	/*
927 	 * Iterate over all snapshots.
928 	 */
929 	if ((flags & DS_FIND_SNAPSHOTS) &&
930 	    dmu_objset_open(name, DMU_OST_ANY,
931 	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {
932 
933 		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
934 		dmu_objset_close(os);
935 
936 		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
937 		    zap_cursor_retrieve(&zc, &attr) == 0;
938 		    (void) zap_cursor_advance(&zc)) {
939 			ASSERT(attr.za_integer_length == sizeof (uint64_t));
940 			ASSERT(attr.za_num_integers == 1);
941 
942 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
943 			/* XXX could probably just use name here */
944 			dsl_dir_name(dd, child);
945 			(void) strcat(child, "@");
946 			(void) strcat(child, attr.za_name);
947 			err = func(child, arg);
948 			kmem_free(child, MAXPATHLEN);
949 			if (err)
950 				break;
951 		}
952 		zap_cursor_fini(&zc);
953 	}
954 
955 	dsl_dir_close(dd, FTAG);
956 
957 	if (err)
958 		return (err);
959 
960 	/*
961 	 * Apply to self if appropriate.
962 	 */
963 	if (do_self)
964 		err = func(name, arg);
965 	return (err);
966 }
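
/*
 * Illustrative use of dmu_objset_find() (hypothetical callback; the names
 * print_cb and "tank" are made up for the example): visit every filesystem
 * and snapshot under a pool.
 *
 *	static int
 *	print_cb(char *name, void *arg)
 *	{
 *		dprintf("visiting %s\n", name);
 *		return (0);
 *	}
 *
 *	err = dmu_objset_find("tank", print_cb, NULL,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */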
967