xref: /titanic_51/usr/src/uts/common/fs/zfs/dmu_objset.c (revision 5c88ba20fc79ecf19255b4a04f03d77630b6d0e7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zio_checksum.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>


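/*
 * Accessors that translate the public objset_t handle into fields of the
 * underlying objset_impl_t.
 */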
spa_t *
dmu_objset_spa(objset_t *os)
{
	return (os->os->os_spa);
}

zilog_t *
dmu_objset_zil(objset_t *os)
{
	return (os->os->os_zil);
}

dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
	dsl_dataset_t *ds;

	if ((ds = os->os->os_dsl_dataset) != NULL && ds->ds_dir)
		return (ds->ds_dir->dd_pool);
	else
		return (spa_get_dsl(os->os->os_spa));
}

dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
	return (os->os->os_dsl_dataset);
}

dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
	return (os->os->os_phys->os_type);
}

void
dmu_objset_name(objset_t *os, char *buf)
{
	dsl_dataset_name(os->os->os_dsl_dataset, buf);
}

uint64_t
dmu_objset_id(objset_t *os)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;

	return (ds ? ds->ds_object : 0);
}

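/*
 * Property callbacks registered in dmu_objset_open_impl(); they keep the
 * cached checksum and compression settings in sync with the dataset's
 * "checksum" and "compression" properties.
 */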
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance should have been done by now.
	 */
	ASSERT(newval != ZIO_CHECKSUM_INHERIT);

	osi->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}

static void
compression_changed_cb(void *arg, uint64_t newval)
{
	objset_impl_t *osi = arg;

	/*
	 * Inheritance and range checking should have been done by now.
	 */
	ASSERT(newval != ZIO_COMPRESS_INHERIT);

	osi->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
}

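/*
 * Byteswap an objset_phys_t in place: the meta-dnode, the ZIL header,
 * and the objset type.
 */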
void
dmu_objset_byteswap(void *buf, size_t size)
{
	objset_phys_t *osp = buf;

	ASSERT(size == sizeof (objset_phys_t));
	dnode_byteswap(&osp->os_meta_dnode);
	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
	osp->os_type = BSWAP_64(osp->os_type);
}

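/*
 * Construct the in-core state for an objset: read the objset_phys_t from
 * 'bp' (or zero it for a new objset), set up the ZIL, register the property
 * callbacks (or pick fixed settings for the meta-objset), choose the
 * metadata checksum/compression, and open the meta-dnode.  If another
 * thread attached its state to the dataset first, discard ours and return
 * the winner's.
 */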
objset_impl_t *
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp)
{
	objset_impl_t *winner, *osi;
	int i, err, checksum;

	osi = kmem_zalloc(sizeof (objset_impl_t), KM_SLEEP);
	osi->os.os = osi;
	osi->os_dsl_dataset = ds;
	osi->os_spa = spa;
	if (bp)
		osi->os_rootbp = *bp;
	osi->os_phys = zio_buf_alloc(sizeof (objset_phys_t));
	if (!BP_IS_HOLE(&osi->os_rootbp)) {
		dprintf_bp(&osi->os_rootbp, "reading %s", "");
		(void) arc_read(NULL, spa, &osi->os_rootbp,
		    dmu_ot[DMU_OT_OBJSET].ot_byteswap,
		    arc_bcopy_func, osi->os_phys,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT);
	} else {
		bzero(osi->os_phys, sizeof (objset_phys_t));
	}
	osi->os_zil = zil_alloc(&osi->os, &osi->os_phys->os_zil_header);

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).
	 */
	if (ds) {
		err = dsl_prop_register(ds, "checksum",
		    checksum_changed_cb, osi);
		ASSERT(err == 0);

		err = dsl_prop_register(ds, "compression",
		    compression_changed_cb, osi);
		ASSERT(err == 0);
	} else {
		/* It's the meta-objset. */
		osi->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		osi->os_compress = ZIO_COMPRESS_LZJB;
	}

	/*
	 * Metadata always gets compressed and checksummed.
	 * If the data checksum is multi-bit correctable, and it's not
	 * a ZBT-style checksum, then it's suitable for metadata as well.
	 * Otherwise, the metadata checksum defaults to fletcher4.
	 */
	checksum = osi->os_checksum;

	if (zio_checksum_table[checksum].ci_correctable &&
	    !zio_checksum_table[checksum].ci_zbt)
		osi->os_md_checksum = checksum;
	else
		osi->os_md_checksum = ZIO_CHECKSUM_FLETCHER_4;

	osi->os_md_compress = ZIO_COMPRESS_LZJB;

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&osi->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&osi->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&osi->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&osi->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	osi->os_meta_dnode = dnode_special_open(osi,
	    &osi->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);

	if (ds != NULL) {
		winner = dsl_dataset_set_user_ptr(ds, osi, dmu_objset_evict);
		if (winner) {
			dmu_objset_evict(ds, osi);
			osi = winner;
		}
	}

	return (osi);
}

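/*
 * Open the objset named 'name' and return an objset_t handle in '*osp'.
 * Illustrative usage (a sketch only; the dataset name is hypothetical and
 * error handling is elided):
 *
 *	objset_t *os;
 *	error = dmu_objset_open("pool/fs", DMU_OST_ANY, DS_MODE_STANDARD, &os);
 *	if (error == 0) {
 *		...
 *		dmu_objset_close(os);
 *	}
 */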
/* called from zpl */
int
dmu_objset_open(const char *name, dmu_objset_type_t type, int mode,
    objset_t **osp)
{
	dsl_dataset_t *ds;
	int err;
	objset_t *os;
	objset_impl_t *osi;

	os = kmem_alloc(sizeof (objset_t), KM_SLEEP);
	err = dsl_dataset_open(name, mode, os, &ds);
	if (err) {
		kmem_free(os, sizeof (objset_t));
		return (err);
	}

	osi = dsl_dataset_get_user_ptr(ds);
	if (osi == NULL) {
		blkptr_t bp;

		dsl_dataset_get_blkptr(ds, &bp);
		osi = dmu_objset_open_impl(dsl_dataset_get_spa(ds), ds, &bp);
	}

	os->os = osi;
	os->os_mode = mode;

	if (type != DMU_OST_ANY && type != os->os->os_phys->os_type) {
		dmu_objset_close(os);
		return (EINVAL);
	}
	*osp = os;
	return (0);
}

void
dmu_objset_close(objset_t *os)
{
	dsl_dataset_close(os->os->os_dsl_dataset, os->os_mode, os);
	kmem_free(os, sizeof (objset_t));
}

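/*
 * Tear down the in-core state of an objset.  This is the eviction callback
 * registered with dsl_dataset_set_user_ptr(); all per-txg dirty and free
 * dnode lists must already be empty.
 */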
void
dmu_objset_evict(dsl_dataset_t *ds, void *arg)
{
	objset_impl_t *osi = arg;
	int err, i;

	for (i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_head(&osi->os_dirty_dnodes[i]) == NULL);
		ASSERT(list_head(&osi->os_free_dnodes[i]) == NULL);
	}

	if (ds) {
		err = dsl_prop_unregister(ds, "checksum",
		    checksum_changed_cb, osi);
		ASSERT(err == 0);

		err = dsl_prop_unregister(ds, "compression",
		    compression_changed_cb, osi);
		ASSERT(err == 0);
	}

	ASSERT3P(list_head(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_tail(&osi->os_dnodes), ==, osi->os_meta_dnode);
	ASSERT3P(list_head(&osi->os_meta_dnode->dn_dbufs), ==, NULL);

	dnode_special_close(osi->os_meta_dnode);
	zil_free(osi->os_zil);

	zio_buf_free(osi->os_phys, sizeof (objset_phys_t));
	kmem_free(osi, sizeof (objset_impl_t));
}

/* called from dsl for meta-objset */
objset_impl_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, dmu_objset_type_t type,
    dmu_tx_t *tx)
{
	objset_impl_t *osi;
	dnode_t *mdn;

	ASSERT(dmu_tx_is_syncing(tx));
	osi = dmu_objset_open_impl(spa, ds, NULL);
	mdn = osi->os_meta_dnode;

	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);

	/*
	 * We don't want to have to increase the meta-dnode's nlevels
	 * later, because then we could do it in quiescing context while
	 * we are also accessing it in open context.
	 *
	 * This precaution is not necessary for the MOS (ds == NULL),
	 * because the MOS is only updated in syncing context.
	 * This is most fortunate: the MOS is the only objset that
	 * needs to be synced multiple times as spa_sync() iterates
	 * to convergence, so minimizing its dn_nlevels matters.
	 */
	if (ds != NULL)
		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
		    mdn->dn_nlevels = DN_META_DNODE_LEVELS;

	ASSERT(type != DMU_OST_NONE);
	ASSERT(type != DMU_OST_ANY);
	ASSERT(type < DMU_OST_NUMTYPES);
	osi->os_phys->os_type = type;

	dsl_dataset_dirty(ds, tx);

	return (osi);
}

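/*
 * Arguments passed through dsl_dir_sync_task() to dmu_objset_create_sync().
 */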
struct oscarg {
	void (*userfunc)(objset_t *os, void *arg, dmu_tx_t *tx);
	void *userarg;
	dsl_dataset_t *clone_parent;
	const char *fullname;
	const char *lastname;
	dmu_objset_type_t type;
};

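/*
 * Syncing-context callback for dmu_objset_create(): create the dataset and,
 * if it is not a clone, create an empty objset of the requested type and
 * invoke the caller's initialization function on it.
 */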
static int
dmu_objset_create_sync(dsl_dir_t *dd, void *arg, dmu_tx_t *tx)
{
	struct oscarg *oa = arg;
	dsl_dataset_t *ds;
	int err;
	blkptr_t bp;

	ASSERT(dmu_tx_is_syncing(tx));

	err = dsl_dataset_create_sync(dd, oa->fullname, oa->lastname,
	    oa->clone_parent, tx);
	dprintf_dd(dd, "fn=%s ln=%s err=%d\n",
	    oa->fullname, oa->lastname, err);
	if (err)
		return (err);

	err = dsl_dataset_open_spa(dd->dd_pool->dp_spa, oa->fullname,
	    DS_MODE_STANDARD | DS_MODE_READONLY, FTAG, &ds);
	ASSERT3U(err, ==, 0);
	dsl_dataset_get_blkptr(ds, &bp);
	if (BP_IS_HOLE(&bp)) {
		objset_impl_t *osi;

		/* This is an empty dmu_objset; not a clone. */
		osi = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
		    ds, oa->type, tx);

		if (oa->userfunc)
			oa->userfunc(&osi->os, oa->userarg, tx);
	}
	dsl_dataset_close(ds, DS_MODE_STANDARD | DS_MODE_READONLY, FTAG);

	return (0);
}

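/*
 * Create the objset (or, if 'name' contains an '@', take a snapshot of the
 * existing dataset).  For a new objset, 'func' is called in syncing context
 * to initialize its contents; 'clone_parent', if non-NULL, must be of the
 * same type.
 */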
int
dmu_objset_create(const char *name, dmu_objset_type_t type,
    objset_t *clone_parent,
    void (*func)(objset_t *os, void *arg, dmu_tx_t *tx), void *arg)
{
	dsl_dir_t *pds;
	const char *tail;
	int err = 0;

	pds = dsl_dir_open(name, FTAG, &tail);
	if (pds == NULL)
		return (ENOENT);
	if (tail == NULL) {
		dsl_dir_close(pds, FTAG);
		return (EEXIST);
	}

	dprintf("name=%s\n", name);

	if (tail[0] == '@') {
		/*
		 * If we're creating a snapshot, make sure everything
		 * they might want is on disk.  XXX Sketchy to know
		 * about snapshots here, better to put in DSL.
		 */
		objset_t *os;
		size_t plen = strchr(name, '@') - name + 1;
		char *pbuf = kmem_alloc(plen, KM_SLEEP);
		bcopy(name, pbuf, plen - 1);
		pbuf[plen - 1] = '\0';

		err = dmu_objset_open(pbuf, DMU_OST_ANY, DS_MODE_STANDARD, &os);
		if (err == 0) {
			err = zil_suspend(dmu_objset_zil(os));
			if (err == 0) {
				err = dsl_dir_sync_task(pds,
				    dsl_dataset_snapshot_sync,
				    (void*)(tail+1), 16*1024);
				zil_resume(dmu_objset_zil(os));
			}
			dmu_objset_close(os);
		}
		kmem_free(pbuf, plen);
	} else {
		struct oscarg oa = { 0 };
		oa.userfunc = func;
		oa.userarg = arg;
		oa.fullname = name;
		oa.lastname = tail;
		oa.type = type;
		if (clone_parent != NULL) {
			/*
			 * You can't clone to a different type.
			 */
			if (clone_parent->os->os_phys->os_type != type) {
				dsl_dir_close(pds, FTAG);
				return (EINVAL);
			}
			oa.clone_parent = clone_parent->os->os_dsl_dataset;
		}
		err = dsl_dir_sync_task(pds, dmu_objset_create_sync, &oa,
		    256*1024);
	}
	dsl_dir_close(pds, FTAG);
	return (err);
}

int
dmu_objset_destroy(const char *name)
{
	objset_t *os;
	int error;

	/*
	 * If it looks like we'll be able to destroy it, and there's
	 * an unplayed replay log sitting around, destroy the log.
	 * It would be nicer to do this in dsl_dataset_destroy_sync(),
	 * but the replay log objset is modified in open context.
	 */
	error = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
	if (error == 0) {
		zil_destroy(dmu_objset_zil(os));
		dmu_objset_close(os);
	}

	/* XXX uncache everything? */
	return (dsl_dataset_destroy(name));
}

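/*
 * Roll the objset back to its most recent snapshot.  The intent log is
 * suspended and resumed first so the rollback does not race with ZIL
 * activity; if the log cannot be suspended, the rollback is not attempted.
 */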
int
dmu_objset_rollback(const char *name)
{
	int err;
	objset_t *os;

	err = dmu_objset_open(name, DMU_OST_ANY, DS_MODE_EXCLUSIVE, &os);
	if (err == 0) {
		err = zil_suspend(dmu_objset_zil(os));
		if (err == 0)
			zil_resume(dmu_objset_zil(os));
		dmu_objset_close(os);
		if (err == 0) {
			/* XXX uncache everything? */
			err = dsl_dataset_rollback(name);
		}
	}
	return (err);
}

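/*
 * Sync all dnodes on 'list', one indirection level at a time; a dnode that
 * still has work to do at a higher level is put back on the list for the
 * next pass.
 */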
static void
dmu_objset_sync_dnodes(objset_impl_t *os, list_t *list, dmu_tx_t *tx)
{
	dnode_t *dn = list_head(list);
	int level, err;

	for (level = 0; dn = list_head(list); level++) {
		zio_t *zio;
		zio = zio_root(os->os_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);

		ASSERT3U(level, <=, DN_MAX_LEVELS);

		while (dn) {
			dnode_t *next = list_next(list, dn);

			list_remove(list, dn);
			if (dnode_sync(dn, level, zio, tx) == 0) {
				/*
				 * This dnode requires syncing at higher
				 * levels; put it back onto the list.
				 */
				if (next)
					list_insert_before(list, next, dn);
				else
					list_insert_tail(list, dn);
			}
			dn = next;
		}
		err = zio_wait(zio);
		ASSERT(err == 0);
	}
}

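/*
 * arc_write() completion callback for the objset's root block: update the
 * root block pointer's fill count from the meta-dnode and, if the block
 * moved, record the death of the old block and the birth of the new one.
 */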
/* ARGSUSED */
static void
killer(zio_t *zio, arc_buf_t *abuf, void *arg)
{
	objset_impl_t *os = arg;
	objset_phys_t *osphys = zio->io_data;
	dnode_phys_t *dnp = &osphys->os_meta_dnode;
	int i;

	ASSERT3U(zio->io_error, ==, 0);

	/*
	 * Update rootbp fill count.
	 */
	os->os_rootbp.blk_fill = 1;	/* count the meta-dnode */
	for (i = 0; i < dnp->dn_nblkptr; i++)
		os->os_rootbp.blk_fill += dnp->dn_blkptr[i].blk_fill;

	BP_SET_TYPE(zio->io_bp, DMU_OT_OBJSET);
	BP_SET_LEVEL(zio->io_bp, 0);

	if (!DVA_EQUAL(BP_IDENTITY(zio->io_bp),
	    BP_IDENTITY(&zio->io_bp_orig))) {
		dsl_dataset_block_kill(os->os_dsl_dataset, &zio->io_bp_orig,
		    os->os_synctx);
		dsl_dataset_block_born(os->os_dsl_dataset, zio->io_bp,
		    os->os_synctx);
	}
}


/* called from dsl */
void
dmu_objset_sync(objset_impl_t *os, dmu_tx_t *tx)
{
	extern taskq_t *dbuf_tq;
	int txgoff;
	list_t *dirty_list;
	int err;
	arc_buf_t *abuf =
	    arc_buf_alloc(os->os_spa, sizeof (objset_phys_t), FTAG);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(os->os_synctx == NULL);
	/* XXX the write_done callback should really give us the tx... */
	os->os_synctx = tx;

	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);

	txgoff = tx->tx_txg & TXG_MASK;

	dmu_objset_sync_dnodes(os, &os->os_free_dnodes[txgoff], tx);
	dmu_objset_sync_dnodes(os, &os->os_dirty_dnodes[txgoff], tx);

	/*
	 * Free intent log blocks up to this tx.
	 */
	zil_sync(os->os_zil, tx);

	/*
	 * Sync meta-dnode
	 */
	dirty_list = &os->os_dirty_dnodes[txgoff];
	ASSERT(list_head(dirty_list) == NULL);
	list_insert_tail(dirty_list, os->os_meta_dnode);
	dmu_objset_sync_dnodes(os, dirty_list, tx);

	/*
	 * Sync the root block.
	 */
	bcopy(os->os_phys, abuf->b_data, sizeof (objset_phys_t));
	err = arc_write(NULL, os->os_spa, os->os_md_checksum,
	    os->os_md_compress, tx->tx_txg, &os->os_rootbp, abuf, killer, os,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, ARC_WAIT);
	ASSERT(err == 0);
	arc_buf_free(abuf, FTAG);

	dsl_dataset_set_blkptr(os->os_dsl_dataset, &os->os_rootbp, tx);

	ASSERT3P(os->os_synctx, ==, tx);
	taskq_wait(dbuf_tq);
	os->os_synctx = NULL;
}

void
dmu_objset_stats(objset_t *os, dmu_objset_stats_t *dds)
{
	if (os->os->os_dsl_dataset != NULL) {
		dsl_dataset_stats(os->os->os_dsl_dataset, dds);
	} else {
		ASSERT(os->os->os_phys->os_type == DMU_OST_META);
		bzero(dds, sizeof (*dds));
	}
	dds->dds_type = os->os->os_phys->os_type;
}

int
dmu_objset_is_snapshot(objset_t *os)
{
	if (os->os->os_dsl_dataset != NULL)
		return (dsl_dataset_is_snapshot(os->os->os_dsl_dataset));
	else
		return (B_FALSE);
}

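/*
 * Return the next snapshot name (and object number) of 'os', resuming from
 * the serialized ZAP cursor position in '*offp' and updating it on success.
 */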
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp)
{
	dsl_dataset_t *ds = os->os->os_dsl_dataset;
	zap_cursor_t cursor;
	zap_attribute_t attr;

	if (ds->ds_phys->ds_snapnames_zapobj == 0)
		return (ENOENT);

	zap_cursor_init_serialized(&cursor,
	    ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, *offp);

	if (zap_cursor_retrieve(&cursor, &attr) != 0)
		return (ENOENT);

	if (strlen(attr.za_name) + 1 > namelen)
		return (ENAMETOOLONG);

	(void) strcpy(name, attr.za_name);
	*id = attr.za_first_integer;
	zap_cursor_advance(&cursor);
	*offp = zap_cursor_serialize(&cursor);

	return (0);
}

/*
 * Find all objsets under 'name', and for each, call 'func(child_name, arg)'.
 * If DS_FIND_SNAPSHOTS is set in 'flags', each dataset's snapshots are
 * visited as well.
 */
void
dmu_objset_find(char *name, void func(char *, void *), void *arg, int flags)
{
	dsl_dir_t *dd;
	objset_t *os;
	uint64_t snapobj;
	zap_cursor_t zc;
	zap_attribute_t attr;
	char *child;
	int do_self;

	dd = dsl_dir_open(name, FTAG, NULL);
	if (dd == NULL)
		return;

	do_self = (dd->dd_phys->dd_head_dataset_obj != 0);

	/*
	 * Iterate over all children.
	 */
	if (dd->dd_phys->dd_child_dir_zapobj != 0) {
		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			/*
			 * Build the child's full name: <parent>/<child>.
			 */
			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "/");
			(void) strcat(child, attr.za_name);
			dmu_objset_find(child, func, arg, flags);
			kmem_free(child, MAXPATHLEN);
		}
	}

	/*
	 * Iterate over all snapshots.
	 */
	if ((flags & DS_FIND_SNAPSHOTS) &&
	    dmu_objset_open(name, DMU_OST_ANY,
	    DS_MODE_STANDARD | DS_MODE_READONLY, &os) == 0) {

		snapobj = os->os->os_dsl_dataset->ds_phys->ds_snapnames_zapobj;
		dmu_objset_close(os);

		for (zap_cursor_init(&zc, dd->dd_pool->dp_meta_objset, snapobj);
		    zap_cursor_retrieve(&zc, &attr) == 0;
		    (void) zap_cursor_advance(&zc)) {
			ASSERT(attr.za_integer_length == sizeof (uint64_t));
			ASSERT(attr.za_num_integers == 1);

			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			/* XXX could probably just use name here */
			dsl_dir_name(dd, child);
			(void) strcat(child, "@");
			(void) strcat(child, attr.za_name);
			func(child, arg);
			kmem_free(child, MAXPATHLEN);
		}
	}

	dsl_dir_close(dd, FTAG);

	/*
	 * Apply to self if appropriate.
	 */
	if (do_self)
		func(name, arg);
}