xref: /titanic_52/usr/src/uts/common/fs/zfs/dmu_objset.c (revision 0a0e9771ca0211c15f3ac4466b661c145feeb9e4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/cred.h>
27 #include <sys/zfs_context.h>
28 #include <sys/dmu_objset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_prop.h>
32 #include <sys/dsl_pool.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/dsl_deleg.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/zvol.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zio_checksum.h>
40 #include <sys/zap.h>
41 #include <sys/zil.h>
42 #include <sys/dmu_impl.h>
43 #include <sys/zfs_ioctl.h>
44 
45 spa_t *
46 dmu_objset_spa(objset_t *os)
47 {
48 	return (os->os_spa);
49 }
50 
51 zilog_t *
52 dmu_objset_zil(objset_t *os)
53 {
54 	return (os->os_zil);
55 }
56 
57 dsl_pool_t *
58 dmu_objset_pool(objset_t *os)
59 {
60 	dsl_dataset_t *ds;
61 
62 	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
63 		return (ds->ds_dir->dd_pool);
64 	else
65 		return (spa_get_dsl(os->os_spa));
66 }
67 
68 dsl_dataset_t *
69 dmu_objset_ds(objset_t *os)
70 {
71 	return (os->os_dsl_dataset);
72 }
73 
74 dmu_objset_type_t
75 dmu_objset_type(objset_t *os)
76 {
77 	return (os->os_phys->os_type);
78 }
79 
80 void
81 dmu_objset_name(objset_t *os, char *buf)
82 {
83 	dsl_dataset_name(os->os_dsl_dataset, buf);
84 }
85 
86 uint64_t
87 dmu_objset_id(objset_t *os)
88 {
89 	dsl_dataset_t *ds = os->os_dsl_dataset;
90 
91 	return (ds ? ds->ds_object : 0);
92 }
93 
94 uint64_t
95 dmu_objset_logbias(objset_t *os)
96 {
97 	return (os->os_logbias);
98 }
99 
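/*
 * Property-change callbacks.  dmu_objset_open_impl() registers each of
 * these with dsl_prop_register(), which invokes the callback once at
 * registration time and again whenever the property's effective value
 * changes; by then inheritance has already been resolved.
 */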
100 static void
101 checksum_changed_cb(void *arg, uint64_t newval)
102 {
103 	objset_t *os = arg;
104 
105 	/*
106 	 * Inheritance should have been done by now.
107 	 */
108 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
109 
110 	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
111 }
112 
113 static void
114 compression_changed_cb(void *arg, uint64_t newval)
115 {
116 	objset_t *os = arg;
117 
118 	/*
119 	 * Inheritance and range checking should have been done by now.
120 	 */
121 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
122 
123 	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
124 }
125 
126 static void
127 copies_changed_cb(void *arg, uint64_t newval)
128 {
129 	objset_t *os = arg;
130 
131 	/*
132 	 * Inheritance and range checking should have been done by now.
133 	 */
134 	ASSERT(newval > 0);
135 	ASSERT(newval <= spa_max_replication(os->os_spa));
136 
137 	os->os_copies = newval;
138 }
139 
140 static void
141 primary_cache_changed_cb(void *arg, uint64_t newval)
142 {
143 	objset_t *os = arg;
144 
145 	/*
146 	 * Inheritance and range checking should have been done by now.
147 	 */
148 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
149 	    newval == ZFS_CACHE_METADATA);
150 
151 	os->os_primary_cache = newval;
152 }
153 
154 static void
155 secondary_cache_changed_cb(void *arg, uint64_t newval)
156 {
157 	objset_t *os = arg;
158 
159 	/*
160 	 * Inheritance and range checking should have been done by now.
161 	 */
162 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
163 	    newval == ZFS_CACHE_METADATA);
164 
165 	os->os_secondary_cache = newval;
166 }
167 
168 static void
169 logbias_changed_cb(void *arg, uint64_t newval)
170 {
171 	objset_t *os = arg;
172 
173 	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
174 	    newval == ZFS_LOGBIAS_THROUGHPUT);
175 	os->os_logbias = newval;
176 	if (os->os_zil)
177 		zil_set_logbias(os->os_zil, newval);
178 }
179 
180 void
181 dmu_objset_byteswap(void *buf, size_t size)
182 {
183 	objset_phys_t *osp = buf;
184 
185 	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
186 	dnode_byteswap(&osp->os_meta_dnode);
187 	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
188 	osp->os_type = BSWAP_64(osp->os_type);
189 	osp->os_flags = BSWAP_64(osp->os_flags);
190 	if (size == sizeof (objset_phys_t)) {
191 		dnode_byteswap(&osp->os_userused_dnode);
192 		dnode_byteswap(&osp->os_groupused_dnode);
193 	}
194 }
195 
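/*
 * Construct the in-core objset_t for a dataset, or for the pool's
 * meta-objset when ds is NULL.  The objset_phys_t is read from the
 * root block pointer through the ARC (or a zeroed buffer is allocated
 * when bp is a hole), the property callbacks above are registered for
 * real datasets, and the special meta/userused/groupused dnodes are
 * opened.
 */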
196 int
197 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
198     objset_t **osp)
199 {
200 	objset_t *os;
201 	int i, err;
202 
203 	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
204 
205 	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
206 	os->os_dsl_dataset = ds;
207 	os->os_spa = spa;
208 	os->os_rootbp = bp;
209 	if (!BP_IS_HOLE(os->os_rootbp)) {
210 		uint32_t aflags = ARC_WAIT;
211 		zbookmark_t zb;
212 		zb.zb_objset = ds ? ds->ds_object : 0;
213 		zb.zb_object = 0;
214 		zb.zb_level = -1;
215 		zb.zb_blkid = 0;
216 		if (DMU_OS_IS_L2CACHEABLE(os))
217 			aflags |= ARC_L2CACHE;
218 
219 		dprintf_bp(os->os_rootbp, "reading %s", "");
220 		/*
221 		 * NB: once bprewrite scrub can change the bp, and this is
222 		 * called from dmu_objset_open_ds_os, the bp could change
223 		 * out from under us and we'll need a lock.
224 		 */
225 		err = arc_read_nolock(NULL, spa, os->os_rootbp,
226 		    arc_getbuf_func, &os->os_phys_buf,
227 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
228 		if (err) {
229 			kmem_free(os, sizeof (objset_t));
230 			/* convert checksum errors into IO errors */
231 			if (err == ECKSUM)
232 				err = EIO;
233 			return (err);
234 		}
235 
236 		/* Increase the blocksize if we are permitted. */
237 		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
238 		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
239 			arc_buf_t *buf = arc_buf_alloc(spa,
240 			    sizeof (objset_phys_t), &os->os_phys_buf,
241 			    ARC_BUFC_METADATA);
242 			bzero(buf->b_data, sizeof (objset_phys_t));
243 			bcopy(os->os_phys_buf->b_data, buf->b_data,
244 			    arc_buf_size(os->os_phys_buf));
245 			(void) arc_buf_remove_ref(os->os_phys_buf,
246 			    &os->os_phys_buf);
247 			os->os_phys_buf = buf;
248 		}
249 
250 		os->os_phys = os->os_phys_buf->b_data;
251 		os->os_flags = os->os_phys->os_flags;
252 	} else {
253 		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
254 		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
255 		os->os_phys_buf = arc_buf_alloc(spa, size,
256 		    &os->os_phys_buf, ARC_BUFC_METADATA);
257 		os->os_phys = os->os_phys_buf->b_data;
258 		bzero(os->os_phys, size);
259 	}
260 
261 	/*
262 	 * Note: the changed_cb will be called once before the register
263 	 * func returns, thus changing the checksum/compression from the
264 	 * default (fletcher2/off).  Snapshots don't need to know about
265 	 * checksum/compression/copies.
266 	 */
267 	if (ds) {
268 		err = dsl_prop_register(ds, "primarycache",
269 		    primary_cache_changed_cb, os);
270 		if (err == 0)
271 			err = dsl_prop_register(ds, "secondarycache",
272 			    secondary_cache_changed_cb, os);
273 		if (!dsl_dataset_is_snapshot(ds)) {
274 			if (err == 0)
275 				err = dsl_prop_register(ds, "checksum",
276 				    checksum_changed_cb, os);
277 			if (err == 0)
278 				err = dsl_prop_register(ds, "compression",
279 				    compression_changed_cb, os);
280 			if (err == 0)
281 				err = dsl_prop_register(ds, "copies",
282 				    copies_changed_cb, os);
283 			if (err == 0)
284 				err = dsl_prop_register(ds, "logbias",
285 				    logbias_changed_cb, os);
286 		}
287 		if (err) {
288 			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
289 			    &os->os_phys_buf) == 1);
290 			kmem_free(os, sizeof (objset_t));
291 			return (err);
292 		}
293 	} else {
294 		/* It's the meta-objset. */
295 		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
296 		os->os_compress = ZIO_COMPRESS_LZJB;
297 		os->os_copies = spa_max_replication(spa);
298 		os->os_primary_cache = ZFS_CACHE_ALL;
299 		os->os_secondary_cache = ZFS_CACHE_ALL;
300 	}
301 
302 	os->os_zil_header = os->os_phys->os_zil_header;
303 	os->os_zil = zil_alloc(os, &os->os_zil_header);
304 
305 	for (i = 0; i < TXG_SIZE; i++) {
306 		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
307 		    offsetof(dnode_t, dn_dirty_link[i]));
308 		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
309 		    offsetof(dnode_t, dn_dirty_link[i]));
310 	}
311 	list_create(&os->os_dnodes, sizeof (dnode_t),
312 	    offsetof(dnode_t, dn_link));
313 	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
314 	    offsetof(dmu_buf_impl_t, db_link));
315 
316 	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
317 	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
318 	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
319 
320 	os->os_meta_dnode = dnode_special_open(os,
321 	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
322 	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
323 		os->os_userused_dnode = dnode_special_open(os,
324 		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT);
325 		os->os_groupused_dnode = dnode_special_open(os,
326 		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT);
327 	}
328 
329 	/*
330 	 * We should be the only thread trying to do this because we
331 	 * have ds_opening_lock.
332 	 */
333 	if (ds) {
334 		mutex_enter(&ds->ds_lock);
335 		ASSERT(ds->ds_objset == NULL);
336 		ds->ds_objset = os;
337 		mutex_exit(&ds->ds_lock);
338 	}
339 
340 	*osp = os;
341 	return (0);
342 }
343 
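/*
 * Return the objset for a dataset, constructing it on first use and
 * caching it in ds->ds_objset; ds_opening_lock serializes concurrent
 * openers.
 */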
344 int
345 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
346 {
347 	int err = 0;
348 
349 	mutex_enter(&ds->ds_opening_lock);
350 	*osp = ds->ds_objset;
351 	if (*osp == NULL) {
352 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
353 		    ds, &ds->ds_phys->ds_bp, osp);
354 	}
355 	mutex_exit(&ds->ds_opening_lock);
356 	return (err);
357 }
358 
359 /* called from zpl */
360 int
361 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
362 {
363 	dsl_dataset_t *ds;
364 	int err;
365 
366 	err = dsl_dataset_hold(name, tag, &ds);
367 	if (err)
368 		return (err);
369 
370 	err = dmu_objset_from_ds(ds, osp);
371 	if (err)
372 		dsl_dataset_rele(ds, tag);
373 
374 	return (err);
375 }
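
/*
 * Illustrative hold/rele pattern (a sketch; "tank/fs" and the calling
 * code are hypothetical, not part of this file):
 *
 *	objset_t *os;
 *	int err = dmu_objset_hold("tank/fs", FTAG, &os);
 *	if (err == 0) {
 *		... read-only access to os ...
 *		dmu_objset_rele(os, FTAG);
 *	}
 *
 * Long-lived, exclusive access (e.g. mounting a filesystem) goes
 * through dmu_objset_own()/dmu_objset_disown() below instead.
 */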
376 
377 /* called from zpl */
378 int
379 dmu_objset_own(const char *name, dmu_objset_type_t type,
380     boolean_t readonly, void *tag, objset_t **osp)
381 {
382 	dsl_dataset_t *ds;
383 	int err;
384 
385 	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
386 	if (err)
387 		return (err);
388 
389 	err = dmu_objset_from_ds(ds, osp);
390 	if (err) {
391 		dsl_dataset_disown(ds, tag);
392 	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
393 		dmu_objset_disown(*osp, tag);
394 		return (EINVAL);
395 	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
396 		dmu_objset_disown(*osp, tag);
397 		return (EROFS);
398 	}
399 	return (err);
400 }
401 
402 void
403 dmu_objset_rele(objset_t *os, void *tag)
404 {
405 	dsl_dataset_rele(os->os_dsl_dataset, tag);
406 }
407 
408 void
409 dmu_objset_disown(objset_t *os, void *tag)
410 {
411 	dsl_dataset_disown(os->os_dsl_dataset, tag);
412 }
413 
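/*
 * Evict the dbufs of every dnode in the objset, taking a hold on each
 * dnode so it can't go away while we drop os_lock to do the eviction.
 * Returns nonzero if dnodes other than the meta-dnode are still on
 * the list afterward.
 */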
414 int
415 dmu_objset_evict_dbufs(objset_t *os)
416 {
417 	dnode_t *dn;
418 
419 	mutex_enter(&os->os_lock);
420 
421 	/* process the mdn last, since the other dnodes have holds on it */
422 	list_remove(&os->os_dnodes, os->os_meta_dnode);
423 	list_insert_tail(&os->os_dnodes, os->os_meta_dnode);
424 
425 	/*
426 	 * Find the first dnode with holds.  We have to do this dance
427 	 * because dnode_add_ref() only works if you already have a
428 	 * hold.  If a dnode has no holds then it has no dbufs, so it
429 	 * is OK to skip it.
430 	 */
431 	for (dn = list_head(&os->os_dnodes);
432 	    dn && !dnode_add_ref(dn, FTAG);
433 	    dn = list_next(&os->os_dnodes, dn))
434 		continue;
435 
436 	while (dn) {
437 		dnode_t *next_dn = dn;
438 
439 		do {
440 			next_dn = list_next(&os->os_dnodes, next_dn);
441 		} while (next_dn && !dnode_add_ref(next_dn, FTAG));
442 
443 		mutex_exit(&os->os_lock);
444 		dnode_evict_dbufs(dn);
445 		dnode_rele(dn, FTAG);
446 		mutex_enter(&os->os_lock);
447 		dn = next_dn;
448 	}
449 	mutex_exit(&os->os_lock);
450 	return (list_head(&os->os_dnodes) != os->os_meta_dnode);
451 }
452 
453 void
454 dmu_objset_evict(objset_t *os)
455 {
456 	dsl_dataset_t *ds = os->os_dsl_dataset;
457 	int i;
458 
459 	for (i = 0; i < TXG_SIZE; i++) {
460 		ASSERT(list_head(&os->os_dirty_dnodes[i]) == NULL);
461 		ASSERT(list_head(&os->os_free_dnodes[i]) == NULL);
462 	}
463 
464 	if (ds) {
465 		if (!dsl_dataset_is_snapshot(ds)) {
466 			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
467 			    checksum_changed_cb, os));
468 			VERIFY(0 == dsl_prop_unregister(ds, "compression",
469 			    compression_changed_cb, os));
470 			VERIFY(0 == dsl_prop_unregister(ds, "copies",
471 			    copies_changed_cb, os));
472 			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
473 			    logbias_changed_cb, os));
474 		}
475 		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
476 		    primary_cache_changed_cb, os));
477 		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
478 		    secondary_cache_changed_cb, os));
479 	}
480 
481 	/*
482 	 * We should need only a single pass over the dnode list, since
483 	 * nothing can be added to the list at this point.
484 	 */
485 	(void) dmu_objset_evict_dbufs(os);
486 
487 	dnode_special_close(os->os_meta_dnode);
488 	if (os->os_userused_dnode) {
489 		dnode_special_close(os->os_userused_dnode);
490 		dnode_special_close(os->os_groupused_dnode);
491 	}
492 	zil_free(os->os_zil);
493 
494 	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
495 
496 	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
497 	mutex_destroy(&os->os_lock);
498 	mutex_destroy(&os->os_obj_lock);
499 	mutex_destroy(&os->os_user_ptr_lock);
500 	kmem_free(os, sizeof (objset_t));
501 }
502 
503 timestruc_t
504 dmu_objset_snap_cmtime(objset_t *os)
505 {
506 	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
507 }
508 
509 /* called from dsl for meta-objset */
510 objset_t *
511 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
512     dmu_objset_type_t type, dmu_tx_t *tx)
513 {
514 	objset_t *os;
515 	dnode_t *mdn;
516 
517 	ASSERT(dmu_tx_is_syncing(tx));
518 	if (ds)
519 		mutex_enter(&ds->ds_opening_lock);
520 	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &os));
521 	if (ds)
522 		mutex_exit(&ds->ds_opening_lock);
523 	mdn = os->os_meta_dnode;
524 
525 	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
526 	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
527 
528 	/*
529 	 * We don't want to have to increase the meta-dnode's nlevels
530 	 * later, because then we could do it in quiescing context while
531 	 * we are also accessing it in open context.
532 	 *
533 	 * This precaution is not necessary for the MOS (ds == NULL),
534 	 * because the MOS is only updated in syncing context.
535 	 * This is most fortunate: the MOS is the only objset that
536 	 * needs to be synced multiple times as spa_sync() iterates
537 	 * to convergence, so minimizing its dn_nlevels matters.
538 	 */
539 	if (ds != NULL) {
540 		int levels = 1;
541 
542 		/*
543 		 * Determine the number of levels necessary for the meta-dnode
544 		 * to contain DN_MAX_OBJECT dnodes.
545 		 */
546 		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
547 		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
548 		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
549 			levels++;
550 
551 		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
552 		    mdn->dn_nlevels = levels;
553 	}
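
	/*
	 * Worked example, assuming the historical constants (16K
	 * meta-dnode blocks, dn_nblkptr == 3, 128K indirect blocks
	 * holding 1024 blkptrs each, 512-byte dnodes, and
	 * DN_MAX_OBJECT == 2^48): the loop must reach a capacity of
	 * 2^48 * 2^9 = 2^57 bytes.  Five levels cover only
	 * 3 * 2^54 bytes, six cover 3 * 2^64, so the required depth
	 * works out to levels == 6.
	 */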
554 
555 	ASSERT(type != DMU_OST_NONE);
556 	ASSERT(type != DMU_OST_ANY);
557 	ASSERT(type < DMU_OST_NUMTYPES);
558 	os->os_phys->os_type = type;
559 	if (dmu_objset_userused_enabled(os)) {
560 		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
561 		os->os_flags = os->os_phys->os_flags;
562 	}
563 
564 	dsl_dataset_dirty(ds, tx);
565 
566 	return (os);
567 }
568 
569 struct oscarg {
570 	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
571 	void *userarg;
572 	dsl_dataset_t *clone_origin;
573 	const char *lastname;
574 	dmu_objset_type_t type;
575 	uint64_t flags;
576 };
577 
578 /*ARGSUSED*/
579 static int
580 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
581 {
582 	dsl_dir_t *dd = arg1;
583 	struct oscarg *oa = arg2;
584 	objset_t *mos = dd->dd_pool->dp_meta_objset;
585 	int err;
586 	uint64_t ddobj;
587 
588 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
589 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
590 	if (err != ENOENT)
591 		return (err ? err : EEXIST);
592 
593 	if (oa->clone_origin != NULL) {
594 		/* You can't clone across pools. */
595 		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
596 			return (EXDEV);
597 
598 		/* You can only clone snapshots, not the head datasets. */
599 		if (!dsl_dataset_is_snapshot(oa->clone_origin))
600 			return (EINVAL);
601 	}
602 
603 	return (0);
604 }
605 
606 static void
607 dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
608 {
609 	dsl_dir_t *dd = arg1;
610 	struct oscarg *oa = arg2;
611 	uint64_t dsobj;
612 
613 	ASSERT(dmu_tx_is_syncing(tx));
614 
615 	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
616 	    oa->clone_origin, oa->flags, cr, tx);
617 
618 	if (oa->clone_origin == NULL) {
619 		dsl_dataset_t *ds;
620 		blkptr_t *bp;
621 		objset_t *os;
622 
623 		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj,
624 		    FTAG, &ds));
625 		bp = dsl_dataset_get_blkptr(ds);
626 		ASSERT(BP_IS_HOLE(bp));
627 
628 		os = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
629 		    ds, bp, oa->type, tx);
630 
631 		if (oa->userfunc)
632 			oa->userfunc(os, oa->userarg, cr, tx);
633 		dsl_dataset_rele(ds, FTAG);
634 	}
635 
636 	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
637 	    tx, cr, "dataset = %llu", dsobj);
638 }
639 
640 int
641 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
642     void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
643 {
644 	dsl_dir_t *pdd;
645 	const char *tail;
646 	int err = 0;
647 	struct oscarg oa = { 0 };
648 
649 	ASSERT(strchr(name, '@') == NULL);
650 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
651 	if (err)
652 		return (err);
653 	if (tail == NULL) {
654 		dsl_dir_close(pdd, FTAG);
655 		return (EEXIST);
656 	}
657 
658 	oa.userfunc = func;
659 	oa.userarg = arg;
660 	oa.lastname = tail;
661 	oa.type = type;
662 	oa.flags = flags;
663 
664 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
665 	    dmu_objset_create_sync, pdd, &oa, 5);
666 	dsl_dir_close(pdd, FTAG);
667 	return (err);
668 }
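
/*
 * Illustrative sketch of a dmu_objset_create() call (hypothetical;
 * my_create_cb and the dataset name are not part of this file).  The
 * callback runs in syncing context against the new, empty objset:
 *
 *	static void
 *	my_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
 *	{
 *		... allocate the objset's initial objects under tx ...
 *	}
 *
 *	error = dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0,
 *	    my_create_cb, NULL);
 */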
669 
670 int
671 dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
672 {
673 	dsl_dir_t *pdd;
674 	const char *tail;
675 	int err = 0;
676 	struct oscarg oa = { 0 };
677 
678 	ASSERT(strchr(name, '@') == NULL);
679 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
680 	if (err)
681 		return (err);
682 	if (tail == NULL) {
683 		dsl_dir_close(pdd, FTAG);
684 		return (EEXIST);
685 	}
686 
687 	oa.lastname = tail;
688 	oa.clone_origin = clone_origin;
689 	oa.flags = flags;
690 
691 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
692 	    dmu_objset_create_sync, pdd, &oa, 5);
693 	dsl_dir_close(pdd, FTAG);
694 	return (err);
695 }
696 
697 int
698 dmu_objset_destroy(const char *name, boolean_t defer)
699 {
700 	dsl_dataset_t *ds;
701 	int error;
702 
703 	/*
704 	 * dsl_dataset_destroy() can free any claimed-but-unplayed
705 	 * intent log; an active log, however, has blocks that
706 	 * are allocated but may not yet be reflected in the on-disk
707 	 * structure.  Only the ZIL knows how to free them, so we have
708 	 * to call into it here.
709 	 */
710 	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
711 	if (error == 0) {
712 		objset_t *os;
713 		if (dmu_objset_from_ds(ds, &os) == 0)
714 			zil_destroy(dmu_objset_zil(os), B_FALSE);
715 		error = dsl_dataset_destroy(ds, FTAG, defer);
716 		/* dsl_dataset_destroy() closes the ds. */
717 	}
718 
719 	return (error);
720 }
721 
722 struct snaparg {
723 	dsl_sync_task_group_t *dstg;
724 	char *snapname;
725 	char failed[MAXPATHLEN];
726 	boolean_t checkperms;
727 	nvlist_t *props;
728 };
729 
730 static int
731 snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
732 {
733 	objset_t *os = arg1;
734 	struct snaparg *sn = arg2;
735 
736 	/* The props have already been checked by zfs_check_userprops(). */
737 
738 	return (dsl_dataset_snapshot_check(os->os_dsl_dataset,
739 	    sn->snapname, tx));
740 }
741 
742 static void
743 snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
744 {
745 	objset_t *os = arg1;
746 	dsl_dataset_t *ds = os->os_dsl_dataset;
747 	struct snaparg *sn = arg2;
748 
749 	dsl_dataset_snapshot_sync(ds, sn->snapname, cr, tx);
750 
751 	if (sn->props)
752 		dsl_props_set_sync(ds->ds_prev, sn->props, cr, tx);
753 }
754 
755 static int
756 dmu_objset_snapshot_one(char *name, void *arg)
757 {
758 	struct snaparg *sn = arg;
759 	objset_t *os;
760 	int err;
761 
762 	(void) strcpy(sn->failed, name);
763 
764 	/*
765 	 * Check permissions only when requested.  This only applies when
766 	 * doing a recursive snapshot.  The permission checks for the starting
767 	 * dataset have already been performed in zfs_secpolicy_snapshot().
768 	 */
769 	if (sn->checkperms == B_TRUE &&
770 	    (err = zfs_secpolicy_snapshot_perms(name, CRED())))
771 		return (err);
772 
773 	err = dmu_objset_hold(name, sn, &os);
774 	if (err != 0)
775 		return (err);
776 
777 	/* If the objset is in an inconsistent state, return busy */
778 	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
779 		dmu_objset_rele(os, sn);
780 		return (EBUSY);
781 	}
782 
783 	/*
784 	 * NB: we need to wait for all in-flight changes to get to disk,
785 	 * so that we snapshot those changes.  zil_suspend does this as
786 	 * a side effect.
787 	 */
788 	err = zil_suspend(dmu_objset_zil(os));
789 	if (err == 0) {
790 		dsl_sync_task_create(sn->dstg, snapshot_check,
791 		    snapshot_sync, os, sn, 3);
792 	} else {
793 		dmu_objset_rele(os, sn);
794 	}
795 
796 	return (err);
797 }
798 
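/*
 * Snapshot one dataset or, if recursive, the dataset and all of its
 * descendents.  Each objset's ZIL is suspended so that in-flight
 * changes reach disk and are captured by the snapshot; the snapshots
 * themselves are then taken in a single sync task group, after which
 * the ZILs are resumed.  On failure, the name of the dataset that
 * failed is copied back into fsname.
 */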
799 int
800 dmu_objset_snapshot(char *fsname, char *snapname,
801     nvlist_t *props, boolean_t recursive)
802 {
803 	dsl_sync_task_t *dst;
804 	struct snaparg sn;
805 	spa_t *spa;
806 	int err;
807 
808 	(void) strcpy(sn.failed, fsname);
809 
810 	err = spa_open(fsname, &spa, FTAG);
811 	if (err)
812 		return (err);
813 
814 	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
815 	sn.snapname = snapname;
816 	sn.props = props;
817 
818 	if (recursive) {
819 		sn.checkperms = B_TRUE;
820 		err = dmu_objset_find(fsname,
821 		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
822 	} else {
823 		sn.checkperms = B_FALSE;
824 		err = dmu_objset_snapshot_one(fsname, &sn);
825 	}
826 
827 	if (err == 0)
828 		err = dsl_sync_task_group_wait(sn.dstg);
829 
830 	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
831 	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
832 		objset_t *os = dst->dst_arg1;
833 		dsl_dataset_t *ds = os->os_dsl_dataset;
834 		if (dst->dst_err)
835 			dsl_dataset_name(ds, sn.failed);
836 		zil_resume(dmu_objset_zil(os));
837 		dmu_objset_rele(os, &sn);
838 	}
839 
840 	if (err)
841 		(void) strcpy(fsname, sn.failed);
842 	dsl_sync_task_group_destroy(sn.dstg);
843 	spa_close(spa, FTAG);
844 	return (err);
845 }
846 
847 static void
848 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
849 {
850 	dnode_t *dn;
851 
852 	while (dn = list_head(list)) {
853 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
854 		ASSERT(dn->dn_dbuf->db_data_pending);
855 		/*
856 		 * Initialize dn_zio outside dnode_sync() because the
857 		 * meta-dnode's dn_zio needs to be set outside dnode_sync().
858 		 */
859 		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
860 		ASSERT(dn->dn_zio);
861 
862 		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
863 		list_remove(list, dn);
864 
865 		if (newlist) {
866 			(void) dnode_add_ref(dn, newlist);
867 			list_insert_tail(newlist, dn);
868 		}
869 
870 		dnode_sync(dn, tx);
871 	}
872 }
873 
874 /* ARGSUSED */
875 static void
876 ready(zio_t *zio, arc_buf_t *abuf, void *arg)
877 {
878 	blkptr_t *bp = zio->io_bp;
879 	blkptr_t *bp_orig = &zio->io_bp_orig;
880 	objset_t *os = arg;
881 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
882 
883 	ASSERT(bp == os->os_rootbp);
884 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
885 	ASSERT(BP_GET_LEVEL(bp) == 0);
886 
887 	/*
888 	 * Update rootbp fill count: it should be the number of objects
889 	 * allocated in the object set (not counting the "special"
890 	 * objects that are stored in the objset_phys_t -- the meta
891 	 * dnode and user/group accounting objects).
892 	 */
893 	bp->blk_fill = 0;
894 	for (int i = 0; i < dnp->dn_nblkptr; i++)
895 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
896 
897 	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
898 		ASSERT(DVA_EQUAL(BP_IDENTITY(bp), BP_IDENTITY(bp_orig)));
899 	} else {
900 		if (zio->io_bp_orig.blk_birth == os->os_synctx->tx_txg)
901 			(void) dsl_dataset_block_kill(os->os_dsl_dataset,
902 			    &zio->io_bp_orig, zio, os->os_synctx);
903 		dsl_dataset_block_born(os->os_dsl_dataset, bp, os->os_synctx);
904 	}
905 }
906 
907 /* called from dsl */
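/*
 * Write out everything dirty in this objset for the given txg: the
 * root block is rewritten via arc_write() (with ready() above as its
 * ready callback), the special dnodes and the dirty/free dnode lists
 * are synced beneath that zio, and zil_sync() frees intent log blocks
 * up to this txg.
 */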
908 void
909 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
910 {
911 	int txgoff;
912 	zbookmark_t zb;
913 	writeprops_t wp = { 0 };
914 	zio_t *zio;
915 	list_t *list;
916 	list_t *newlist = NULL;
917 	dbuf_dirty_record_t *dr;
918 
919 	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
920 
921 	ASSERT(dmu_tx_is_syncing(tx));
922 	/* XXX the write_done callback should really give us the tx... */
923 	os->os_synctx = tx;
924 
925 	if (os->os_dsl_dataset == NULL) {
926 		/*
927 		 * This is the MOS.  If we have upgraded,
928 		 * spa_max_replication() could change, so reset
929 		 * os_copies here.
930 		 */
931 		os->os_copies = spa_max_replication(os->os_spa);
932 	}
933 
934 	/*
935 	 * Create the root block IO
936 	 */
937 	zb.zb_objset = os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : 0;
938 	zb.zb_object = 0;
939 	zb.zb_level = -1;	/* for block ordering; it's level 0 on disk */
940 	zb.zb_blkid = 0;
941 
942 	wp.wp_type = DMU_OT_OBJSET;
943 	wp.wp_level = 0;	/* on-disk BP level; see above */
944 	wp.wp_copies = os->os_copies;
945 	wp.wp_oschecksum = os->os_checksum;
946 	wp.wp_oscompress = os->os_compress;
947 
948 	if (BP_IS_OLDER(os->os_rootbp, tx->tx_txg)) {
949 		(void) dsl_dataset_block_kill(os->os_dsl_dataset,
950 		    os->os_rootbp, pio, tx);
951 	}
952 
953 	arc_release(os->os_phys_buf, &os->os_phys_buf);
954 
955 	zio = arc_write(pio, os->os_spa, &wp, DMU_OS_IS_L2CACHEABLE(os),
956 	    tx->tx_txg, os->os_rootbp, os->os_phys_buf, ready, NULL, os,
957 	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
958 
959 	/*
960 	 * Sync special dnodes - the parent IO for the sync is the root block
961 	 */
962 	os->os_meta_dnode->dn_zio = zio;
963 	dnode_sync(os->os_meta_dnode, tx);
964 
965 	os->os_phys->os_flags = os->os_flags;
966 
967 	if (os->os_userused_dnode &&
968 	    os->os_userused_dnode->dn_type != DMU_OT_NONE) {
969 		os->os_userused_dnode->dn_zio = zio;
970 		dnode_sync(os->os_userused_dnode, tx);
971 		os->os_groupused_dnode->dn_zio = zio;
972 		dnode_sync(os->os_groupused_dnode, tx);
973 	}
974 
975 	txgoff = tx->tx_txg & TXG_MASK;
976 
977 	if (dmu_objset_userused_enabled(os)) {
978 		newlist = &os->os_synced_dnodes;
979 		/*
980 		 * We must create the list here because it uses the
981 		 * dn_dirty_link[] of this txg.
982 		 */
983 		list_create(newlist, sizeof (dnode_t),
984 		    offsetof(dnode_t, dn_dirty_link[txgoff]));
985 	}
986 
987 	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
988 	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
989 
990 	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
991 	while (dr = list_head(list)) {
992 		ASSERT(dr->dr_dbuf->db_level == 0);
993 		list_remove(list, dr);
994 		if (dr->dr_zio)
995 			zio_nowait(dr->dr_zio);
996 	}
997 	/*
998 	 * Free intent log blocks up to this tx.
999 	 */
1000 	zil_sync(os->os_zil, tx);
1001 	os->os_phys->os_zil_header = os->os_zil_header;
1002 	zio_nowait(zio);
1003 }
1004 
1005 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1006 
1007 void
1008 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1009 {
1010 	used_cbs[ost] = cb;
1011 }
1012 
1013 boolean_t
1014 dmu_objset_userused_enabled(objset_t *os)
1015 {
1016 	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1017 	    used_cbs[os->os_phys->os_type] &&
1018 	    os->os_userused_dnode);
1019 }
1020 
1021 static void
1022 do_userquota_callback(objset_t *os, dnode_phys_t *dnp,
1023     boolean_t subtract, dmu_tx_t *tx)
1024 {
1025 	static const char zerobuf[DN_MAX_BONUSLEN] = {0};
1026 	uint64_t user, group;
1027 
1028 	ASSERT(dnp->dn_type != 0 ||
1029 	    (bcmp(DN_BONUS(dnp), zerobuf, DN_MAX_BONUSLEN) == 0 &&
1030 	    DN_USED_BYTES(dnp) == 0));
1031 
1032 	if ((dnp->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) &&
1033 	    0 == used_cbs[os->os_phys->os_type](dnp->dn_bonustype,
1034 	    DN_BONUS(dnp), &user, &group)) {
1035 		int64_t delta = DNODE_SIZE + DN_USED_BYTES(dnp);
1036 		if (subtract)
1037 			delta = -delta;
1038 		VERIFY(0 == zap_increment_int(os, DMU_USERUSED_OBJECT,
1039 		    user, delta, tx));
1040 		VERIFY(0 == zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1041 		    group, delta, tx));
1042 	}
1043 }
1044 
1045 void
1046 dmu_objset_do_userquota_callbacks(objset_t *os, dmu_tx_t *tx)
1047 {
1048 	dnode_t *dn;
1049 	list_t *list = &os->os_synced_dnodes;
1050 
1051 	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1052 
1053 	while (dn = list_head(list)) {
1054 		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1055 		ASSERT(dn->dn_oldphys);
1056 		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1057 		    dn->dn_phys->dn_flags &
1058 		    DNODE_FLAG_USERUSED_ACCOUNTED);
1059 
1060 		/* Allocate the user/groupused objects if necessary. */
1061 		if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
1062 			VERIFY(0 == zap_create_claim(os,
1063 			    DMU_USERUSED_OBJECT,
1064 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1065 			VERIFY(0 == zap_create_claim(os,
1066 			    DMU_GROUPUSED_OBJECT,
1067 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1068 		}
1069 
1070 		/*
1071 		 * We intentionally modify the zap object even if the
1072 		 * net delta (due to phys-oldphys) is zero.  Otherwise
1073 		 * the block of the zap obj could be shared between
1074 		 * datasets, but would need to differ between them
1075 		 * after a bprewrite.
1076 		 */
1077 		do_userquota_callback(os, dn->dn_oldphys, B_TRUE, tx);
1078 		do_userquota_callback(os, dn->dn_phys, B_FALSE, tx);
1079 
1080 		/*
1081 		 * The mutex is needed here for interlock with dnode_allocate.
1082 		 */
1083 		mutex_enter(&dn->dn_mtx);
1084 		zio_buf_free(dn->dn_oldphys, sizeof (dnode_phys_t));
1085 		dn->dn_oldphys = NULL;
1086 		mutex_exit(&dn->dn_mtx);
1087 
1088 		list_remove(list, dn);
1089 		dnode_rele(dn, list);
1090 	}
1091 }
1092 
1093 boolean_t
1094 dmu_objset_userspace_present(objset_t *os)
1095 {
1096 	return (os->os_phys->os_flags &
1097 	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1098 }
1099 
1100 int
1101 dmu_objset_userspace_upgrade(objset_t *os)
1102 {
1103 	uint64_t obj;
1104 	int err = 0;
1105 
1106 	if (dmu_objset_userspace_present(os))
1107 		return (0);
1108 	if (!dmu_objset_userused_enabled(os))
1109 		return (ENOTSUP);
1110 	if (dmu_objset_is_snapshot(os))
1111 		return (EINVAL);
1112 
1113 	/*
1114 	 * We simply need to mark every object dirty, so that it will be
1115 	 * synced out and thus accounted.  If this is called
1116 	 * concurrently, or if we already did some work before crashing,
1117 	 * that's fine, since we track each object's accounted state
1118 	 * independently.
1119 	 */
1120 
1121 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1122 		dmu_tx_t *tx;
1123 		dmu_buf_t *db;
1124 		int objerr;
1125 
1126 		if (issig(JUSTLOOKING) && issig(FORREAL))
1127 			return (EINTR);
1128 
1129 		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1130 		if (objerr)
1131 			continue;
1132 		tx = dmu_tx_create(os);
1133 		dmu_tx_hold_bonus(tx, obj);
1134 		objerr = dmu_tx_assign(tx, TXG_WAIT);
1135 		if (objerr) {
1136 			dmu_tx_abort(tx);
1137 			continue;
1138 		}
1139 		dmu_buf_will_dirty(db, tx);
1140 		dmu_buf_rele(db, FTAG);
1141 		dmu_tx_commit(tx);
1142 	}
1143 
1144 	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1145 	txg_wait_synced(dmu_objset_pool(os), 0);
1146 	return (0);
1147 }
1148 
1149 void
1150 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1151     uint64_t *usedobjsp, uint64_t *availobjsp)
1152 {
1153 	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1154 	    usedobjsp, availobjsp);
1155 }
1156 
1157 uint64_t
1158 dmu_objset_fsid_guid(objset_t *os)
1159 {
1160 	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1161 }
1162 
1163 void
1164 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1165 {
1166 	stat->dds_type = os->os_phys->os_type;
1167 	if (os->os_dsl_dataset)
1168 		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1169 }
1170 
1171 void
1172 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1173 {
1174 	ASSERT(os->os_dsl_dataset ||
1175 	    os->os_phys->os_type == DMU_OST_META);
1176 
1177 	if (os->os_dsl_dataset != NULL)
1178 		dsl_dataset_stats(os->os_dsl_dataset, nv);
1179 
1180 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1181 	    os->os_phys->os_type);
1182 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1183 	    dmu_objset_userspace_present(os));
1184 }
1185 
1186 int
1187 dmu_objset_is_snapshot(objset_t *os)
1188 {
1189 	if (os->os_dsl_dataset != NULL)
1190 		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
1191 	else
1192 		return (B_FALSE);
1193 }
1194 
1195 int
1196 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1197     boolean_t *conflict)
1198 {
1199 	dsl_dataset_t *ds = os->os_dsl_dataset;
1200 	uint64_t ignored;
1201 
1202 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1203 		return (ENOENT);
1204 
1205 	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1206 	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
1207 	    real, maxlen, conflict));
1208 }
1209 
1210 int
1211 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1212     uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1213 {
1214 	dsl_dataset_t *ds = os->os_dsl_dataset;
1215 	zap_cursor_t cursor;
1216 	zap_attribute_t attr;
1217 
1218 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1219 		return (ENOENT);
1220 
1221 	zap_cursor_init_serialized(&cursor,
1222 	    ds->ds_dir->dd_pool->dp_meta_objset,
1223 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
1224 
1225 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1226 		zap_cursor_fini(&cursor);
1227 		return (ENOENT);
1228 	}
1229 
1230 	if (strlen(attr.za_name) + 1 > namelen) {
1231 		zap_cursor_fini(&cursor);
1232 		return (ENAMETOOLONG);
1233 	}
1234 
1235 	(void) strcpy(name, attr.za_name);
1236 	if (idp)
1237 		*idp = attr.za_first_integer;
1238 	if (case_conflict)
1239 		*case_conflict = attr.za_normalization_conflict;
1240 	zap_cursor_advance(&cursor);
1241 	*offp = zap_cursor_serialize(&cursor);
1242 	zap_cursor_fini(&cursor);
1243 
1244 	return (0);
1245 }
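
/*
 * Illustrative iteration over snapshots (hypothetical caller code):
 * *offp is a serialized ZAP cursor, so initialize it to 0 and pass it
 * back unchanged on each call until ENOENT is returned:
 *
 *	uint64_t off = 0;
 *	char snapname[MAXNAMELEN];
 *	while (dmu_snapshot_list_next(os, sizeof (snapname), snapname,
 *	    NULL, &off, NULL) == 0) {
 *		... visit snapname ...
 *	}
 */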
1246 
1247 int
1248 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1249     uint64_t *idp, uint64_t *offp)
1250 {
1251 	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1252 	zap_cursor_t cursor;
1253 	zap_attribute_t attr;
1254 
1255 	/* there is no next dir on a snapshot! */
1256 	if (os->os_dsl_dataset->ds_object !=
1257 	    dd->dd_phys->dd_head_dataset_obj)
1258 		return (ENOENT);
1259 
1260 	zap_cursor_init_serialized(&cursor,
1261 	    dd->dd_pool->dp_meta_objset,
1262 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
1263 
1264 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1265 		zap_cursor_fini(&cursor);
1266 		return (ENOENT);
1267 	}
1268 
1269 	if (strlen(attr.za_name) + 1 > namelen) {
1270 		zap_cursor_fini(&cursor);
1271 		return (ENAMETOOLONG);
1272 	}
1273 
1274 	(void) strcpy(name, attr.za_name);
1275 	if (idp)
1276 		*idp = attr.za_first_integer;
1277 	zap_cursor_advance(&cursor);
1278 	*offp = zap_cursor_serialize(&cursor);
1279 	zap_cursor_fini(&cursor);
1280 
1281 	return (0);
1282 }
1283 
1284 struct findarg {
1285 	int (*func)(char *, void *);
1286 	void *arg;
1287 };
1288 
1289 /* ARGSUSED */
1290 static int
1291 findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
1292 {
1293 	struct findarg *fa = arg;
1294 	return (fa->func((char *)dsname, fa->arg));
1295 }
1296 
1297 /*
1298  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1299  * Perhaps change all callers to use dmu_objset_find_spa()?
1300  */
1301 int
1302 dmu_objset_find(char *name, int func(char *, void *), void *arg, int flags)
1303 {
1304 	struct findarg fa;
1305 	fa.func = func;
1306 	fa.arg = arg;
1307 	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
1308 }
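
/*
 * For example (hypothetical caller), to visit a filesystem plus all of
 * its descendents and their snapshots:
 *
 *	err = dmu_objset_find("tank/fs", my_func, my_arg,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */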
1309 
1310 /*
1311  * Find all objsets under name, call func on each
1312  */
1313 int
1314 dmu_objset_find_spa(spa_t *spa, const char *name,
1315     int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
1316 {
1317 	dsl_dir_t *dd;
1318 	dsl_pool_t *dp;
1319 	dsl_dataset_t *ds;
1320 	zap_cursor_t zc;
1321 	zap_attribute_t *attr;
1322 	char *child;
1323 	uint64_t thisobj;
1324 	int err;
1325 
1326 	if (name == NULL)
1327 		name = spa_name(spa);
1328 	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
1329 	if (err)
1330 		return (err);
1331 
1332 	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1333 	if (dd->dd_myname[0] == '$') {
1334 		dsl_dir_close(dd, FTAG);
1335 		return (0);
1336 	}
1337 
1338 	thisobj = dd->dd_phys->dd_head_dataset_obj;
1339 	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1340 	dp = dd->dd_pool;
1341 
1342 	/*
1343 	 * Iterate over all children.
1344 	 */
1345 	if (flags & DS_FIND_CHILDREN) {
1346 		for (zap_cursor_init(&zc, dp->dp_meta_objset,
1347 		    dd->dd_phys->dd_child_dir_zapobj);
1348 		    zap_cursor_retrieve(&zc, attr) == 0;
1349 		    (void) zap_cursor_advance(&zc)) {
1350 			ASSERT(attr->za_integer_length == sizeof (uint64_t));
1351 			ASSERT(attr->za_num_integers == 1);
1352 
1353 			child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1354 			(void) strcpy(child, name);
1355 			(void) strcat(child, "/");
1356 			(void) strcat(child, attr->za_name);
1357 			err = dmu_objset_find_spa(spa, child, func, arg, flags);
1358 			kmem_free(child, MAXPATHLEN);
1359 			if (err)
1360 				break;
1361 		}
1362 		zap_cursor_fini(&zc);
1363 
1364 		if (err) {
1365 			dsl_dir_close(dd, FTAG);
1366 			kmem_free(attr, sizeof (zap_attribute_t));
1367 			return (err);
1368 		}
1369 	}
1370 
1371 	/*
1372 	 * Iterate over all snapshots.
1373 	 */
1374 	if (flags & DS_FIND_SNAPSHOTS) {
1375 		if (!dsl_pool_sync_context(dp))
1376 			rw_enter(&dp->dp_config_rwlock, RW_READER);
1377 		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1378 		if (!dsl_pool_sync_context(dp))
1379 			rw_exit(&dp->dp_config_rwlock);
1380 
1381 		if (err == 0) {
1382 			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
1383 			dsl_dataset_rele(ds, FTAG);
1384 
1385 			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1386 			    zap_cursor_retrieve(&zc, attr) == 0;
1387 			    (void) zap_cursor_advance(&zc)) {
1388 				ASSERT(attr->za_integer_length ==
1389 				    sizeof (uint64_t));
1390 				ASSERT(attr->za_num_integers == 1);
1391 
1392 				child = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1393 				(void) strcpy(child, name);
1394 				(void) strcat(child, "@");
1395 				(void) strcat(child, attr->za_name);
1396 				err = func(spa, attr->za_first_integer,
1397 				    child, arg);
1398 				kmem_free(child, MAXPATHLEN);
1399 				if (err)
1400 					break;
1401 			}
1402 			zap_cursor_fini(&zc);
1403 		}
1404 	}
1405 
1406 	dsl_dir_close(dd, FTAG);
1407 	kmem_free(attr, sizeof (zap_attribute_t));
1408 
1409 	if (err)
1410 		return (err);
1411 
1412 	/*
1413 	 * Apply to self if appropriate.
1414 	 */
1415 	err = func(spa, thisobj, name, arg);
1416 	return (err);
1417 }
1418 
1419 /* ARGSUSED */
1420 int
1421 dmu_objset_prefetch(char *name, void *arg)
1422 {
1423 	dsl_dataset_t *ds;
1424 
1425 	if (dsl_dataset_hold(name, FTAG, &ds))
1426 		return (0);
1427 
1428 	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
1429 		mutex_enter(&ds->ds_opening_lock);
1430 		if (ds->ds_objset == NULL) {
1431 			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1432 			zbookmark_t zb;
1433 
1434 			zb.zb_objset = ds->ds_object;
1435 			zb.zb_object = 0;
1436 			zb.zb_level = -1;
1437 			zb.zb_blkid = 0;
1438 
1439 			(void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds),
1440 			    &ds->ds_phys->ds_bp, NULL, NULL,
1441 			    ZIO_PRIORITY_ASYNC_READ,
1442 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1443 			    &aflags, &zb);
1444 		}
1445 		mutex_exit(&ds->ds_opening_lock);
1446 	}
1447 
1448 	dsl_dataset_rele(ds, FTAG);
1449 	return (0);
1450 }
1451 
1452 void
1453 dmu_objset_set_user(objset_t *os, void *user_ptr)
1454 {
1455 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1456 	os->os_user_ptr = user_ptr;
1457 }
1458 
1459 void *
1460 dmu_objset_get_user(objset_t *os)
1461 {
1462 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1463 	return (os->os_user_ptr);
1464 }
1465