xref: /illumos-gate/usr/src/uts/common/fs/zfs/dmu_objset.c (revision fb2a9bae0030340ad72b9c26ba1ffee2ee3cafec)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/cred.h>
27 #include <sys/zfs_context.h>
28 #include <sys/dmu_objset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_dataset.h>
31 #include <sys/dsl_prop.h>
32 #include <sys/dsl_pool.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/dsl_deleg.h>
35 #include <sys/dnode.h>
36 #include <sys/dbuf.h>
37 #include <sys/zvol.h>
38 #include <sys/dmu_tx.h>
39 #include <sys/zap.h>
40 #include <sys/zil.h>
41 #include <sys/dmu_impl.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/sunddi.h>
44 #include <sys/sa.h>
45 
46 spa_t *
47 dmu_objset_spa(objset_t *os)
48 {
49 	return (os->os_spa);
50 }
51 
52 zilog_t *
53 dmu_objset_zil(objset_t *os)
54 {
55 	return (os->os_zil);
56 }
57 
58 dsl_pool_t *
59 dmu_objset_pool(objset_t *os)
60 {
61 	dsl_dataset_t *ds;
62 
63 	if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
64 		return (ds->ds_dir->dd_pool);
65 	else
66 		return (spa_get_dsl(os->os_spa));
67 }
68 
69 dsl_dataset_t *
70 dmu_objset_ds(objset_t *os)
71 {
72 	return (os->os_dsl_dataset);
73 }
74 
75 dmu_objset_type_t
76 dmu_objset_type(objset_t *os)
77 {
78 	return (os->os_phys->os_type);
79 }
80 
81 void
82 dmu_objset_name(objset_t *os, char *buf)
83 {
84 	dsl_dataset_name(os->os_dsl_dataset, buf);
85 }
86 
87 uint64_t
88 dmu_objset_id(objset_t *os)
89 {
90 	dsl_dataset_t *ds = os->os_dsl_dataset;
91 
92 	return (ds ? ds->ds_object : 0);
93 }
94 
95 uint64_t
96 dmu_objset_logbias(objset_t *os)
97 {
98 	return (os->os_logbias);
99 }
100 
101 static void
102 checksum_changed_cb(void *arg, uint64_t newval)
103 {
104 	objset_t *os = arg;
105 
106 	/*
107 	 * Inheritance should have been done by now.
108 	 */
109 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
110 
111 	os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
112 }
113 
114 static void
115 compression_changed_cb(void *arg, uint64_t newval)
116 {
117 	objset_t *os = arg;
118 
119 	/*
120 	 * Inheritance and range checking should have been done by now.
121 	 */
122 	ASSERT(newval != ZIO_COMPRESS_INHERIT);
123 
124 	os->os_compress = zio_compress_select(newval, ZIO_COMPRESS_ON_VALUE);
125 }
126 
127 static void
128 copies_changed_cb(void *arg, uint64_t newval)
129 {
130 	objset_t *os = arg;
131 
132 	/*
133 	 * Inheritance and range checking should have been done by now.
134 	 */
135 	ASSERT(newval > 0);
136 	ASSERT(newval <= spa_max_replication(os->os_spa));
137 
138 	os->os_copies = newval;
139 }
140 
141 static void
142 dedup_changed_cb(void *arg, uint64_t newval)
143 {
144 	objset_t *os = arg;
145 	spa_t *spa = os->os_spa;
146 	enum zio_checksum checksum;
147 
148 	/*
149 	 * Inheritance should have been done by now.
150 	 */
151 	ASSERT(newval != ZIO_CHECKSUM_INHERIT);
152 
153 	checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
154 
155 	os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
156 	os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
157 }
158 
159 static void
160 primary_cache_changed_cb(void *arg, uint64_t newval)
161 {
162 	objset_t *os = arg;
163 
164 	/*
165 	 * Inheritance and range checking should have been done by now.
166 	 */
167 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
168 	    newval == ZFS_CACHE_METADATA);
169 
170 	os->os_primary_cache = newval;
171 }
172 
173 static void
174 secondary_cache_changed_cb(void *arg, uint64_t newval)
175 {
176 	objset_t *os = arg;
177 
178 	/*
179 	 * Inheritance and range checking should have been done by now.
180 	 */
181 	ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
182 	    newval == ZFS_CACHE_METADATA);
183 
184 	os->os_secondary_cache = newval;
185 }
186 
187 static void
188 logbias_changed_cb(void *arg, uint64_t newval)
189 {
190 	objset_t *os = arg;
191 
192 	ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
193 	    newval == ZFS_LOGBIAS_THROUGHPUT);
194 	os->os_logbias = newval;
195 	if (os->os_zil)
196 		zil_set_logbias(os->os_zil, newval);
197 }
198 
199 void
200 dmu_objset_byteswap(void *buf, size_t size)
201 {
202 	objset_phys_t *osp = buf;
203 
204 	ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t));
205 	dnode_byteswap(&osp->os_meta_dnode);
206 	byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
207 	osp->os_type = BSWAP_64(osp->os_type);
208 	osp->os_flags = BSWAP_64(osp->os_flags);
209 	if (size == sizeof (objset_phys_t)) {
210 		dnode_byteswap(&osp->os_userused_dnode);
211 		dnode_byteswap(&osp->os_groupused_dnode);
212 	}
213 }
214 
215 int
216 dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
217     objset_t **osp)
218 {
219 	objset_t *os;
220 	int i, err;
221 
222 	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
223 
224 	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
225 	os->os_dsl_dataset = ds;
226 	os->os_spa = spa;
227 	os->os_rootbp = bp;
228 	if (!BP_IS_HOLE(os->os_rootbp)) {
229 		uint32_t aflags = ARC_WAIT;
230 		zbookmark_t zb;
231 		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
232 		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
233 
234 		if (DMU_OS_IS_L2CACHEABLE(os))
235 			aflags |= ARC_L2CACHE;
236 
237 		dprintf_bp(os->os_rootbp, "reading %s", "");
238 		/*
239 		 * NB: when bprewrite scrub can change the bp,
240 		 * and this is called from dmu_objset_open_ds_os, the bp
241 		 * could change, and we'll need a lock.
242 		 */
243 		err = arc_read_nolock(NULL, spa, os->os_rootbp,
244 		    arc_getbuf_func, &os->os_phys_buf,
245 		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
246 		if (err) {
247 			kmem_free(os, sizeof (objset_t));
248 			/* convert checksum errors into IO errors */
249 			if (err == ECKSUM)
250 				err = EIO;
251 			return (err);
252 		}
253 
254 		/* Increase the blocksize if we are permitted. */
255 		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
256 		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
257 			arc_buf_t *buf = arc_buf_alloc(spa,
258 			    sizeof (objset_phys_t), &os->os_phys_buf,
259 			    ARC_BUFC_METADATA);
260 			bzero(buf->b_data, sizeof (objset_phys_t));
261 			bcopy(os->os_phys_buf->b_data, buf->b_data,
262 			    arc_buf_size(os->os_phys_buf));
263 			(void) arc_buf_remove_ref(os->os_phys_buf,
264 			    &os->os_phys_buf);
265 			os->os_phys_buf = buf;
266 		}
267 
268 		os->os_phys = os->os_phys_buf->b_data;
269 		os->os_flags = os->os_phys->os_flags;
270 	} else {
271 		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
272 		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
273 		os->os_phys_buf = arc_buf_alloc(spa, size,
274 		    &os->os_phys_buf, ARC_BUFC_METADATA);
275 		os->os_phys = os->os_phys_buf->b_data;
276 		bzero(os->os_phys, size);
277 	}
278 
279 	/*
280 	 * Note: the changed_cb will be called once before the register
281 	 * func returns, thus changing the checksum/compression from the
282 	 * default (fletcher2/off).  Snapshots don't need to know about
283 	 * checksum/compression/copies.
284 	 */
285 	if (ds) {
286 		err = dsl_prop_register(ds, "primarycache",
287 		    primary_cache_changed_cb, os);
288 		if (err == 0)
289 			err = dsl_prop_register(ds, "secondarycache",
290 			    secondary_cache_changed_cb, os);
291 		if (!dsl_dataset_is_snapshot(ds)) {
292 			if (err == 0)
293 				err = dsl_prop_register(ds, "checksum",
294 				    checksum_changed_cb, os);
295 			if (err == 0)
296 				err = dsl_prop_register(ds, "compression",
297 				    compression_changed_cb, os);
298 			if (err == 0)
299 				err = dsl_prop_register(ds, "copies",
300 				    copies_changed_cb, os);
301 			if (err == 0)
302 				err = dsl_prop_register(ds, "dedup",
303 				    dedup_changed_cb, os);
304 			if (err == 0)
305 				err = dsl_prop_register(ds, "logbias",
306 				    logbias_changed_cb, os);
307 		}
308 		if (err) {
309 			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
310 			    &os->os_phys_buf) == 1);
311 			kmem_free(os, sizeof (objset_t));
312 			return (err);
313 		}
314 	} else if (ds == NULL) {
315 		/* It's the meta-objset. */
316 		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
317 		os->os_compress = ZIO_COMPRESS_LZJB;
318 		os->os_copies = spa_max_replication(spa);
319 		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
320 		os->os_dedup_verify = 0;
321 		os->os_logbias = 0;
322 		os->os_primary_cache = ZFS_CACHE_ALL;
323 		os->os_secondary_cache = ZFS_CACHE_ALL;
324 	}
325 
326 	os->os_zil_header = os->os_phys->os_zil_header;
327 	os->os_zil = zil_alloc(os, &os->os_zil_header);
328 
329 	for (i = 0; i < TXG_SIZE; i++) {
330 		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
331 		    offsetof(dnode_t, dn_dirty_link[i]));
332 		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
333 		    offsetof(dnode_t, dn_dirty_link[i]));
334 	}
335 	list_create(&os->os_dnodes, sizeof (dnode_t),
336 	    offsetof(dnode_t, dn_link));
337 	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
338 	    offsetof(dmu_buf_impl_t, db_link));
339 
340 	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
341 	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
342 	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
343 
344 	os->os_meta_dnode = dnode_special_open(os,
345 	    &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT);
346 	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
347 		os->os_userused_dnode = dnode_special_open(os,
348 		    &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT);
349 		os->os_groupused_dnode = dnode_special_open(os,
350 		    &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT);
351 	}
352 
353 	/*
354 	 * We should be the only thread trying to do this because we
355 	 * have ds_opening_lock
356 	 */
357 	if (ds) {
358 		mutex_enter(&ds->ds_lock);
359 		ASSERT(ds->ds_objset == NULL);
360 		ds->ds_objset = os;
361 		mutex_exit(&ds->ds_lock);
362 	}
363 
364 	*osp = os;
365 	return (0);
366 }
367 
368 int
369 dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
370 {
371 	int err = 0;
372 
373 	mutex_enter(&ds->ds_opening_lock);
374 	*osp = ds->ds_objset;
375 	if (*osp == NULL) {
376 		err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
377 		    ds, &ds->ds_phys->ds_bp, osp);
378 	}
379 	mutex_exit(&ds->ds_opening_lock);
380 	return (err);
381 }
382 
383 /* called from zpl */
384 int
385 dmu_objset_hold(const char *name, void *tag, objset_t **osp)
386 {
387 	dsl_dataset_t *ds;
388 	int err;
389 
390 	err = dsl_dataset_hold(name, tag, &ds);
391 	if (err)
392 		return (err);
393 
394 	err = dmu_objset_from_ds(ds, osp);
395 	if (err)
396 		dsl_dataset_rele(ds, tag);
397 
398 	return (err);
399 }
400 
401 /* called from zpl */
402 int
403 dmu_objset_own(const char *name, dmu_objset_type_t type,
404     boolean_t readonly, void *tag, objset_t **osp)
405 {
406 	dsl_dataset_t *ds;
407 	int err;
408 
409 	err = dsl_dataset_own(name, B_FALSE, tag, &ds);
410 	if (err)
411 		return (err);
412 
413 	err = dmu_objset_from_ds(ds, osp);
414 	if (err) {
415 		dsl_dataset_disown(ds, tag);
416 	} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
417 		dmu_objset_disown(*osp, tag);
418 		return (EINVAL);
419 	} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
420 		dmu_objset_disown(*osp, tag);
421 		return (EROFS);
422 	}
423 	return (err);
424 }
425 
426 void
427 dmu_objset_rele(objset_t *os, void *tag)
428 {
429 	dsl_dataset_rele(os->os_dsl_dataset, tag);
430 }
431 
432 void
433 dmu_objset_disown(objset_t *os, void *tag)
434 {
435 	dsl_dataset_disown(os->os_dsl_dataset, tag);
436 }
437 
438 int
439 dmu_objset_evict_dbufs(objset_t *os)
440 {
441 	dnode_t *dn;
442 
443 	mutex_enter(&os->os_lock);
444 
445 	/* process the mdn last, since the other dnodes have holds on it */
446 	list_remove(&os->os_dnodes, os->os_meta_dnode);
447 	list_insert_tail(&os->os_dnodes, os->os_meta_dnode);
448 
449 	/*
450 	 * Find the first dnode with holds.  We have to do this dance
451 	 * because dnode_add_ref() only works if you already have a
452 	 * hold.  If there are no holds then it has no dbufs so OK to
453 	 * skip.
454 	 */
455 	for (dn = list_head(&os->os_dnodes);
456 	    dn && !dnode_add_ref(dn, FTAG);
457 	    dn = list_next(&os->os_dnodes, dn))
458 		continue;
459 
460 	while (dn) {
461 		dnode_t *next_dn = dn;
462 
463 		do {
464 			next_dn = list_next(&os->os_dnodes, next_dn);
465 		} while (next_dn && !dnode_add_ref(next_dn, FTAG));
466 
467 		mutex_exit(&os->os_lock);
468 		dnode_evict_dbufs(dn);
469 		dnode_rele(dn, FTAG);
470 		mutex_enter(&os->os_lock);
471 		dn = next_dn;
472 	}
473 	mutex_exit(&os->os_lock);
474 	return (list_head(&os->os_dnodes) != os->os_meta_dnode);
475 }
476 
477 void
478 dmu_objset_evict(objset_t *os)
479 {
480 	dsl_dataset_t *ds = os->os_dsl_dataset;
481 
482 	for (int t = 0; t < TXG_SIZE; t++)
483 		ASSERT(!dmu_objset_is_dirty(os, t));
484 
485 	if (ds) {
486 		if (!dsl_dataset_is_snapshot(ds)) {
487 			VERIFY(0 == dsl_prop_unregister(ds, "checksum",
488 			    checksum_changed_cb, os));
489 			VERIFY(0 == dsl_prop_unregister(ds, "compression",
490 			    compression_changed_cb, os));
491 			VERIFY(0 == dsl_prop_unregister(ds, "copies",
492 			    copies_changed_cb, os));
493 			VERIFY(0 == dsl_prop_unregister(ds, "dedup",
494 			    dedup_changed_cb, os));
495 			VERIFY(0 == dsl_prop_unregister(ds, "logbias",
496 			    logbias_changed_cb, os));
497 		}
498 		VERIFY(0 == dsl_prop_unregister(ds, "primarycache",
499 		    primary_cache_changed_cb, os));
500 		VERIFY(0 == dsl_prop_unregister(ds, "secondarycache",
501 		    secondary_cache_changed_cb, os));
502 	}
503 
504 	if (os->os_sa)
505 		sa_tear_down(os);
506 
507 	/*
508 	 * We should need only a single pass over the dnode list, since
509 	 * nothing can be added to the list at this point.
510 	 */
511 	(void) dmu_objset_evict_dbufs(os);
512 
513 	dnode_special_close(os->os_meta_dnode);
514 	if (os->os_userused_dnode) {
515 		dnode_special_close(os->os_userused_dnode);
516 		dnode_special_close(os->os_groupused_dnode);
517 	}
518 	zil_free(os->os_zil);
519 
520 	ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
521 
522 	VERIFY(arc_buf_remove_ref(os->os_phys_buf, &os->os_phys_buf) == 1);
523 	mutex_destroy(&os->os_lock);
524 	mutex_destroy(&os->os_obj_lock);
525 	mutex_destroy(&os->os_user_ptr_lock);
526 	kmem_free(os, sizeof (objset_t));
527 }
528 
529 timestruc_t
530 dmu_objset_snap_cmtime(objset_t *os)
531 {
532 	return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
533 }
534 
535 /* called from dsl for meta-objset */
536 objset_t *
537 dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
538     dmu_objset_type_t type, dmu_tx_t *tx)
539 {
540 	objset_t *os;
541 	dnode_t *mdn;
542 
543 	ASSERT(dmu_tx_is_syncing(tx));
544 	if (ds)
545 		mutex_enter(&ds->ds_opening_lock);
546 	VERIFY(0 == dmu_objset_open_impl(spa, ds, bp, &os));
547 	if (ds)
548 		mutex_exit(&ds->ds_opening_lock);
549 	mdn = os->os_meta_dnode;
550 
551 	dnode_allocate(mdn, DMU_OT_DNODE, 1 << DNODE_BLOCK_SHIFT,
552 	    DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, tx);
553 
554 	/*
555 	 * We don't want to have to increase the meta-dnode's nlevels
556 	 * later, because then we could do it in quescing context while
557 	 * we are also accessing it in open context.
558 	 *
559 	 * This precaution is not necessary for the MOS (ds == NULL),
560 	 * because the MOS is only updated in syncing context.
561 	 * This is most fortunate: the MOS is the only objset that
562 	 * needs to be synced multiple times as spa_sync() iterates
563 	 * to convergence, so minimizing its dn_nlevels matters.
564 	 */
565 	if (ds != NULL) {
566 		int levels = 1;
567 
568 		/*
569 		 * Determine the number of levels necessary for the meta-dnode
570 		 * to contain DN_MAX_OBJECT dnodes.
571 		 */
572 		while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift +
573 		    (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
574 		    DN_MAX_OBJECT * sizeof (dnode_phys_t))
575 			levels++;
576 
577 		mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
578 		    mdn->dn_nlevels = levels;
579 	}
580 
581 	ASSERT(type != DMU_OST_NONE);
582 	ASSERT(type != DMU_OST_ANY);
583 	ASSERT(type < DMU_OST_NUMTYPES);
584 	os->os_phys->os_type = type;
585 	if (dmu_objset_userused_enabled(os)) {
586 		os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
587 		os->os_flags = os->os_phys->os_flags;
588 	}
589 
590 	dsl_dataset_dirty(ds, tx);
591 
592 	return (os);
593 }
594 
595 struct oscarg {
596 	void (*userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
597 	void *userarg;
598 	dsl_dataset_t *clone_origin;
599 	const char *lastname;
600 	dmu_objset_type_t type;
601 	uint64_t flags;
602 };
603 
604 /*ARGSUSED*/
605 static int
606 dmu_objset_create_check(void *arg1, void *arg2, dmu_tx_t *tx)
607 {
608 	dsl_dir_t *dd = arg1;
609 	struct oscarg *oa = arg2;
610 	objset_t *mos = dd->dd_pool->dp_meta_objset;
611 	int err;
612 	uint64_t ddobj;
613 
614 	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
615 	    oa->lastname, sizeof (uint64_t), 1, &ddobj);
616 	if (err != ENOENT)
617 		return (err ? err : EEXIST);
618 
619 	if (oa->clone_origin != NULL) {
620 		/* You can't clone across pools. */
621 		if (oa->clone_origin->ds_dir->dd_pool != dd->dd_pool)
622 			return (EXDEV);
623 
624 		/* You can only clone snapshots, not the head datasets. */
625 		if (!dsl_dataset_is_snapshot(oa->clone_origin))
626 			return (EINVAL);
627 	}
628 
629 	return (0);
630 }
631 
632 static void
633 dmu_objset_create_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
634 {
635 	dsl_dir_t *dd = arg1;
636 	struct oscarg *oa = arg2;
637 	uint64_t dsobj;
638 
639 	ASSERT(dmu_tx_is_syncing(tx));
640 
641 	dsobj = dsl_dataset_create_sync(dd, oa->lastname,
642 	    oa->clone_origin, oa->flags, cr, tx);
643 
644 	if (oa->clone_origin == NULL) {
645 		dsl_dataset_t *ds;
646 		blkptr_t *bp;
647 		objset_t *os;
648 
649 		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, dsobj,
650 		    FTAG, &ds));
651 		bp = dsl_dataset_get_blkptr(ds);
652 		ASSERT(BP_IS_HOLE(bp));
653 
654 		os = dmu_objset_create_impl(dsl_dataset_get_spa(ds),
655 		    ds, bp, oa->type, tx);
656 
657 		if (oa->userfunc)
658 			oa->userfunc(os, oa->userarg, cr, tx);
659 		dsl_dataset_rele(ds, FTAG);
660 	}
661 
662 	spa_history_internal_log(LOG_DS_CREATE, dd->dd_pool->dp_spa,
663 	    tx, cr, "dataset = %llu", dsobj);
664 }
665 
666 int
667 dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
668     void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg)
669 {
670 	dsl_dir_t *pdd;
671 	const char *tail;
672 	int err = 0;
673 	struct oscarg oa = { 0 };
674 
675 	ASSERT(strchr(name, '@') == NULL);
676 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
677 	if (err)
678 		return (err);
679 	if (tail == NULL) {
680 		dsl_dir_close(pdd, FTAG);
681 		return (EEXIST);
682 	}
683 
684 	oa.userfunc = func;
685 	oa.userarg = arg;
686 	oa.lastname = tail;
687 	oa.type = type;
688 	oa.flags = flags;
689 
690 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
691 	    dmu_objset_create_sync, pdd, &oa, 5);
692 	dsl_dir_close(pdd, FTAG);
693 	return (err);
694 }
695 
696 int
697 dmu_objset_clone(const char *name, dsl_dataset_t *clone_origin, uint64_t flags)
698 {
699 	dsl_dir_t *pdd;
700 	const char *tail;
701 	int err = 0;
702 	struct oscarg oa = { 0 };
703 
704 	ASSERT(strchr(name, '@') == NULL);
705 	err = dsl_dir_open(name, FTAG, &pdd, &tail);
706 	if (err)
707 		return (err);
708 	if (tail == NULL) {
709 		dsl_dir_close(pdd, FTAG);
710 		return (EEXIST);
711 	}
712 
713 	oa.lastname = tail;
714 	oa.clone_origin = clone_origin;
715 	oa.flags = flags;
716 
717 	err = dsl_sync_task_do(pdd->dd_pool, dmu_objset_create_check,
718 	    dmu_objset_create_sync, pdd, &oa, 5);
719 	dsl_dir_close(pdd, FTAG);
720 	return (err);
721 }
722 
723 int
724 dmu_objset_destroy(const char *name, boolean_t defer)
725 {
726 	dsl_dataset_t *ds;
727 	int error;
728 
729 	/*
730 	 * dsl_dataset_destroy() can free any claimed-but-unplayed
731 	 * intent log, but if there is an active log, it has blocks that
732 	 * are allocated, but may not yet be reflected in the on-disk
733 	 * structure.  Only the ZIL knows how to free them, so we have
734 	 * to call into it here.
735 	 */
736 	error = dsl_dataset_own(name, B_TRUE, FTAG, &ds);
737 	if (error == 0) {
738 		objset_t *os;
739 		if (dmu_objset_from_ds(ds, &os) == 0)
740 			zil_destroy(dmu_objset_zil(os), B_FALSE);
741 		error = dsl_dataset_destroy(ds, FTAG, defer);
742 		/* dsl_dataset_destroy() closes the ds. */
743 	}
744 
745 	return (error);
746 }
747 
748 struct snaparg {
749 	dsl_sync_task_group_t *dstg;
750 	char *snapname;
751 	char failed[MAXPATHLEN];
752 	boolean_t recursive;
753 	nvlist_t *props;
754 };
755 
756 static int
757 snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
758 {
759 	objset_t *os = arg1;
760 	struct snaparg *sn = arg2;
761 
762 	/* The props have already been checked by zfs_check_userprops(). */
763 
764 	return (dsl_dataset_snapshot_check(os->os_dsl_dataset,
765 	    sn->snapname, tx));
766 }
767 
768 static void
769 snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
770 {
771 	objset_t *os = arg1;
772 	dsl_dataset_t *ds = os->os_dsl_dataset;
773 	struct snaparg *sn = arg2;
774 
775 	dsl_dataset_snapshot_sync(ds, sn->snapname, cr, tx);
776 
777 	if (sn->props) {
778 		dsl_props_arg_t pa;
779 		pa.pa_props = sn->props;
780 		pa.pa_source = ZPROP_SRC_LOCAL;
781 		dsl_props_set_sync(ds->ds_prev, &pa, cr, tx);
782 	}
783 }
784 
785 static int
786 dmu_objset_snapshot_one(const char *name, void *arg)
787 {
788 	struct snaparg *sn = arg;
789 	objset_t *os;
790 	int err;
791 	char *cp;
792 
793 	/*
794 	 * If the objset starts with a '%', then ignore it unless it was
795 	 * explicitly named (ie, not recursive).  These hidden datasets
796 	 * are always inconsistent, and by not opening them here, we can
797 	 * avoid a race with dsl_dir_destroy_check().
798 	 */
799 	cp = strrchr(name, '/');
800 	if (cp && cp[1] == '%' && sn->recursive)
801 		return (0);
802 
803 	(void) strcpy(sn->failed, name);
804 
805 	/*
806 	 * Check permissions if we are doing a recursive snapshot.  The
807 	 * permission checks for the starting dataset have already been
808 	 * performed in zfs_secpolicy_snapshot()
809 	 */
810 	if (sn->recursive && (err = zfs_secpolicy_snapshot_perms(name, CRED())))
811 		return (err);
812 
813 	err = dmu_objset_hold(name, sn, &os);
814 	if (err != 0)
815 		return (err);
816 
817 	/*
818 	 * If the objset is in an inconsistent state (eg, in the process
819 	 * of being destroyed), don't snapshot it.  As with %hidden
820 	 * datasets, we return EBUSY if this name was explicitly
821 	 * requested (ie, not recursive), and otherwise ignore it.
822 	 */
823 	if (os->os_dsl_dataset->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) {
824 		dmu_objset_rele(os, sn);
825 		return (sn->recursive ? 0 : EBUSY);
826 	}
827 
828 	/*
829 	 * NB: we need to wait for all in-flight changes to get to disk,
830 	 * so that we snapshot those changes.  zil_suspend does this as
831 	 * a side effect.
832 	 */
833 	err = zil_suspend(dmu_objset_zil(os));
834 	if (err == 0) {
835 		dsl_sync_task_create(sn->dstg, snapshot_check,
836 		    snapshot_sync, os, sn, 3);
837 	} else {
838 		dmu_objset_rele(os, sn);
839 	}
840 
841 	return (err);
842 }
843 
844 int
845 dmu_objset_snapshot(char *fsname, char *snapname,
846     nvlist_t *props, boolean_t recursive)
847 {
848 	dsl_sync_task_t *dst;
849 	struct snaparg sn;
850 	spa_t *spa;
851 	int err;
852 
853 	(void) strcpy(sn.failed, fsname);
854 
855 	err = spa_open(fsname, &spa, FTAG);
856 	if (err)
857 		return (err);
858 
859 	sn.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
860 	sn.snapname = snapname;
861 	sn.props = props;
862 	sn.recursive = recursive;
863 
864 	if (recursive) {
865 		err = dmu_objset_find(fsname,
866 		    dmu_objset_snapshot_one, &sn, DS_FIND_CHILDREN);
867 	} else {
868 		err = dmu_objset_snapshot_one(fsname, &sn);
869 	}
870 
871 	if (err == 0)
872 		err = dsl_sync_task_group_wait(sn.dstg);
873 
874 	for (dst = list_head(&sn.dstg->dstg_tasks); dst;
875 	    dst = list_next(&sn.dstg->dstg_tasks, dst)) {
876 		objset_t *os = dst->dst_arg1;
877 		dsl_dataset_t *ds = os->os_dsl_dataset;
878 		if (dst->dst_err)
879 			dsl_dataset_name(ds, sn.failed);
880 		zil_resume(dmu_objset_zil(os));
881 		dmu_objset_rele(os, &sn);
882 	}
883 
884 	if (err)
885 		(void) strcpy(fsname, sn.failed);
886 	dsl_sync_task_group_destroy(sn.dstg);
887 	spa_close(spa, FTAG);
888 	return (err);
889 }
890 
891 static void
892 dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
893 {
894 	dnode_t *dn;
895 
896 	while (dn = list_head(list)) {
897 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
898 		ASSERT(dn->dn_dbuf->db_data_pending);
899 		/*
900 		 * Initialize dn_zio outside dnode_sync() because the
901 		 * meta-dnode needs to set it ouside dnode_sync().
902 		 */
903 		dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
904 		ASSERT(dn->dn_zio);
905 
906 		ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
907 		list_remove(list, dn);
908 
909 		if (newlist) {
910 			(void) dnode_add_ref(dn, newlist);
911 			list_insert_tail(newlist, dn);
912 		}
913 
914 		dnode_sync(dn, tx);
915 	}
916 }
917 
918 /* ARGSUSED */
919 static void
920 dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
921 {
922 	blkptr_t *bp = zio->io_bp;
923 	objset_t *os = arg;
924 	dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
925 
926 	ASSERT(bp == os->os_rootbp);
927 	ASSERT(BP_GET_TYPE(bp) == DMU_OT_OBJSET);
928 	ASSERT(BP_GET_LEVEL(bp) == 0);
929 
930 	/*
931 	 * Update rootbp fill count: it should be the number of objects
932 	 * allocated in the object set (not counting the "special"
933 	 * objects that are stored in the objset_phys_t -- the meta
934 	 * dnode and user/group accounting objects).
935 	 */
936 	bp->blk_fill = 0;
937 	for (int i = 0; i < dnp->dn_nblkptr; i++)
938 		bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
939 }
940 
941 /* ARGSUSED */
942 static void
943 dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
944 {
945 	blkptr_t *bp = zio->io_bp;
946 	blkptr_t *bp_orig = &zio->io_bp_orig;
947 	objset_t *os = arg;
948 
949 	if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
950 		ASSERT(BP_EQUAL(bp, bp_orig));
951 	} else {
952 		dsl_dataset_t *ds = os->os_dsl_dataset;
953 		dmu_tx_t *tx = os->os_synctx;
954 
955 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
956 		dsl_dataset_block_born(ds, bp, tx);
957 	}
958 }
959 
960 /* called from dsl */
961 void
962 dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
963 {
964 	int txgoff;
965 	zbookmark_t zb;
966 	zio_prop_t zp;
967 	zio_t *zio;
968 	list_t *list;
969 	list_t *newlist = NULL;
970 	dbuf_dirty_record_t *dr;
971 
972 	dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
973 
974 	ASSERT(dmu_tx_is_syncing(tx));
975 	/* XXX the write_done callback should really give us the tx... */
976 	os->os_synctx = tx;
977 
978 	if (os->os_dsl_dataset == NULL) {
979 		/*
980 		 * This is the MOS.  If we have upgraded,
981 		 * spa_max_replication() could change, so reset
982 		 * os_copies here.
983 		 */
984 		os->os_copies = spa_max_replication(os->os_spa);
985 	}
986 
987 	/*
988 	 * Create the root block IO
989 	 */
990 	arc_release(os->os_phys_buf, &os->os_phys_buf);
991 
992 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
993 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
994 	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
995 
996 	dmu_write_policy(os, NULL, 0, 0, &zp);
997 
998 	zio = arc_write(pio, os->os_spa, tx->tx_txg,
999 	    os->os_rootbp, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp,
1000 	    dmu_objset_write_ready, dmu_objset_write_done, os,
1001 	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
1002 
1003 	/*
1004 	 * Sync special dnodes - the parent IO for the sync is the root block
1005 	 */
1006 	os->os_meta_dnode->dn_zio = zio;
1007 	dnode_sync(os->os_meta_dnode, tx);
1008 
1009 	os->os_phys->os_flags = os->os_flags;
1010 
1011 	if (os->os_userused_dnode &&
1012 	    os->os_userused_dnode->dn_type != DMU_OT_NONE) {
1013 		os->os_userused_dnode->dn_zio = zio;
1014 		dnode_sync(os->os_userused_dnode, tx);
1015 		os->os_groupused_dnode->dn_zio = zio;
1016 		dnode_sync(os->os_groupused_dnode, tx);
1017 	}
1018 
1019 	txgoff = tx->tx_txg & TXG_MASK;
1020 
1021 	if (dmu_objset_userused_enabled(os)) {
1022 		newlist = &os->os_synced_dnodes;
1023 		/*
1024 		 * We must create the list here because it uses the
1025 		 * dn_dirty_link[] of this txg.
1026 		 */
1027 		list_create(newlist, sizeof (dnode_t),
1028 		    offsetof(dnode_t, dn_dirty_link[txgoff]));
1029 	}
1030 
1031 	dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx);
1032 	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
1033 
1034 	list = &os->os_meta_dnode->dn_dirty_records[txgoff];
1035 	while (dr = list_head(list)) {
1036 		ASSERT(dr->dr_dbuf->db_level == 0);
1037 		list_remove(list, dr);
1038 		if (dr->dr_zio)
1039 			zio_nowait(dr->dr_zio);
1040 	}
1041 	/*
1042 	 * Free intent log blocks up to this tx.
1043 	 */
1044 	zil_sync(os->os_zil, tx);
1045 	os->os_phys->os_zil_header = os->os_zil_header;
1046 	zio_nowait(zio);
1047 }
1048 
1049 boolean_t
1050 dmu_objset_is_dirty(objset_t *os, uint64_t txg)
1051 {
1052 	return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) ||
1053 	    !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK]));
1054 }
1055 
1056 static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
1057 
1058 void
1059 dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
1060 {
1061 	used_cbs[ost] = cb;
1062 }
1063 
1064 boolean_t
1065 dmu_objset_userused_enabled(objset_t *os)
1066 {
1067 	return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
1068 	    used_cbs[os->os_phys->os_type] &&
1069 	    os->os_userused_dnode);
1070 }
1071 
1072 static void
1073 do_userquota_update(objset_t *os, uint64_t used, uint64_t flags,
1074     uint64_t user, uint64_t group, boolean_t subtract, dmu_tx_t *tx)
1075 {
1076 	if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) {
1077 		int64_t delta = DNODE_SIZE + used;
1078 		if (subtract)
1079 			delta = -delta;
1080 		VERIFY3U(0, ==, zap_increment_int(os, DMU_USERUSED_OBJECT,
1081 		    user, delta, tx));
1082 		VERIFY3U(0, ==, zap_increment_int(os, DMU_GROUPUSED_OBJECT,
1083 		    group, delta, tx));
1084 	}
1085 }
1086 
1087 void
1088 dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
1089 {
1090 	dnode_t *dn;
1091 	list_t *list = &os->os_synced_dnodes;
1092 
1093 	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
1094 
1095 	while (dn = list_head(list)) {
1096 		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
1097 		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
1098 		    dn->dn_phys->dn_flags &
1099 		    DNODE_FLAG_USERUSED_ACCOUNTED);
1100 
1101 		/* Allocate the user/groupused objects if necessary. */
1102 		if (os->os_userused_dnode->dn_type == DMU_OT_NONE) {
1103 			VERIFY(0 == zap_create_claim(os,
1104 			    DMU_USERUSED_OBJECT,
1105 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1106 			VERIFY(0 == zap_create_claim(os,
1107 			    DMU_GROUPUSED_OBJECT,
1108 			    DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
1109 		}
1110 
1111 		/*
1112 		 * We intentionally modify the zap object even if the
1113 		 * net delta is zero.  Otherwise
1114 		 * the block of the zap obj could be shared between
1115 		 * datasets but need to be different between them after
1116 		 * a bprewrite.
1117 		 */
1118 
1119 		/*
1120 		 * The mutex is needed here for interlock with dnode_allocate.
1121 		 */
1122 		mutex_enter(&dn->dn_mtx);
1123 		ASSERT(dn->dn_id_flags);
1124 		if (dn->dn_id_flags & DN_ID_OLD_EXIST)  {
1125 			do_userquota_update(os, dn->dn_oldused, dn->dn_oldflags,
1126 			    dn->dn_olduid, dn->dn_oldgid, B_TRUE, tx);
1127 		}
1128 		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1129 			do_userquota_update(os, DN_USED_BYTES(dn->dn_phys),
1130 			    dn->dn_phys->dn_flags,  dn->dn_newuid,
1131 			    dn->dn_newgid, B_FALSE, tx);
1132 		}
1133 
1134 		dn->dn_oldused = 0;
1135 		dn->dn_oldflags = 0;
1136 		if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
1137 			dn->dn_olduid = dn->dn_newuid;
1138 			dn->dn_oldgid = dn->dn_newgid;
1139 			dn->dn_id_flags |= DN_ID_OLD_EXIST;
1140 			if (dn->dn_bonuslen == 0)
1141 				dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1142 			else
1143 				dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1144 		}
1145 		dn->dn_id_flags &= ~(DN_ID_NEW_EXIST|DN_ID_SYNC);
1146 		mutex_exit(&dn->dn_mtx);
1147 
1148 		list_remove(list, dn);
1149 		dnode_rele(dn, list);
1150 	}
1151 }
1152 
1153 void
1154 dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before)
1155 {
1156 	objset_t *os = dn->dn_objset;
1157 	void *data = NULL;
1158 	dmu_buf_t *spilldb = NULL;
1159 	uint64_t *user, *group;
1160 	int flags = dn->dn_id_flags;
1161 	int error;
1162 
1163 	if (!dmu_objset_userused_enabled(dn->dn_objset))
1164 		return;
1165 
1166 	if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
1167 	    DN_ID_CHKED_SPILL)))
1168 		return;
1169 
1170 	if (before && dn->dn_bonuslen != 0)
1171 		data = DN_BONUS(dn->dn_phys);
1172 	else if (!before && dn->dn_bonuslen != 0)
1173 		data = dn->dn_bonus != NULL ?
1174 		    dn->dn_bonus->db.db_data : DN_BONUS(dn->dn_phys);
1175 	else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
1176 			int rf = 0;
1177 
1178 			if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
1179 				rf |= DB_RF_HAVESTRUCT;
1180 			error = dmu_spill_hold_by_dnode(dn, rf, FTAG, &spilldb);
1181 			ASSERT(error == 0);
1182 			data = spilldb->db_data;
1183 	} else {
1184 		mutex_enter(&dn->dn_mtx);
1185 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1186 		mutex_exit(&dn->dn_mtx);
1187 		return;
1188 	}
1189 
1190 	if (before) {
1191 		user = &dn->dn_olduid;
1192 		group = &dn->dn_oldgid;
1193 	} else {
1194 		user = &dn->dn_newuid;
1195 		group = &dn->dn_newgid;
1196 	}
1197 
1198 	ASSERT(data);
1199 	error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
1200 	    user, group);
1201 
1202 	mutex_enter(&dn->dn_mtx);
1203 	if (error == 0 && before)
1204 		dn->dn_id_flags |= DN_ID_OLD_EXIST;
1205 	if (error == 0 && !before)
1206 		dn->dn_id_flags |= DN_ID_NEW_EXIST;
1207 
1208 	if (spilldb) {
1209 		dn->dn_id_flags |= DN_ID_CHKED_SPILL;
1210 	} else {
1211 		dn->dn_id_flags |= DN_ID_CHKED_BONUS;
1212 	}
1213 	mutex_exit(&dn->dn_mtx);
1214 	if (spilldb)
1215 		dmu_buf_rele(spilldb, FTAG);
1216 }
1217 
1218 boolean_t
1219 dmu_objset_userspace_present(objset_t *os)
1220 {
1221 	return (os->os_phys->os_flags &
1222 	    OBJSET_FLAG_USERACCOUNTING_COMPLETE);
1223 }
1224 
1225 int
1226 dmu_objset_userspace_upgrade(objset_t *os)
1227 {
1228 	uint64_t obj;
1229 	int err = 0;
1230 
1231 	if (dmu_objset_userspace_present(os))
1232 		return (0);
1233 	if (!dmu_objset_userused_enabled(os))
1234 		return (ENOTSUP);
1235 	if (dmu_objset_is_snapshot(os))
1236 		return (EINVAL);
1237 
1238 	/*
1239 	 * We simply need to mark every object dirty, so that it will be
1240 	 * synced out and now accounted.  If this is called
1241 	 * concurrently, or if we already did some work before crashing,
1242 	 * that's fine, since we track each object's accounted state
1243 	 * independently.
1244 	 */
1245 
1246 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
1247 		dmu_tx_t *tx;
1248 		dmu_buf_t *db;
1249 		int objerr;
1250 
1251 		if (issig(JUSTLOOKING) && issig(FORREAL))
1252 			return (EINTR);
1253 
1254 		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
1255 		if (objerr)
1256 			continue;
1257 		tx = dmu_tx_create(os);
1258 		dmu_tx_hold_bonus(tx, obj);
1259 		objerr = dmu_tx_assign(tx, TXG_WAIT);
1260 		if (objerr) {
1261 			dmu_tx_abort(tx);
1262 			continue;
1263 		}
1264 		dmu_buf_will_dirty(db, tx);
1265 		dmu_buf_rele(db, FTAG);
1266 		dmu_tx_commit(tx);
1267 	}
1268 
1269 	os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
1270 	txg_wait_synced(dmu_objset_pool(os), 0);
1271 	return (0);
1272 }
1273 
1274 void
1275 dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
1276     uint64_t *usedobjsp, uint64_t *availobjsp)
1277 {
1278 	dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
1279 	    usedobjsp, availobjsp);
1280 }
1281 
1282 uint64_t
1283 dmu_objset_fsid_guid(objset_t *os)
1284 {
1285 	return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
1286 }
1287 
1288 void
1289 dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
1290 {
1291 	stat->dds_type = os->os_phys->os_type;
1292 	if (os->os_dsl_dataset)
1293 		dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
1294 }
1295 
1296 void
1297 dmu_objset_stats(objset_t *os, nvlist_t *nv)
1298 {
1299 	ASSERT(os->os_dsl_dataset ||
1300 	    os->os_phys->os_type == DMU_OST_META);
1301 
1302 	if (os->os_dsl_dataset != NULL)
1303 		dsl_dataset_stats(os->os_dsl_dataset, nv);
1304 
1305 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
1306 	    os->os_phys->os_type);
1307 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
1308 	    dmu_objset_userspace_present(os));
1309 }
1310 
1311 int
1312 dmu_objset_is_snapshot(objset_t *os)
1313 {
1314 	if (os->os_dsl_dataset != NULL)
1315 		return (dsl_dataset_is_snapshot(os->os_dsl_dataset));
1316 	else
1317 		return (B_FALSE);
1318 }
1319 
1320 int
1321 dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
1322     boolean_t *conflict)
1323 {
1324 	dsl_dataset_t *ds = os->os_dsl_dataset;
1325 	uint64_t ignored;
1326 
1327 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1328 		return (ENOENT);
1329 
1330 	return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
1331 	    ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
1332 	    real, maxlen, conflict));
1333 }
1334 
1335 int
1336 dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
1337     uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
1338 {
1339 	dsl_dataset_t *ds = os->os_dsl_dataset;
1340 	zap_cursor_t cursor;
1341 	zap_attribute_t attr;
1342 
1343 	if (ds->ds_phys->ds_snapnames_zapobj == 0)
1344 		return (ENOENT);
1345 
1346 	zap_cursor_init_serialized(&cursor,
1347 	    ds->ds_dir->dd_pool->dp_meta_objset,
1348 	    ds->ds_phys->ds_snapnames_zapobj, *offp);
1349 
1350 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1351 		zap_cursor_fini(&cursor);
1352 		return (ENOENT);
1353 	}
1354 
1355 	if (strlen(attr.za_name) + 1 > namelen) {
1356 		zap_cursor_fini(&cursor);
1357 		return (ENAMETOOLONG);
1358 	}
1359 
1360 	(void) strcpy(name, attr.za_name);
1361 	if (idp)
1362 		*idp = attr.za_first_integer;
1363 	if (case_conflict)
1364 		*case_conflict = attr.za_normalization_conflict;
1365 	zap_cursor_advance(&cursor);
1366 	*offp = zap_cursor_serialize(&cursor);
1367 	zap_cursor_fini(&cursor);
1368 
1369 	return (0);
1370 }
1371 
1372 int
1373 dmu_dir_list_next(objset_t *os, int namelen, char *name,
1374     uint64_t *idp, uint64_t *offp)
1375 {
1376 	dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
1377 	zap_cursor_t cursor;
1378 	zap_attribute_t attr;
1379 
1380 	/* there is no next dir on a snapshot! */
1381 	if (os->os_dsl_dataset->ds_object !=
1382 	    dd->dd_phys->dd_head_dataset_obj)
1383 		return (ENOENT);
1384 
1385 	zap_cursor_init_serialized(&cursor,
1386 	    dd->dd_pool->dp_meta_objset,
1387 	    dd->dd_phys->dd_child_dir_zapobj, *offp);
1388 
1389 	if (zap_cursor_retrieve(&cursor, &attr) != 0) {
1390 		zap_cursor_fini(&cursor);
1391 		return (ENOENT);
1392 	}
1393 
1394 	if (strlen(attr.za_name) + 1 > namelen) {
1395 		zap_cursor_fini(&cursor);
1396 		return (ENAMETOOLONG);
1397 	}
1398 
1399 	(void) strcpy(name, attr.za_name);
1400 	if (idp)
1401 		*idp = attr.za_first_integer;
1402 	zap_cursor_advance(&cursor);
1403 	*offp = zap_cursor_serialize(&cursor);
1404 	zap_cursor_fini(&cursor);
1405 
1406 	return (0);
1407 }
1408 
1409 struct findarg {
1410 	int (*func)(const char *, void *);
1411 	void *arg;
1412 };
1413 
1414 /* ARGSUSED */
1415 static int
1416 findfunc(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
1417 {
1418 	struct findarg *fa = arg;
1419 	return (fa->func(dsname, fa->arg));
1420 }
1421 
1422 /*
1423  * Find all objsets under name, and for each, call 'func(child_name, arg)'.
1424  * Perhaps change all callers to use dmu_objset_find_spa()?
1425  */
1426 int
1427 dmu_objset_find(char *name, int func(const char *, void *), void *arg,
1428     int flags)
1429 {
1430 	struct findarg fa;
1431 	fa.func = func;
1432 	fa.arg = arg;
1433 	return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags));
1434 }
1435 
1436 /*
1437  * Find all objsets under name, call func on each
1438  */
1439 int
1440 dmu_objset_find_spa(spa_t *spa, const char *name,
1441     int func(spa_t *, uint64_t, const char *, void *), void *arg, int flags)
1442 {
1443 	dsl_dir_t *dd;
1444 	dsl_pool_t *dp;
1445 	dsl_dataset_t *ds;
1446 	zap_cursor_t zc;
1447 	zap_attribute_t *attr;
1448 	char *child;
1449 	uint64_t thisobj;
1450 	int err;
1451 
1452 	if (name == NULL)
1453 		name = spa_name(spa);
1454 	err = dsl_dir_open_spa(spa, name, FTAG, &dd, NULL);
1455 	if (err)
1456 		return (err);
1457 
1458 	/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
1459 	if (dd->dd_myname[0] == '$') {
1460 		dsl_dir_close(dd, FTAG);
1461 		return (0);
1462 	}
1463 
1464 	thisobj = dd->dd_phys->dd_head_dataset_obj;
1465 	attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1466 	dp = dd->dd_pool;
1467 
1468 	/*
1469 	 * Iterate over all children.
1470 	 */
1471 	if (flags & DS_FIND_CHILDREN) {
1472 		for (zap_cursor_init(&zc, dp->dp_meta_objset,
1473 		    dd->dd_phys->dd_child_dir_zapobj);
1474 		    zap_cursor_retrieve(&zc, attr) == 0;
1475 		    (void) zap_cursor_advance(&zc)) {
1476 			ASSERT(attr->za_integer_length == sizeof (uint64_t));
1477 			ASSERT(attr->za_num_integers == 1);
1478 
1479 			child = kmem_asprintf("%s/%s", name, attr->za_name);
1480 			err = dmu_objset_find_spa(spa, child, func, arg, flags);
1481 			strfree(child);
1482 			if (err)
1483 				break;
1484 		}
1485 		zap_cursor_fini(&zc);
1486 
1487 		if (err) {
1488 			dsl_dir_close(dd, FTAG);
1489 			kmem_free(attr, sizeof (zap_attribute_t));
1490 			return (err);
1491 		}
1492 	}
1493 
1494 	/*
1495 	 * Iterate over all snapshots.
1496 	 */
1497 	if (flags & DS_FIND_SNAPSHOTS) {
1498 		if (!dsl_pool_sync_context(dp))
1499 			rw_enter(&dp->dp_config_rwlock, RW_READER);
1500 		err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
1501 		if (!dsl_pool_sync_context(dp))
1502 			rw_exit(&dp->dp_config_rwlock);
1503 
1504 		if (err == 0) {
1505 			uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
1506 			dsl_dataset_rele(ds, FTAG);
1507 
1508 			for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
1509 			    zap_cursor_retrieve(&zc, attr) == 0;
1510 			    (void) zap_cursor_advance(&zc)) {
1511 				ASSERT(attr->za_integer_length ==
1512 				    sizeof (uint64_t));
1513 				ASSERT(attr->za_num_integers == 1);
1514 
1515 				child = kmem_asprintf("%s@%s",
1516 				    name, attr->za_name);
1517 				err = func(spa, attr->za_first_integer,
1518 				    child, arg);
1519 				strfree(child);
1520 				if (err)
1521 					break;
1522 			}
1523 			zap_cursor_fini(&zc);
1524 		}
1525 	}
1526 
1527 	dsl_dir_close(dd, FTAG);
1528 	kmem_free(attr, sizeof (zap_attribute_t));
1529 
1530 	if (err)
1531 		return (err);
1532 
1533 	/*
1534 	 * Apply to self if appropriate.
1535 	 */
1536 	err = func(spa, thisobj, name, arg);
1537 	return (err);
1538 }
1539 
1540 /* ARGSUSED */
1541 int
1542 dmu_objset_prefetch(const char *name, void *arg)
1543 {
1544 	dsl_dataset_t *ds;
1545 
1546 	if (dsl_dataset_hold(name, FTAG, &ds))
1547 		return (0);
1548 
1549 	if (!BP_IS_HOLE(&ds->ds_phys->ds_bp)) {
1550 		mutex_enter(&ds->ds_opening_lock);
1551 		if (ds->ds_objset == NULL) {
1552 			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1553 			zbookmark_t zb;
1554 
1555 			SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
1556 			    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
1557 
1558 			(void) arc_read_nolock(NULL, dsl_dataset_get_spa(ds),
1559 			    &ds->ds_phys->ds_bp, NULL, NULL,
1560 			    ZIO_PRIORITY_ASYNC_READ,
1561 			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1562 			    &aflags, &zb);
1563 		}
1564 		mutex_exit(&ds->ds_opening_lock);
1565 	}
1566 
1567 	dsl_dataset_rele(ds, FTAG);
1568 	return (0);
1569 }
1570 
1571 void
1572 dmu_objset_set_user(objset_t *os, void *user_ptr)
1573 {
1574 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1575 	os->os_user_ptr = user_ptr;
1576 }
1577 
1578 void *
1579 dmu_objset_get_user(objset_t *os)
1580 {
1581 	ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
1582 	return (os->os_user_ptr);
1583 }
1584