xref: /titanic_51/usr/src/uts/common/fs/zfs/dsl_dataset.c (revision 77372cb0f35e8d3615ca2e16044f033397e88e21)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012 by Delphix. All rights reserved.
24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25  */
26 
27 #include <sys/dmu_objset.h>
28 #include <sys/dsl_dataset.h>
29 #include <sys/dsl_dir.h>
30 #include <sys/dsl_prop.h>
31 #include <sys/dsl_synctask.h>
32 #include <sys/dmu_traverse.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/arc.h>
36 #include <sys/zio.h>
37 #include <sys/zap.h>
38 #include <sys/zfeature.h>
39 #include <sys/unique.h>
40 #include <sys/zfs_context.h>
41 #include <sys/zfs_ioctl.h>
42 #include <sys/spa.h>
43 #include <sys/zfs_znode.h>
44 #include <sys/zfs_onexit.h>
45 #include <sys/zvol.h>
46 #include <sys/dsl_scan.h>
47 #include <sys/dsl_deadlist.h>
48 
49 static char *dsl_reaper = "the grim reaper";
50 
51 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
52 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
53 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
54 
55 #define	SWITCH64(x, y) \
56 	{ \
57 		uint64_t __tmp = (x); \
58 		(x) = (y); \
59 		(y) = __tmp; \
60 	}
61 
62 #define	DS_REF_MAX	(1ULL << 62)
63 
64 #define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE
65 
66 #define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
67 
68 
69 /*
70  * Figure out how much of this delta should be propogated to the dsl_dir
71  * layer.  If there's a refreservation, that space has already been
72  * partially accounted for in our ancestors.
73  */
74 static int64_t
75 parent_delta(dsl_dataset_t *ds, int64_t delta)
76 {
77 	uint64_t old_bytes, new_bytes;
78 
79 	if (ds->ds_reserved == 0)
80 		return (delta);
81 
82 	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
83 	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
84 
85 	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
86 	return (new_bytes - old_bytes);
87 }
88 
89 void
90 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
91 {
92 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
93 	int compressed = BP_GET_PSIZE(bp);
94 	int uncompressed = BP_GET_UCSIZE(bp);
95 	int64_t delta;
96 
97 	dprintf_bp(bp, "ds=%p", ds);
98 
99 	ASSERT(dmu_tx_is_syncing(tx));
100 	/* It could have been compressed away to nothing */
101 	if (BP_IS_HOLE(bp))
102 		return;
103 	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
104 	ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
105 	if (ds == NULL) {
106 		dsl_pool_mos_diduse_space(tx->tx_pool,
107 		    used, compressed, uncompressed);
108 		return;
109 	}
110 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
111 
112 	mutex_enter(&ds->ds_dir->dd_lock);
113 	mutex_enter(&ds->ds_lock);
114 	delta = parent_delta(ds, used);
115 	ds->ds_phys->ds_referenced_bytes += used;
116 	ds->ds_phys->ds_compressed_bytes += compressed;
117 	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
118 	ds->ds_phys->ds_unique_bytes += used;
119 	mutex_exit(&ds->ds_lock);
120 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
121 	    compressed, uncompressed, tx);
122 	dsl_dir_transfer_space(ds->ds_dir, used - delta,
123 	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
124 	mutex_exit(&ds->ds_dir->dd_lock);
125 }
126 
127 int
128 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
129     boolean_t async)
130 {
131 	if (BP_IS_HOLE(bp))
132 		return (0);
133 
134 	ASSERT(dmu_tx_is_syncing(tx));
135 	ASSERT(bp->blk_birth <= tx->tx_txg);
136 
137 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
138 	int compressed = BP_GET_PSIZE(bp);
139 	int uncompressed = BP_GET_UCSIZE(bp);
140 
141 	ASSERT(used > 0);
142 	if (ds == NULL) {
143 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
144 		dsl_pool_mos_diduse_space(tx->tx_pool,
145 		    -used, -compressed, -uncompressed);
146 		return (used);
147 	}
148 	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
149 
150 	ASSERT(!dsl_dataset_is_snapshot(ds));
151 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
152 
153 	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
154 		int64_t delta;
155 
156 		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
157 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
158 
159 		mutex_enter(&ds->ds_dir->dd_lock);
160 		mutex_enter(&ds->ds_lock);
161 		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
162 		    !DS_UNIQUE_IS_ACCURATE(ds));
163 		delta = parent_delta(ds, -used);
164 		ds->ds_phys->ds_unique_bytes -= used;
165 		mutex_exit(&ds->ds_lock);
166 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
167 		    delta, -compressed, -uncompressed, tx);
168 		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
169 		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
170 		mutex_exit(&ds->ds_dir->dd_lock);
171 	} else {
172 		dprintf_bp(bp, "putting on dead list: %s", "");
173 		if (async) {
174 			/*
175 			 * We are here as part of zio's write done callback,
176 			 * which means we're a zio interrupt thread.  We can't
177 			 * call dsl_deadlist_insert() now because it may block
178 			 * waiting for I/O.  Instead, put bp on the deferred
179 			 * queue and let dsl_pool_sync() finish the job.
180 			 */
181 			bplist_append(&ds->ds_pending_deadlist, bp);
182 		} else {
183 			dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
184 		}
185 		ASSERT3U(ds->ds_prev->ds_object, ==,
186 		    ds->ds_phys->ds_prev_snap_obj);
187 		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
188 		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
189 		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
190 		    ds->ds_object && bp->blk_birth >
191 		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
192 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
193 			mutex_enter(&ds->ds_prev->ds_lock);
194 			ds->ds_prev->ds_phys->ds_unique_bytes += used;
195 			mutex_exit(&ds->ds_prev->ds_lock);
196 		}
197 		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
198 			dsl_dir_transfer_space(ds->ds_dir, used,
199 			    DD_USED_HEAD, DD_USED_SNAP, tx);
200 		}
201 	}
202 	mutex_enter(&ds->ds_lock);
203 	ASSERT3U(ds->ds_phys->ds_referenced_bytes, >=, used);
204 	ds->ds_phys->ds_referenced_bytes -= used;
205 	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
206 	ds->ds_phys->ds_compressed_bytes -= compressed;
207 	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
208 	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
209 	mutex_exit(&ds->ds_lock);
210 
211 	return (used);
212 }
213 
214 uint64_t
215 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
216 {
217 	uint64_t trysnap = 0;
218 
219 	if (ds == NULL)
220 		return (0);
221 	/*
222 	 * The snapshot creation could fail, but that would cause an
223 	 * incorrect FALSE return, which would only result in an
224 	 * overestimation of the amount of space that an operation would
225 	 * consume, which is OK.
226 	 *
227 	 * There's also a small window where we could miss a pending
228 	 * snapshot, because we could set the sync task in the quiescing
229 	 * phase.  So this should only be used as a guess.
230 	 */
231 	if (ds->ds_trysnap_txg >
232 	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
233 		trysnap = ds->ds_trysnap_txg;
234 	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
235 }
236 
237 boolean_t
238 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
239     uint64_t blk_birth)
240 {
241 	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
242 		return (B_FALSE);
243 
244 	ddt_prefetch(dsl_dataset_get_spa(ds), bp);
245 
246 	return (B_TRUE);
247 }
248 
249 /* ARGSUSED */
250 static void
251 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
252 {
253 	dsl_dataset_t *ds = dsv;
254 
255 	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
256 
257 	unique_remove(ds->ds_fsid_guid);
258 
259 	if (ds->ds_objset != NULL)
260 		dmu_objset_evict(ds->ds_objset);
261 
262 	if (ds->ds_prev) {
263 		dsl_dataset_drop_ref(ds->ds_prev, ds);
264 		ds->ds_prev = NULL;
265 	}
266 
267 	bplist_destroy(&ds->ds_pending_deadlist);
268 	if (db != NULL) {
269 		dsl_deadlist_close(&ds->ds_deadlist);
270 	} else {
271 		ASSERT(ds->ds_deadlist.dl_dbuf == NULL);
272 		ASSERT(!ds->ds_deadlist.dl_oldfmt);
273 	}
274 	if (ds->ds_dir)
275 		dsl_dir_close(ds->ds_dir, ds);
276 
277 	ASSERT(!list_link_active(&ds->ds_synced_link));
278 
279 	mutex_destroy(&ds->ds_lock);
280 	mutex_destroy(&ds->ds_recvlock);
281 	mutex_destroy(&ds->ds_opening_lock);
282 	rw_destroy(&ds->ds_rwlock);
283 	cv_destroy(&ds->ds_exclusive_cv);
284 
285 	kmem_free(ds, sizeof (dsl_dataset_t));
286 }
287 
288 static int
289 dsl_dataset_get_snapname(dsl_dataset_t *ds)
290 {
291 	dsl_dataset_phys_t *headphys;
292 	int err;
293 	dmu_buf_t *headdbuf;
294 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
295 	objset_t *mos = dp->dp_meta_objset;
296 
297 	if (ds->ds_snapname[0])
298 		return (0);
299 	if (ds->ds_phys->ds_next_snap_obj == 0)
300 		return (0);
301 
302 	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
303 	    FTAG, &headdbuf);
304 	if (err)
305 		return (err);
306 	headphys = headdbuf->db_data;
307 	err = zap_value_search(dp->dp_meta_objset,
308 	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
309 	dmu_buf_rele(headdbuf, FTAG);
310 	return (err);
311 }
312 
313 static int
314 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
315 {
316 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
317 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
318 	matchtype_t mt;
319 	int err;
320 
321 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
322 		mt = MT_FIRST;
323 	else
324 		mt = MT_EXACT;
325 
326 	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
327 	    value, mt, NULL, 0, NULL);
328 	if (err == ENOTSUP && mt == MT_FIRST)
329 		err = zap_lookup(mos, snapobj, name, 8, 1, value);
330 	return (err);
331 }
332 
333 static int
334 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
335 {
336 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
337 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
338 	matchtype_t mt;
339 	int err;
340 
341 	dsl_dir_snap_cmtime_update(ds->ds_dir);
342 
343 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
344 		mt = MT_FIRST;
345 	else
346 		mt = MT_EXACT;
347 
348 	err = zap_remove_norm(mos, snapobj, name, mt, tx);
349 	if (err == ENOTSUP && mt == MT_FIRST)
350 		err = zap_remove(mos, snapobj, name, tx);
351 	return (err);
352 }
353 
354 static int
355 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
356     dsl_dataset_t **dsp)
357 {
358 	objset_t *mos = dp->dp_meta_objset;
359 	dmu_buf_t *dbuf;
360 	dsl_dataset_t *ds;
361 	int err;
362 	dmu_object_info_t doi;
363 
364 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
365 	    dsl_pool_sync_context(dp));
366 
367 	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
368 	if (err)
369 		return (err);
370 
371 	/* Make sure dsobj has the correct object type. */
372 	dmu_object_info_from_db(dbuf, &doi);
373 	if (doi.doi_type != DMU_OT_DSL_DATASET)
374 		return (EINVAL);
375 
376 	ds = dmu_buf_get_user(dbuf);
377 	if (ds == NULL) {
378 		dsl_dataset_t *winner = NULL;
379 
380 		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
381 		ds->ds_dbuf = dbuf;
382 		ds->ds_object = dsobj;
383 		ds->ds_phys = dbuf->db_data;
384 
385 		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
386 		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
387 		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
388 		mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
389 
390 		rw_init(&ds->ds_rwlock, 0, 0, 0);
391 		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
392 
393 		bplist_create(&ds->ds_pending_deadlist);
394 		dsl_deadlist_open(&ds->ds_deadlist,
395 		    mos, ds->ds_phys->ds_deadlist_obj);
396 
397 		list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
398 		    offsetof(dmu_sendarg_t, dsa_link));
399 
400 		if (err == 0) {
401 			err = dsl_dir_open_obj(dp,
402 			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
403 		}
404 		if (err) {
405 			mutex_destroy(&ds->ds_lock);
406 			mutex_destroy(&ds->ds_recvlock);
407 			mutex_destroy(&ds->ds_opening_lock);
408 			rw_destroy(&ds->ds_rwlock);
409 			cv_destroy(&ds->ds_exclusive_cv);
410 			bplist_destroy(&ds->ds_pending_deadlist);
411 			dsl_deadlist_close(&ds->ds_deadlist);
412 			kmem_free(ds, sizeof (dsl_dataset_t));
413 			dmu_buf_rele(dbuf, tag);
414 			return (err);
415 		}
416 
417 		if (!dsl_dataset_is_snapshot(ds)) {
418 			ds->ds_snapname[0] = '\0';
419 			if (ds->ds_phys->ds_prev_snap_obj) {
420 				err = dsl_dataset_get_ref(dp,
421 				    ds->ds_phys->ds_prev_snap_obj,
422 				    ds, &ds->ds_prev);
423 			}
424 		} else {
425 			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
426 				err = dsl_dataset_get_snapname(ds);
427 			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
428 				err = zap_count(
429 				    ds->ds_dir->dd_pool->dp_meta_objset,
430 				    ds->ds_phys->ds_userrefs_obj,
431 				    &ds->ds_userrefs);
432 			}
433 		}
434 
435 		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
436 			/*
437 			 * In sync context, we're called with either no lock
438 			 * or with the write lock.  If we're not syncing,
439 			 * we're always called with the read lock held.
440 			 */
441 			boolean_t need_lock =
442 			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
443 			    dsl_pool_sync_context(dp);
444 
445 			if (need_lock)
446 				rw_enter(&dp->dp_config_rwlock, RW_READER);
447 
448 			err = dsl_prop_get_ds(ds,
449 			    "refreservation", sizeof (uint64_t), 1,
450 			    &ds->ds_reserved, NULL);
451 			if (err == 0) {
452 				err = dsl_prop_get_ds(ds,
453 				    "refquota", sizeof (uint64_t), 1,
454 				    &ds->ds_quota, NULL);
455 			}
456 
457 			if (need_lock)
458 				rw_exit(&dp->dp_config_rwlock);
459 		} else {
460 			ds->ds_reserved = ds->ds_quota = 0;
461 		}
462 
463 		if (err != 0 || (winner = dmu_buf_set_user_ie(dbuf, ds,
464 		    &ds->ds_phys, dsl_dataset_evict)) != NULL) {
465 			bplist_destroy(&ds->ds_pending_deadlist);
466 			dsl_deadlist_close(&ds->ds_deadlist);
467 			if (ds->ds_prev)
468 				dsl_dataset_drop_ref(ds->ds_prev, ds);
469 			dsl_dir_close(ds->ds_dir, ds);
470 			mutex_destroy(&ds->ds_lock);
471 			mutex_destroy(&ds->ds_recvlock);
472 			mutex_destroy(&ds->ds_opening_lock);
473 			rw_destroy(&ds->ds_rwlock);
474 			cv_destroy(&ds->ds_exclusive_cv);
475 			kmem_free(ds, sizeof (dsl_dataset_t));
476 			if (err) {
477 				dmu_buf_rele(dbuf, tag);
478 				return (err);
479 			}
480 			ds = winner;
481 		} else {
482 			ds->ds_fsid_guid =
483 			    unique_insert(ds->ds_phys->ds_fsid_guid);
484 		}
485 	}
486 	ASSERT3P(ds->ds_dbuf, ==, dbuf);
487 	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
488 	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
489 	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
490 	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
491 	mutex_enter(&ds->ds_lock);
492 	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
493 		mutex_exit(&ds->ds_lock);
494 		dmu_buf_rele(ds->ds_dbuf, tag);
495 		return (ENOENT);
496 	}
497 	mutex_exit(&ds->ds_lock);
498 	*dsp = ds;
499 	return (0);
500 }
501 
502 static int
503 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
504 {
505 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
506 
507 	/*
508 	 * In syncing context we don't want the rwlock lock: there
509 	 * may be an existing writer waiting for sync phase to
510 	 * finish.  We don't need to worry about such writers, since
511 	 * sync phase is single-threaded, so the writer can't be
512 	 * doing anything while we are active.
513 	 */
514 	if (dsl_pool_sync_context(dp)) {
515 		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
516 		return (0);
517 	}
518 
519 	/*
520 	 * Normal users will hold the ds_rwlock as a READER until they
521 	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
522 	 * drop their READER lock after they set the ds_owner field.
523 	 *
524 	 * If the dataset is being destroyed, the destroy thread will
525 	 * obtain a WRITER lock for exclusive access after it's done its
526 	 * open-context work and then change the ds_owner to
527 	 * dsl_reaper once destruction is assured.  So threads
528 	 * may block here temporarily, until the "destructability" of
529 	 * the dataset is determined.
530 	 */
531 	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
532 	mutex_enter(&ds->ds_lock);
533 	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
534 		rw_exit(&dp->dp_config_rwlock);
535 		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
536 		if (DSL_DATASET_IS_DESTROYED(ds)) {
537 			mutex_exit(&ds->ds_lock);
538 			dsl_dataset_drop_ref(ds, tag);
539 			rw_enter(&dp->dp_config_rwlock, RW_READER);
540 			return (ENOENT);
541 		}
542 		/*
543 		 * The dp_config_rwlock lives above the ds_lock. And
544 		 * we need to check DSL_DATASET_IS_DESTROYED() while
545 		 * holding the ds_lock, so we have to drop and reacquire
546 		 * the ds_lock here.
547 		 */
548 		mutex_exit(&ds->ds_lock);
549 		rw_enter(&dp->dp_config_rwlock, RW_READER);
550 		mutex_enter(&ds->ds_lock);
551 	}
552 	mutex_exit(&ds->ds_lock);
553 	return (0);
554 }
555 
556 int
557 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
558     dsl_dataset_t **dsp)
559 {
560 	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
561 
562 	if (err)
563 		return (err);
564 	return (dsl_dataset_hold_ref(*dsp, tag));
565 }
566 
567 int
568 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
569     void *tag, dsl_dataset_t **dsp)
570 {
571 	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
572 	if (err)
573 		return (err);
574 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
575 		dsl_dataset_rele(*dsp, tag);
576 		*dsp = NULL;
577 		return (EBUSY);
578 	}
579 	return (0);
580 }
581 
582 int
583 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
584 {
585 	dsl_dir_t *dd;
586 	dsl_pool_t *dp;
587 	const char *snapname;
588 	uint64_t obj;
589 	int err = 0;
590 
591 	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
592 	if (err)
593 		return (err);
594 
595 	dp = dd->dd_pool;
596 	obj = dd->dd_phys->dd_head_dataset_obj;
597 	rw_enter(&dp->dp_config_rwlock, RW_READER);
598 	if (obj)
599 		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
600 	else
601 		err = ENOENT;
602 	if (err)
603 		goto out;
604 
605 	err = dsl_dataset_hold_ref(*dsp, tag);
606 
607 	/* we may be looking for a snapshot */
608 	if (err == 0 && snapname != NULL) {
609 		dsl_dataset_t *ds = NULL;
610 
611 		if (*snapname++ != '@') {
612 			dsl_dataset_rele(*dsp, tag);
613 			err = ENOENT;
614 			goto out;
615 		}
616 
617 		dprintf("looking for snapshot '%s'\n", snapname);
618 		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
619 		if (err == 0)
620 			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
621 		dsl_dataset_rele(*dsp, tag);
622 
623 		ASSERT3U((err == 0), ==, (ds != NULL));
624 
625 		if (ds) {
626 			mutex_enter(&ds->ds_lock);
627 			if (ds->ds_snapname[0] == 0)
628 				(void) strlcpy(ds->ds_snapname, snapname,
629 				    sizeof (ds->ds_snapname));
630 			mutex_exit(&ds->ds_lock);
631 			err = dsl_dataset_hold_ref(ds, tag);
632 			*dsp = err ? NULL : ds;
633 		}
634 	}
635 out:
636 	rw_exit(&dp->dp_config_rwlock);
637 	dsl_dir_close(dd, FTAG);
638 	return (err);
639 }
640 
641 int
642 dsl_dataset_own(const char *name, boolean_t inconsistentok,
643     void *tag, dsl_dataset_t **dsp)
644 {
645 	int err = dsl_dataset_hold(name, tag, dsp);
646 	if (err)
647 		return (err);
648 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
649 		dsl_dataset_rele(*dsp, tag);
650 		return (EBUSY);
651 	}
652 	return (0);
653 }
654 
655 void
656 dsl_dataset_name(dsl_dataset_t *ds, char *name)
657 {
658 	if (ds == NULL) {
659 		(void) strcpy(name, "mos");
660 	} else {
661 		dsl_dir_name(ds->ds_dir, name);
662 		VERIFY(0 == dsl_dataset_get_snapname(ds));
663 		if (ds->ds_snapname[0]) {
664 			(void) strcat(name, "@");
665 			/*
666 			 * We use a "recursive" mutex so that we
667 			 * can call dprintf_ds() with ds_lock held.
668 			 */
669 			if (!MUTEX_HELD(&ds->ds_lock)) {
670 				mutex_enter(&ds->ds_lock);
671 				(void) strcat(name, ds->ds_snapname);
672 				mutex_exit(&ds->ds_lock);
673 			} else {
674 				(void) strcat(name, ds->ds_snapname);
675 			}
676 		}
677 	}
678 }
679 
680 static int
681 dsl_dataset_namelen(dsl_dataset_t *ds)
682 {
683 	int result;
684 
685 	if (ds == NULL) {
686 		result = 3;	/* "mos" */
687 	} else {
688 		result = dsl_dir_namelen(ds->ds_dir);
689 		VERIFY(0 == dsl_dataset_get_snapname(ds));
690 		if (ds->ds_snapname[0]) {
691 			++result;	/* adding one for the @-sign */
692 			if (!MUTEX_HELD(&ds->ds_lock)) {
693 				mutex_enter(&ds->ds_lock);
694 				result += strlen(ds->ds_snapname);
695 				mutex_exit(&ds->ds_lock);
696 			} else {
697 				result += strlen(ds->ds_snapname);
698 			}
699 		}
700 	}
701 
702 	return (result);
703 }
704 
705 void
706 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
707 {
708 	dmu_buf_rele(ds->ds_dbuf, tag);
709 }
710 
711 void
712 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
713 {
714 	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
715 		rw_exit(&ds->ds_rwlock);
716 	}
717 	dsl_dataset_drop_ref(ds, tag);
718 }
719 
720 void
721 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
722 {
723 	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
724 	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
725 
726 	mutex_enter(&ds->ds_lock);
727 	ds->ds_owner = NULL;
728 	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
729 		rw_exit(&ds->ds_rwlock);
730 		cv_broadcast(&ds->ds_exclusive_cv);
731 	}
732 	mutex_exit(&ds->ds_lock);
733 	if (ds->ds_dbuf)
734 		dsl_dataset_drop_ref(ds, tag);
735 	else
736 		dsl_dataset_evict(NULL, ds);
737 }
738 
739 boolean_t
740 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
741 {
742 	boolean_t gotit = FALSE;
743 
744 	mutex_enter(&ds->ds_lock);
745 	if (ds->ds_owner == NULL &&
746 	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
747 		ds->ds_owner = tag;
748 		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
749 			rw_exit(&ds->ds_rwlock);
750 		gotit = TRUE;
751 	}
752 	mutex_exit(&ds->ds_lock);
753 	return (gotit);
754 }
755 
756 void
757 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
758 {
759 	ASSERT3P(owner, ==, ds->ds_owner);
760 	if (!RW_WRITE_HELD(&ds->ds_rwlock))
761 		rw_enter(&ds->ds_rwlock, RW_WRITER);
762 }
763 
764 uint64_t
765 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
766     uint64_t flags, dmu_tx_t *tx)
767 {
768 	dsl_pool_t *dp = dd->dd_pool;
769 	dmu_buf_t *dbuf;
770 	dsl_dataset_phys_t *dsphys;
771 	uint64_t dsobj;
772 	objset_t *mos = dp->dp_meta_objset;
773 
774 	if (origin == NULL)
775 		origin = dp->dp_origin_snap;
776 
777 	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
778 	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
779 	ASSERT(dmu_tx_is_syncing(tx));
780 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
781 
782 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
783 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
784 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
785 	dmu_buf_will_dirty(dbuf, tx);
786 	dsphys = dbuf->db_data;
787 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
788 	dsphys->ds_dir_obj = dd->dd_object;
789 	dsphys->ds_flags = flags;
790 	dsphys->ds_fsid_guid = unique_create();
791 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
792 	    sizeof (dsphys->ds_guid));
793 	dsphys->ds_snapnames_zapobj =
794 	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
795 	    DMU_OT_NONE, 0, tx);
796 	dsphys->ds_creation_time = gethrestime_sec();
797 	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
798 
799 	if (origin == NULL) {
800 		dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
801 	} else {
802 		dsl_dataset_t *ohds;
803 
804 		dsphys->ds_prev_snap_obj = origin->ds_object;
805 		dsphys->ds_prev_snap_txg =
806 		    origin->ds_phys->ds_creation_txg;
807 		dsphys->ds_referenced_bytes =
808 		    origin->ds_phys->ds_referenced_bytes;
809 		dsphys->ds_compressed_bytes =
810 		    origin->ds_phys->ds_compressed_bytes;
811 		dsphys->ds_uncompressed_bytes =
812 		    origin->ds_phys->ds_uncompressed_bytes;
813 		dsphys->ds_bp = origin->ds_phys->ds_bp;
814 		dsphys->ds_flags |= origin->ds_phys->ds_flags;
815 
816 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
817 		origin->ds_phys->ds_num_children++;
818 
819 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
820 		    origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds));
821 		dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
822 		    dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
823 		dsl_dataset_rele(ohds, FTAG);
824 
825 		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
826 			if (origin->ds_phys->ds_next_clones_obj == 0) {
827 				origin->ds_phys->ds_next_clones_obj =
828 				    zap_create(mos,
829 				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
830 			}
831 			VERIFY(0 == zap_add_int(mos,
832 			    origin->ds_phys->ds_next_clones_obj,
833 			    dsobj, tx));
834 		}
835 
836 		dmu_buf_will_dirty(dd->dd_dbuf, tx);
837 		dd->dd_phys->dd_origin_obj = origin->ds_object;
838 		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
839 			if (origin->ds_dir->dd_phys->dd_clones == 0) {
840 				dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
841 				origin->ds_dir->dd_phys->dd_clones =
842 				    zap_create(mos,
843 				    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
844 			}
845 			VERIFY3U(0, ==, zap_add_int(mos,
846 			    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));
847 		}
848 	}
849 
850 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
851 		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
852 
853 	dmu_buf_rele(dbuf, FTAG);
854 
855 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
856 	dd->dd_phys->dd_head_dataset_obj = dsobj;
857 
858 	return (dsobj);
859 }
860 
861 uint64_t
862 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
863     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
864 {
865 	dsl_pool_t *dp = pdd->dd_pool;
866 	uint64_t dsobj, ddobj;
867 	dsl_dir_t *dd;
868 
869 	ASSERT(lastname[0] != '@');
870 
871 	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
872 	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
873 
874 	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
875 
876 	dsl_deleg_set_create_perms(dd, tx, cr);
877 
878 	dsl_dir_close(dd, FTAG);
879 
880 	/*
881 	 * If we are creating a clone, make sure we zero out any stale
882 	 * data from the origin snapshots zil header.
883 	 */
884 	if (origin != NULL) {
885 		dsl_dataset_t *ds;
886 		objset_t *os;
887 
888 		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
889 		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
890 		bzero(&os->os_zil_header, sizeof (os->os_zil_header));
891 		dsl_dataset_dirty(ds, tx);
892 		dsl_dataset_rele(ds, FTAG);
893 	}
894 
895 	return (dsobj);
896 }
897 
898 /*
899  * The snapshots must all be in the same pool.
900  */
901 int
902 dmu_snapshots_destroy_nvl(nvlist_t *snaps, boolean_t defer,
903     nvlist_t *errlist)
904 {
905 	int err;
906 	dsl_sync_task_t *dst;
907 	spa_t *spa;
908 	nvpair_t *pair;
909 	dsl_sync_task_group_t *dstg;
910 
911 	pair = nvlist_next_nvpair(snaps, NULL);
912 	if (pair == NULL)
913 		return (0);
914 
915 	err = spa_open(nvpair_name(pair), &spa, FTAG);
916 	if (err)
917 		return (err);
918 	dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
919 
920 	for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
921 	    pair = nvlist_next_nvpair(snaps, pair)) {
922 		dsl_dataset_t *ds;
923 
924 		err = dsl_dataset_own(nvpair_name(pair), B_TRUE, dstg, &ds);
925 		if (err == 0) {
926 			struct dsl_ds_destroyarg *dsda;
927 
928 			dsl_dataset_make_exclusive(ds, dstg);
929 			dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg),
930 			    KM_SLEEP);
931 			dsda->ds = ds;
932 			dsda->defer = defer;
933 			dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
934 			    dsl_dataset_destroy_sync, dsda, dstg, 0);
935 		} else if (err == ENOENT) {
936 			err = 0;
937 		} else {
938 			fnvlist_add_int32(errlist, nvpair_name(pair), err);
939 			break;
940 		}
941 	}
942 
943 	if (err == 0)
944 		err = dsl_sync_task_group_wait(dstg);
945 
946 	for (dst = list_head(&dstg->dstg_tasks); dst;
947 	    dst = list_next(&dstg->dstg_tasks, dst)) {
948 		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
949 		dsl_dataset_t *ds = dsda->ds;
950 
951 		/*
952 		 * Return the snapshots that triggered the error.
953 		 */
954 		if (dst->dst_err != 0) {
955 			char name[ZFS_MAXNAMELEN];
956 			dsl_dataset_name(ds, name);
957 			fnvlist_add_int32(errlist, name, dst->dst_err);
958 		}
959 		ASSERT3P(dsda->rm_origin, ==, NULL);
960 		dsl_dataset_disown(ds, dstg);
961 		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
962 	}
963 
964 	dsl_sync_task_group_destroy(dstg);
965 	spa_close(spa, FTAG);
966 	return (err);
967 
968 }
969 
970 static boolean_t
971 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
972 {
973 	boolean_t might_destroy = B_FALSE;
974 
975 	mutex_enter(&ds->ds_lock);
976 	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
977 	    DS_IS_DEFER_DESTROY(ds))
978 		might_destroy = B_TRUE;
979 	mutex_exit(&ds->ds_lock);
980 
981 	return (might_destroy);
982 }
983 
984 /*
985  * If we're removing a clone, and these three conditions are true:
986  *	1) the clone's origin has no other children
987  *	2) the clone's origin has no user references
988  *	3) the clone's origin has been marked for deferred destruction
989  * Then, prepare to remove the origin as part of this sync task group.
990  */
991 static int
992 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
993 {
994 	dsl_dataset_t *ds = dsda->ds;
995 	dsl_dataset_t *origin = ds->ds_prev;
996 
997 	if (dsl_dataset_might_destroy_origin(origin)) {
998 		char *name;
999 		int namelen;
1000 		int error;
1001 
1002 		namelen = dsl_dataset_namelen(origin) + 1;
1003 		name = kmem_alloc(namelen, KM_SLEEP);
1004 		dsl_dataset_name(origin, name);
1005 #ifdef _KERNEL
1006 		error = zfs_unmount_snap(name, NULL);
1007 		if (error) {
1008 			kmem_free(name, namelen);
1009 			return (error);
1010 		}
1011 #endif
1012 		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
1013 		kmem_free(name, namelen);
1014 		if (error)
1015 			return (error);
1016 		dsda->rm_origin = origin;
1017 		dsl_dataset_make_exclusive(origin, tag);
1018 	}
1019 
1020 	return (0);
1021 }
1022 
1023 /*
1024  * ds must be opened as OWNER.  On return (whether successful or not),
1025  * ds will be closed and caller can no longer dereference it.
1026  */
1027 int
1028 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1029 {
1030 	int err;
1031 	dsl_sync_task_group_t *dstg;
1032 	objset_t *os;
1033 	dsl_dir_t *dd;
1034 	uint64_t obj;
1035 	struct dsl_ds_destroyarg dsda = { 0 };
1036 
1037 	dsda.ds = ds;
1038 
1039 	if (dsl_dataset_is_snapshot(ds)) {
1040 		/* Destroying a snapshot is simpler */
1041 		dsl_dataset_make_exclusive(ds, tag);
1042 
1043 		dsda.defer = defer;
1044 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1045 		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1046 		    &dsda, tag, 0);
1047 		ASSERT3P(dsda.rm_origin, ==, NULL);
1048 		goto out;
1049 	} else if (defer) {
1050 		err = EINVAL;
1051 		goto out;
1052 	}
1053 
1054 	dd = ds->ds_dir;
1055 
1056 	if (!spa_feature_is_enabled(dsl_dataset_get_spa(ds),
1057 	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
1058 		/*
1059 		 * Check for errors and mark this ds as inconsistent, in
1060 		 * case we crash while freeing the objects.
1061 		 */
1062 		err = dsl_sync_task_do(dd->dd_pool,
1063 		    dsl_dataset_destroy_begin_check,
1064 		    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1065 		if (err)
1066 			goto out;
1067 
1068 		err = dmu_objset_from_ds(ds, &os);
1069 		if (err)
1070 			goto out;
1071 
1072 		/*
1073 		 * Remove all objects while in the open context so that
1074 		 * there is less work to do in the syncing context.
1075 		 */
1076 		for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1077 		    ds->ds_phys->ds_prev_snap_txg)) {
1078 			/*
1079 			 * Ignore errors, if there is not enough disk space
1080 			 * we will deal with it in dsl_dataset_destroy_sync().
1081 			 */
1082 			(void) dmu_free_object(os, obj);
1083 		}
1084 		if (err != ESRCH)
1085 			goto out;
1086 
1087 		/*
1088 		 * Sync out all in-flight IO.
1089 		 */
1090 		txg_wait_synced(dd->dd_pool, 0);
1091 
1092 		/*
1093 		 * If we managed to free all the objects in open
1094 		 * context, the user space accounting should be zero.
1095 		 */
1096 		if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1097 		    dmu_objset_userused_enabled(os)) {
1098 			uint64_t count;
1099 
1100 			ASSERT(zap_count(os, DMU_USERUSED_OBJECT,
1101 			    &count) != 0 || count == 0);
1102 			ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT,
1103 			    &count) != 0 || count == 0);
1104 		}
1105 	}
1106 
1107 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1108 	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1109 	rw_exit(&dd->dd_pool->dp_config_rwlock);
1110 
1111 	if (err)
1112 		goto out;
1113 
1114 	/*
1115 	 * Blow away the dsl_dir + head dataset.
1116 	 */
1117 	dsl_dataset_make_exclusive(ds, tag);
1118 	/*
1119 	 * If we're removing a clone, we might also need to remove its
1120 	 * origin.
1121 	 */
1122 	do {
1123 		dsda.need_prep = B_FALSE;
1124 		if (dsl_dir_is_clone(dd)) {
1125 			err = dsl_dataset_origin_rm_prep(&dsda, tag);
1126 			if (err) {
1127 				dsl_dir_close(dd, FTAG);
1128 				goto out;
1129 			}
1130 		}
1131 
1132 		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1133 		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1134 		    dsl_dataset_destroy_sync, &dsda, tag, 0);
1135 		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1136 		    dsl_dir_destroy_sync, dd, FTAG, 0);
1137 		err = dsl_sync_task_group_wait(dstg);
1138 		dsl_sync_task_group_destroy(dstg);
1139 
1140 		/*
1141 		 * We could be racing against 'zfs release' or 'zfs destroy -d'
1142 		 * on the origin snap, in which case we can get EBUSY if we
1143 		 * needed to destroy the origin snap but were not ready to
1144 		 * do so.
1145 		 */
1146 		if (dsda.need_prep) {
1147 			ASSERT(err == EBUSY);
1148 			ASSERT(dsl_dir_is_clone(dd));
1149 			ASSERT(dsda.rm_origin == NULL);
1150 		}
1151 	} while (dsda.need_prep);
1152 
1153 	if (dsda.rm_origin != NULL)
1154 		dsl_dataset_disown(dsda.rm_origin, tag);
1155 
1156 	/* if it is successful, dsl_dir_destroy_sync will close the dd */
1157 	if (err)
1158 		dsl_dir_close(dd, FTAG);
1159 out:
1160 	dsl_dataset_disown(ds, tag);
1161 	return (err);
1162 }
1163 
1164 blkptr_t *
1165 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1166 {
1167 	return (&ds->ds_phys->ds_bp);
1168 }
1169 
1170 void
1171 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1172 {
1173 	ASSERT(dmu_tx_is_syncing(tx));
1174 	/* If it's the meta-objset, set dp_meta_rootbp */
1175 	if (ds == NULL) {
1176 		tx->tx_pool->dp_meta_rootbp = *bp;
1177 	} else {
1178 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1179 		ds->ds_phys->ds_bp = *bp;
1180 	}
1181 }
1182 
1183 spa_t *
1184 dsl_dataset_get_spa(dsl_dataset_t *ds)
1185 {
1186 	return (ds->ds_dir->dd_pool->dp_spa);
1187 }
1188 
1189 void
1190 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1191 {
1192 	dsl_pool_t *dp;
1193 
1194 	if (ds == NULL) /* this is the meta-objset */
1195 		return;
1196 
1197 	ASSERT(ds->ds_objset != NULL);
1198 
1199 	if (ds->ds_phys->ds_next_snap_obj != 0)
1200 		panic("dirtying snapshot!");
1201 
1202 	dp = ds->ds_dir->dd_pool;
1203 
1204 	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1205 		/* up the hold count until we can be written out */
1206 		dmu_buf_add_ref(ds->ds_dbuf, ds);
1207 	}
1208 }
1209 
1210 boolean_t
1211 dsl_dataset_is_dirty(dsl_dataset_t *ds)
1212 {
1213 	for (int t = 0; t < TXG_SIZE; t++) {
1214 		if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
1215 		    ds, t))
1216 			return (B_TRUE);
1217 	}
1218 	return (B_FALSE);
1219 }
1220 
1221 /*
1222  * The unique space in the head dataset can be calculated by subtracting
1223  * the space used in the most recent snapshot, that is still being used
1224  * in this file system, from the space currently in use.  To figure out
1225  * the space in the most recent snapshot still in use, we need to take
1226  * the total space used in the snapshot and subtract out the space that
1227  * has been freed up since the snapshot was taken.
1228  */
1229 static void
1230 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1231 {
1232 	uint64_t mrs_used;
1233 	uint64_t dlused, dlcomp, dluncomp;
1234 
1235 	ASSERT(!dsl_dataset_is_snapshot(ds));
1236 
1237 	if (ds->ds_phys->ds_prev_snap_obj != 0)
1238 		mrs_used = ds->ds_prev->ds_phys->ds_referenced_bytes;
1239 	else
1240 		mrs_used = 0;
1241 
1242 	dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
1243 
1244 	ASSERT3U(dlused, <=, mrs_used);
1245 	ds->ds_phys->ds_unique_bytes =
1246 	    ds->ds_phys->ds_referenced_bytes - (mrs_used - dlused);
1247 
1248 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1249 	    SPA_VERSION_UNIQUE_ACCURATE)
1250 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1251 }
1252 
1253 struct killarg {
1254 	dsl_dataset_t *ds;
1255 	dmu_tx_t *tx;
1256 };
1257 
1258 /* ARGSUSED */
1259 static int
1260 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1261     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1262 {
1263 	struct killarg *ka = arg;
1264 	dmu_tx_t *tx = ka->tx;
1265 
1266 	if (bp == NULL)
1267 		return (0);
1268 
1269 	if (zb->zb_level == ZB_ZIL_LEVEL) {
1270 		ASSERT(zilog != NULL);
1271 		/*
1272 		 * It's a block in the intent log.  It has no
1273 		 * accounting, so just free it.
1274 		 */
1275 		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1276 	} else {
1277 		ASSERT(zilog == NULL);
1278 		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1279 		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1280 	}
1281 
1282 	return (0);
1283 }
1284 
1285 /* ARGSUSED */
1286 static int
1287 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1288 {
1289 	dsl_dataset_t *ds = arg1;
1290 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1291 	uint64_t count;
1292 	int err;
1293 
1294 	/*
1295 	 * Can't delete a head dataset if there are snapshots of it.
1296 	 * (Except if the only snapshots are from the branch we cloned
1297 	 * from.)
1298 	 */
1299 	if (ds->ds_prev != NULL &&
1300 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1301 		return (EBUSY);
1302 
1303 	/*
1304 	 * This is really a dsl_dir thing, but check it here so that
1305 	 * we'll be less likely to leave this dataset inconsistent &
1306 	 * nearly destroyed.
1307 	 */
1308 	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1309 	if (err)
1310 		return (err);
1311 	if (count != 0)
1312 		return (EEXIST);
1313 
1314 	return (0);
1315 }
1316 
1317 /* ARGSUSED */
1318 static void
1319 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1320 {
1321 	dsl_dataset_t *ds = arg1;
1322 
1323 	/* Mark it as inconsistent on-disk, in case we crash */
1324 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1325 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1326 
1327 	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
1328 }
1329 
1330 static int
1331 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1332     dmu_tx_t *tx)
1333 {
1334 	dsl_dataset_t *ds = dsda->ds;
1335 	dsl_dataset_t *ds_prev = ds->ds_prev;
1336 
1337 	if (dsl_dataset_might_destroy_origin(ds_prev)) {
1338 		struct dsl_ds_destroyarg ndsda = {0};
1339 
1340 		/*
1341 		 * If we're not prepared to remove the origin, don't remove
1342 		 * the clone either.
1343 		 */
1344 		if (dsda->rm_origin == NULL) {
1345 			dsda->need_prep = B_TRUE;
1346 			return (EBUSY);
1347 		}
1348 
1349 		ndsda.ds = ds_prev;
1350 		ndsda.is_origin_rm = B_TRUE;
1351 		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1352 	}
1353 
1354 	/*
1355 	 * If we're not going to remove the origin after all,
1356 	 * undo the open context setup.
1357 	 */
1358 	if (dsda->rm_origin != NULL) {
1359 		dsl_dataset_disown(dsda->rm_origin, tag);
1360 		dsda->rm_origin = NULL;
1361 	}
1362 
1363 	return (0);
1364 }
1365 
1366 /*
1367  * If you add new checks here, you may need to add
1368  * additional checks to the "temporary" case in
1369  * snapshot_check() in dmu_objset.c.
1370  */
1371 /* ARGSUSED */
1372 int
1373 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1374 {
1375 	struct dsl_ds_destroyarg *dsda = arg1;
1376 	dsl_dataset_t *ds = dsda->ds;
1377 
1378 	/* we have an owner hold, so noone else can destroy us */
1379 	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1380 
1381 	/*
1382 	 * Only allow deferred destroy on pools that support it.
1383 	 * NOTE: deferred destroy is only supported on snapshots.
1384 	 */
1385 	if (dsda->defer) {
1386 		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1387 		    SPA_VERSION_USERREFS)
1388 			return (ENOTSUP);
1389 		ASSERT(dsl_dataset_is_snapshot(ds));
1390 		return (0);
1391 	}
1392 
1393 	/*
1394 	 * Can't delete a head dataset if there are snapshots of it.
1395 	 * (Except if the only snapshots are from the branch we cloned
1396 	 * from.)
1397 	 */
1398 	if (ds->ds_prev != NULL &&
1399 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1400 		return (EBUSY);
1401 
1402 	/*
1403 	 * If we made changes this txg, traverse_dsl_dataset won't find
1404 	 * them.  Try again.
1405 	 */
1406 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1407 		return (EAGAIN);
1408 
1409 	if (dsl_dataset_is_snapshot(ds)) {
1410 		/*
1411 		 * If this snapshot has an elevated user reference count,
1412 		 * we can't destroy it yet.
1413 		 */
1414 		if (ds->ds_userrefs > 0 && !dsda->releasing)
1415 			return (EBUSY);
1416 
1417 		mutex_enter(&ds->ds_lock);
1418 		/*
1419 		 * Can't delete a branch point. However, if we're destroying
1420 		 * a clone and removing its origin due to it having a user
1421 		 * hold count of 0 and having been marked for deferred destroy,
1422 		 * it's OK for the origin to have a single clone.
1423 		 */
1424 		if (ds->ds_phys->ds_num_children >
1425 		    (dsda->is_origin_rm ? 2 : 1)) {
1426 			mutex_exit(&ds->ds_lock);
1427 			return (EEXIST);
1428 		}
1429 		mutex_exit(&ds->ds_lock);
1430 	} else if (dsl_dir_is_clone(ds->ds_dir)) {
1431 		return (dsl_dataset_origin_check(dsda, arg2, tx));
1432 	}
1433 
1434 	/* XXX we should do some i/o error checking... */
1435 	return (0);
1436 }
1437 
1438 struct refsarg {
1439 	kmutex_t lock;
1440 	boolean_t gone;
1441 	kcondvar_t cv;
1442 };
1443 
1444 /* ARGSUSED */
1445 static void
1446 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1447 {
1448 	struct refsarg *arg = argv;
1449 
1450 	mutex_enter(&arg->lock);
1451 	arg->gone = TRUE;
1452 	cv_signal(&arg->cv);
1453 	mutex_exit(&arg->lock);
1454 }
1455 
1456 static void
1457 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1458 {
1459 	struct refsarg arg;
1460 
1461 	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1462 	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1463 	arg.gone = FALSE;
1464 	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1465 	    dsl_dataset_refs_gone);
1466 	dmu_buf_rele(ds->ds_dbuf, tag);
1467 	mutex_enter(&arg.lock);
1468 	while (!arg.gone)
1469 		cv_wait(&arg.cv, &arg.lock);
1470 	ASSERT(arg.gone);
1471 	mutex_exit(&arg.lock);
1472 	ds->ds_dbuf = NULL;
1473 	ds->ds_phys = NULL;
1474 	mutex_destroy(&arg.lock);
1475 	cv_destroy(&arg.cv);
1476 }
1477 
1478 static void
1479 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1480 {
1481 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1482 	uint64_t count;
1483 	int err;
1484 
1485 	ASSERT(ds->ds_phys->ds_num_children >= 2);
1486 	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1487 	/*
1488 	 * The err should not be ENOENT, but a bug in a previous version
1489 	 * of the code could cause upgrade_clones_cb() to not set
1490 	 * ds_next_snap_obj when it should, leading to a missing entry.
1491 	 * If we knew that the pool was created after
1492 	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1493 	 * ENOENT.  However, at least we can check that we don't have
1494 	 * too many entries in the next_clones_obj even after failing to
1495 	 * remove this one.
1496 	 */
1497 	if (err != ENOENT) {
1498 		VERIFY0(err);
1499 	}
1500 	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1501 	    &count));
1502 	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1503 }
1504 
1505 static void
1506 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
1507 {
1508 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1509 	zap_cursor_t zc;
1510 	zap_attribute_t za;
1511 
1512 	/*
1513 	 * If it is the old version, dd_clones doesn't exist so we can't
1514 	 * find the clones, but deadlist_remove_key() is a no-op so it
1515 	 * doesn't matter.
1516 	 */
1517 	if (ds->ds_dir->dd_phys->dd_clones == 0)
1518 		return;
1519 
1520 	for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones);
1521 	    zap_cursor_retrieve(&zc, &za) == 0;
1522 	    zap_cursor_advance(&zc)) {
1523 		dsl_dataset_t *clone;
1524 
1525 		VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
1526 		    za.za_first_integer, FTAG, &clone));
1527 		if (clone->ds_dir->dd_origin_txg > mintxg) {
1528 			dsl_deadlist_remove_key(&clone->ds_deadlist,
1529 			    mintxg, tx);
1530 			dsl_dataset_remove_clones_key(clone, mintxg, tx);
1531 		}
1532 		dsl_dataset_rele(clone, FTAG);
1533 	}
1534 	zap_cursor_fini(&zc);
1535 }
1536 
1537 struct process_old_arg {
1538 	dsl_dataset_t *ds;
1539 	dsl_dataset_t *ds_prev;
1540 	boolean_t after_branch_point;
1541 	zio_t *pio;
1542 	uint64_t used, comp, uncomp;
1543 };
1544 
1545 static int
1546 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
1547 {
1548 	struct process_old_arg *poa = arg;
1549 	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
1550 
1551 	if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) {
1552 		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
1553 		if (poa->ds_prev && !poa->after_branch_point &&
1554 		    bp->blk_birth >
1555 		    poa->ds_prev->ds_phys->ds_prev_snap_txg) {
1556 			poa->ds_prev->ds_phys->ds_unique_bytes +=
1557 			    bp_get_dsize_sync(dp->dp_spa, bp);
1558 		}
1559 	} else {
1560 		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
1561 		poa->comp += BP_GET_PSIZE(bp);
1562 		poa->uncomp += BP_GET_UCSIZE(bp);
1563 		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
1564 	}
1565 	return (0);
1566 }
1567 
1568 static void
1569 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
1570     dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
1571 {
1572 	struct process_old_arg poa = { 0 };
1573 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1574 	objset_t *mos = dp->dp_meta_objset;
1575 
1576 	ASSERT(ds->ds_deadlist.dl_oldfmt);
1577 	ASSERT(ds_next->ds_deadlist.dl_oldfmt);
1578 
1579 	poa.ds = ds;
1580 	poa.ds_prev = ds_prev;
1581 	poa.after_branch_point = after_branch_point;
1582 	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1583 	VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
1584 	    process_old_cb, &poa, tx));
1585 	VERIFY0(zio_wait(poa.pio));
1586 	ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes);
1587 
1588 	/* change snapused */
1589 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1590 	    -poa.used, -poa.comp, -poa.uncomp, tx);
1591 
1592 	/* swap next's deadlist to our deadlist */
1593 	dsl_deadlist_close(&ds->ds_deadlist);
1594 	dsl_deadlist_close(&ds_next->ds_deadlist);
1595 	SWITCH64(ds_next->ds_phys->ds_deadlist_obj,
1596 	    ds->ds_phys->ds_deadlist_obj);
1597 	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
1598 	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
1599 	    ds_next->ds_phys->ds_deadlist_obj);
1600 }
1601 
1602 static int
1603 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
1604 {
1605 	int err;
1606 	struct killarg ka;
1607 
1608 	/*
1609 	 * Free everything that we point to (that's born after
1610 	 * the previous snapshot, if we are a clone)
1611 	 *
1612 	 * NB: this should be very quick, because we already
1613 	 * freed all the objects in open context.
1614 	 */
1615 	ka.ds = ds;
1616 	ka.tx = tx;
1617 	err = traverse_dataset(ds,
1618 	    ds->ds_phys->ds_prev_snap_txg, TRAVERSE_POST,
1619 	    kill_blkptr, &ka);
1620 	ASSERT0(err);
1621 	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || ds->ds_phys->ds_unique_bytes == 0);
1622 
1623 	return (err);
1624 }
1625 
1626 void
1627 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1628 {
1629 	struct dsl_ds_destroyarg *dsda = arg1;
1630 	dsl_dataset_t *ds = dsda->ds;
1631 	int err;
1632 	int after_branch_point = FALSE;
1633 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1634 	objset_t *mos = dp->dp_meta_objset;
1635 	dsl_dataset_t *ds_prev = NULL;
1636 	boolean_t wont_destroy;
1637 	uint64_t obj;
1638 
1639 	wont_destroy = (dsda->defer &&
1640 	    (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1));
1641 
1642 	ASSERT(ds->ds_owner || wont_destroy);
1643 	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1644 	ASSERT(ds->ds_prev == NULL ||
1645 	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1646 	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1647 
1648 	if (wont_destroy) {
1649 		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1650 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1651 		ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1652 		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
1653 		return;
1654 	}
1655 
1656 	/* We need to log before removing it from the namespace. */
1657 	spa_history_log_internal_ds(ds, "destroy", tx, "");
1658 
1659 	/* signal any waiters that this dataset is going away */
1660 	mutex_enter(&ds->ds_lock);
1661 	ds->ds_owner = dsl_reaper;
1662 	cv_broadcast(&ds->ds_exclusive_cv);
1663 	mutex_exit(&ds->ds_lock);
1664 
1665 	/* Remove our reservation */
1666 	if (ds->ds_reserved != 0) {
1667 		dsl_prop_setarg_t psa;
1668 		uint64_t value = 0;
1669 
1670 		dsl_prop_setarg_init_uint64(&psa, "refreservation",
1671 		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1672 		    &value);
1673 		psa.psa_effective_value = 0;	/* predict default value */
1674 
1675 		dsl_dataset_set_reservation_sync(ds, &psa, tx);
1676 		ASSERT0(ds->ds_reserved);
1677 	}
1678 
1679 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1680 
1681 	dsl_scan_ds_destroyed(ds, tx);
1682 
1683 	obj = ds->ds_object;
1684 
1685 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
1686 		if (ds->ds_prev) {
1687 			ds_prev = ds->ds_prev;
1688 		} else {
1689 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1690 			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1691 		}
1692 		after_branch_point =
1693 		    (ds_prev->ds_phys->ds_next_snap_obj != obj);
1694 
1695 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1696 		if (after_branch_point &&
1697 		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
1698 			remove_from_next_clones(ds_prev, obj, tx);
1699 			if (ds->ds_phys->ds_next_snap_obj != 0) {
1700 				VERIFY(0 == zap_add_int(mos,
1701 				    ds_prev->ds_phys->ds_next_clones_obj,
1702 				    ds->ds_phys->ds_next_snap_obj, tx));
1703 			}
1704 		}
1705 		if (after_branch_point &&
1706 		    ds->ds_phys->ds_next_snap_obj == 0) {
1707 			/* This clone is toast. */
1708 			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1709 			ds_prev->ds_phys->ds_num_children--;
1710 
1711 			/*
1712 			 * If the clone's origin has no other clones, no
1713 			 * user holds, and has been marked for deferred
1714 			 * deletion, then we should have done the necessary
1715 			 * destroy setup for it.
1716 			 */
1717 			if (ds_prev->ds_phys->ds_num_children == 1 &&
1718 			    ds_prev->ds_userrefs == 0 &&
1719 			    DS_IS_DEFER_DESTROY(ds_prev)) {
1720 				ASSERT3P(dsda->rm_origin, !=, NULL);
1721 			} else {
1722 				ASSERT3P(dsda->rm_origin, ==, NULL);
1723 			}
1724 		} else if (!after_branch_point) {
1725 			ds_prev->ds_phys->ds_next_snap_obj =
1726 			    ds->ds_phys->ds_next_snap_obj;
1727 		}
1728 	}
1729 
1730 	if (dsl_dataset_is_snapshot(ds)) {
1731 		dsl_dataset_t *ds_next;
1732 		uint64_t old_unique;
1733 		uint64_t used = 0, comp = 0, uncomp = 0;
1734 
1735 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1736 		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1737 		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1738 
1739 		old_unique = ds_next->ds_phys->ds_unique_bytes;
1740 
1741 		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1742 		ds_next->ds_phys->ds_prev_snap_obj =
1743 		    ds->ds_phys->ds_prev_snap_obj;
1744 		ds_next->ds_phys->ds_prev_snap_txg =
1745 		    ds->ds_phys->ds_prev_snap_txg;
1746 		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1747 		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1748 
1749 
1750 		if (ds_next->ds_deadlist.dl_oldfmt) {
1751 			process_old_deadlist(ds, ds_prev, ds_next,
1752 			    after_branch_point, tx);
1753 		} else {
1754 			/* Adjust prev's unique space. */
1755 			if (ds_prev && !after_branch_point) {
1756 				dsl_deadlist_space_range(&ds_next->ds_deadlist,
1757 				    ds_prev->ds_phys->ds_prev_snap_txg,
1758 				    ds->ds_phys->ds_prev_snap_txg,
1759 				    &used, &comp, &uncomp);
1760 				ds_prev->ds_phys->ds_unique_bytes += used;
1761 			}
1762 
1763 			/* Adjust snapused. */
1764 			dsl_deadlist_space_range(&ds_next->ds_deadlist,
1765 			    ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
1766 			    &used, &comp, &uncomp);
1767 			dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1768 			    -used, -comp, -uncomp, tx);
1769 
1770 			/* Move blocks to be freed to pool's free list. */
1771 			dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
1772 			    &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg,
1773 			    tx);
1774 			dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
1775 			    DD_USED_HEAD, used, comp, uncomp, tx);
1776 
1777 			/* Merge our deadlist into next's and free it. */
1778 			dsl_deadlist_merge(&ds_next->ds_deadlist,
1779 			    ds->ds_phys->ds_deadlist_obj, tx);
1780 		}
1781 		dsl_deadlist_close(&ds->ds_deadlist);
1782 		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1783 
1784 		/* Collapse range in clone heads */
1785 		dsl_dataset_remove_clones_key(ds,
1786 		    ds->ds_phys->ds_creation_txg, tx);
1787 
1788 		if (dsl_dataset_is_snapshot(ds_next)) {
1789 			dsl_dataset_t *ds_nextnext;
1790 
1791 			/*
1792 			 * Update next's unique to include blocks which
1793 			 * were previously shared by only this snapshot
1794 			 * and it.  Those blocks will be born after the
1795 			 * prev snap and before this snap, and will have
1796 			 * died after the next snap and before the one
1797 			 * after that (ie. be on the snap after next's
1798 			 * deadlist).
1799 			 */
1800 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1801 			    ds_next->ds_phys->ds_next_snap_obj,
1802 			    FTAG, &ds_nextnext));
1803 			dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
1804 			    ds->ds_phys->ds_prev_snap_txg,
1805 			    ds->ds_phys->ds_creation_txg,
1806 			    &used, &comp, &uncomp);
1807 			ds_next->ds_phys->ds_unique_bytes += used;
1808 			dsl_dataset_rele(ds_nextnext, FTAG);
1809 			ASSERT3P(ds_next->ds_prev, ==, NULL);
1810 
1811 			/* Collapse range in this head. */
1812 			dsl_dataset_t *hds;
1813 			VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
1814 			    ds->ds_dir->dd_phys->dd_head_dataset_obj,
1815 			    FTAG, &hds));
1816 			dsl_deadlist_remove_key(&hds->ds_deadlist,
1817 			    ds->ds_phys->ds_creation_txg, tx);
1818 			dsl_dataset_rele(hds, FTAG);
1819 
1820 		} else {
1821 			ASSERT3P(ds_next->ds_prev, ==, ds);
1822 			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1823 			ds_next->ds_prev = NULL;
1824 			if (ds_prev) {
1825 				VERIFY(0 == dsl_dataset_get_ref(dp,
1826 				    ds->ds_phys->ds_prev_snap_obj,
1827 				    ds_next, &ds_next->ds_prev));
1828 			}
1829 
1830 			dsl_dataset_recalc_head_uniq(ds_next);
1831 
1832 			/*
1833 			 * Reduce the amount of our unconsmed refreservation
1834 			 * being charged to our parent by the amount of
1835 			 * new unique data we have gained.
1836 			 */
1837 			if (old_unique < ds_next->ds_reserved) {
1838 				int64_t mrsdelta;
1839 				uint64_t new_unique =
1840 				    ds_next->ds_phys->ds_unique_bytes;
1841 
1842 				ASSERT(old_unique <= new_unique);
1843 				mrsdelta = MIN(new_unique - old_unique,
1844 				    ds_next->ds_reserved - old_unique);
1845 				dsl_dir_diduse_space(ds->ds_dir,
1846 				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1847 			}
1848 		}
1849 		dsl_dataset_rele(ds_next, FTAG);
1850 	} else {
1851 		zfeature_info_t *async_destroy =
1852 		    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY];
1853 		objset_t *os;
1854 
1855 		/*
1856 		 * There's no next snapshot, so this is a head dataset.
1857 		 * Destroy the deadlist.  Unless it's a clone, the
1858 		 * deadlist should be empty.  (If it's a clone, it's
1859 		 * safe to ignore the deadlist contents.)
1860 		 */
1861 		dsl_deadlist_close(&ds->ds_deadlist);
1862 		dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx);
1863 		ds->ds_phys->ds_deadlist_obj = 0;
1864 
1865 		VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os));
1866 
1867 		if (!spa_feature_is_enabled(dp->dp_spa, async_destroy)) {
1868 			err = old_synchronous_dataset_destroy(ds, tx);
1869 		} else {
1870 			/*
1871 			 * Move the bptree into the pool's list of trees to
1872 			 * clean up and update space accounting information.
1873 			 */
1874 			uint64_t used, comp, uncomp;
1875 
1876 			zil_destroy_sync(dmu_objset_zil(os), tx);
1877 
1878 			if (!spa_feature_is_active(dp->dp_spa, async_destroy)) {
1879 				spa_feature_incr(dp->dp_spa, async_destroy, tx);
1880 				dp->dp_bptree_obj = bptree_alloc(mos, tx);
1881 				VERIFY(zap_add(mos,
1882 				    DMU_POOL_DIRECTORY_OBJECT,
1883 				    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
1884 				    &dp->dp_bptree_obj, tx) == 0);
1885 			}
1886 
1887 			used = ds->ds_dir->dd_phys->dd_used_bytes;
1888 			comp = ds->ds_dir->dd_phys->dd_compressed_bytes;
1889 			uncomp = ds->ds_dir->dd_phys->dd_uncompressed_bytes;
1890 
1891 			ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1892 			    ds->ds_phys->ds_unique_bytes == used);
1893 
1894 			bptree_add(mos, dp->dp_bptree_obj,
1895 			    &ds->ds_phys->ds_bp, ds->ds_phys->ds_prev_snap_txg,
1896 			    used, comp, uncomp, tx);
1897 			dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1898 			    -used, -comp, -uncomp, tx);
1899 			dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1900 			    used, comp, uncomp, tx);
1901 		}
1902 
1903 		if (ds->ds_prev != NULL) {
1904 			if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1905 				VERIFY3U(0, ==, zap_remove_int(mos,
1906 				    ds->ds_prev->ds_dir->dd_phys->dd_clones,
1907 				    ds->ds_object, tx));
1908 			}
1909 			dsl_dataset_rele(ds->ds_prev, ds);
1910 			ds->ds_prev = ds_prev = NULL;
1911 		}
1912 	}
1913 
1914 	/*
1915 	 * This must be done after the dsl_traverse(), because it will
1916 	 * re-open the objset.
1917 	 */
1918 	if (ds->ds_objset) {
1919 		dmu_objset_evict(ds->ds_objset);
1920 		ds->ds_objset = NULL;
1921 	}
1922 
1923 	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1924 		/* Erase the link in the dir */
1925 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1926 		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1927 		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1928 		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1929 		ASSERT(err == 0);
1930 	} else {
1931 		/* remove from snapshot namespace */
1932 		dsl_dataset_t *ds_head;
1933 		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1934 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1935 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1936 		VERIFY(0 == dsl_dataset_get_snapname(ds));
1937 #ifdef ZFS_DEBUG
1938 		{
1939 			uint64_t val;
1940 
1941 			err = dsl_dataset_snap_lookup(ds_head,
1942 			    ds->ds_snapname, &val);
1943 			ASSERT0(err);
1944 			ASSERT3U(val, ==, obj);
1945 		}
1946 #endif
1947 		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1948 		ASSERT(err == 0);
1949 		dsl_dataset_rele(ds_head, FTAG);
1950 	}
1951 
1952 	if (ds_prev && ds->ds_prev != ds_prev)
1953 		dsl_dataset_rele(ds_prev, FTAG);
1954 
1955 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1956 
1957 	if (ds->ds_phys->ds_next_clones_obj != 0) {
1958 		uint64_t count;
1959 		ASSERT(0 == zap_count(mos,
1960 		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1961 		VERIFY(0 == dmu_object_free(mos,
1962 		    ds->ds_phys->ds_next_clones_obj, tx));
1963 	}
1964 	if (ds->ds_phys->ds_props_obj != 0)
1965 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1966 	if (ds->ds_phys->ds_userrefs_obj != 0)
1967 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1968 	dsl_dir_close(ds->ds_dir, ds);
1969 	ds->ds_dir = NULL;
1970 	dsl_dataset_drain_refs(ds, tag);
1971 	VERIFY(0 == dmu_object_free(mos, obj, tx));
1972 
1973 	if (dsda->rm_origin) {
1974 		/*
1975 		 * Remove the origin of the clone we just destroyed.
1976 		 */
1977 		struct dsl_ds_destroyarg ndsda = {0};
1978 
1979 		ndsda.ds = dsda->rm_origin;
1980 		dsl_dataset_destroy_sync(&ndsda, tag, tx);
1981 	}
1982 }
1983 
1984 static int
1985 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1986 {
1987 	uint64_t asize;
1988 
1989 	if (!dmu_tx_is_syncing(tx))
1990 		return (0);
1991 
1992 	/*
1993 	 * If there's an fs-only reservation, any blocks that might become
1994 	 * owned by the snapshot dataset must be accommodated by space
1995 	 * outside of the reservation.
1996 	 */
1997 	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1998 	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1999 	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
2000 		return (ENOSPC);
2001 
2002 	/*
2003 	 * Propagate any reserved space for this snapshot to other
2004 	 * snapshot checks in this sync group.
2005 	 */
2006 	if (asize > 0)
2007 		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
2008 
2009 	return (0);
2010 }
2011 
2012 int
2013 dsl_dataset_snapshot_check(dsl_dataset_t *ds, const char *snapname,
2014     dmu_tx_t *tx)
2015 {
2016 	int err;
2017 	uint64_t value;
2018 
2019 	/*
2020 	 * We don't allow multiple snapshots of the same txg.  If there
2021 	 * is already one, try again.
2022 	 */
2023 	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
2024 		return (EAGAIN);
2025 
2026 	/*
2027 	 * Check for conflicting snapshot name.
2028 	 */
2029 	err = dsl_dataset_snap_lookup(ds, snapname, &value);
2030 	if (err == 0)
2031 		return (EEXIST);
2032 	if (err != ENOENT)
2033 		return (err);
2034 
2035 	/*
2036 	 * Check that the dataset's name is not too long.  Name consists
2037 	 * of the dataset's length + 1 for the @-sign + snapshot name's length
2038 	 */
2039 	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
2040 		return (ENAMETOOLONG);
2041 
2042 	err = dsl_dataset_snapshot_reserve_space(ds, tx);
2043 	if (err)
2044 		return (err);
2045 
2046 	ds->ds_trysnap_txg = tx->tx_txg;
2047 	return (0);
2048 }
2049 
2050 void
2051 dsl_dataset_snapshot_sync(dsl_dataset_t *ds, const char *snapname,
2052     dmu_tx_t *tx)
2053 {
2054 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2055 	dmu_buf_t *dbuf;
2056 	dsl_dataset_phys_t *dsphys;
2057 	uint64_t dsobj, crtxg;
2058 	objset_t *mos = dp->dp_meta_objset;
2059 	int err;
2060 
2061 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
2062 
2063 	/*
2064 	 * The origin's ds_creation_txg has to be < TXG_INITIAL
2065 	 */
2066 	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
2067 		crtxg = 1;
2068 	else
2069 		crtxg = tx->tx_txg;
2070 
2071 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
2072 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
2073 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
2074 	dmu_buf_will_dirty(dbuf, tx);
2075 	dsphys = dbuf->db_data;
2076 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
2077 	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
2078 	dsphys->ds_fsid_guid = unique_create();
2079 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
2080 	    sizeof (dsphys->ds_guid));
2081 	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
2082 	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
2083 	dsphys->ds_next_snap_obj = ds->ds_object;
2084 	dsphys->ds_num_children = 1;
2085 	dsphys->ds_creation_time = gethrestime_sec();
2086 	dsphys->ds_creation_txg = crtxg;
2087 	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
2088 	dsphys->ds_referenced_bytes = ds->ds_phys->ds_referenced_bytes;
2089 	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
2090 	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
2091 	dsphys->ds_flags = ds->ds_phys->ds_flags;
2092 	dsphys->ds_bp = ds->ds_phys->ds_bp;
2093 	dmu_buf_rele(dbuf, FTAG);
2094 
2095 	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
2096 	if (ds->ds_prev) {
2097 		uint64_t next_clones_obj =
2098 		    ds->ds_prev->ds_phys->ds_next_clones_obj;
2099 		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
2100 		    ds->ds_object ||
2101 		    ds->ds_prev->ds_phys->ds_num_children > 1);
2102 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
2103 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2104 			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
2105 			    ds->ds_prev->ds_phys->ds_creation_txg);
2106 			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
2107 		} else if (next_clones_obj != 0) {
2108 			remove_from_next_clones(ds->ds_prev,
2109 			    dsphys->ds_next_snap_obj, tx);
2110 			VERIFY3U(0, ==, zap_add_int(mos,
2111 			    next_clones_obj, dsobj, tx));
2112 		}
2113 	}
2114 
2115 	/*
2116 	 * If we have a reference-reservation on this dataset, we will
2117 	 * need to increase the amount of refreservation being charged
2118 	 * since our unique space is going to zero.
2119 	 */
2120 	if (ds->ds_reserved) {
2121 		int64_t delta;
2122 		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
2123 		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
2124 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
2125 		    delta, 0, 0, tx);
2126 	}
2127 
2128 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2129 	zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu",
2130 	    ds->ds_dir->dd_myname, snapname, dsobj,
2131 	    ds->ds_phys->ds_prev_snap_txg);
2132 	ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist,
2133 	    UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx);
2134 	dsl_deadlist_close(&ds->ds_deadlist);
2135 	dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj);
2136 	dsl_deadlist_add_key(&ds->ds_deadlist,
2137 	    ds->ds_phys->ds_prev_snap_txg, tx);
2138 
2139 	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
2140 	ds->ds_phys->ds_prev_snap_obj = dsobj;
2141 	ds->ds_phys->ds_prev_snap_txg = crtxg;
2142 	ds->ds_phys->ds_unique_bytes = 0;
2143 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
2144 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
2145 
2146 	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
2147 	    snapname, 8, 1, &dsobj, tx);
2148 	ASSERT(err == 0);
2149 
2150 	if (ds->ds_prev)
2151 		dsl_dataset_drop_ref(ds->ds_prev, ds);
2152 	VERIFY(0 == dsl_dataset_get_ref(dp,
2153 	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
2154 
2155 	dsl_scan_ds_snapshotted(ds, tx);
2156 
2157 	dsl_dir_snap_cmtime_update(ds->ds_dir);
2158 
2159 	spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, "");
2160 }
2161 
2162 void
2163 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
2164 {
2165 	ASSERT(dmu_tx_is_syncing(tx));
2166 	ASSERT(ds->ds_objset != NULL);
2167 	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
2168 
2169 	/*
2170 	 * in case we had to change ds_fsid_guid when we opened it,
2171 	 * sync it out now.
2172 	 */
2173 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
2174 	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
2175 
2176 	dmu_objset_sync(ds->ds_objset, zio, tx);
2177 }
2178 
2179 static void
2180 get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
2181 {
2182 	uint64_t count = 0;
2183 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
2184 	zap_cursor_t zc;
2185 	zap_attribute_t za;
2186 	nvlist_t *propval;
2187 	nvlist_t *val;
2188 
2189 	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2190 	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2191 	VERIFY(nvlist_alloc(&val, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2192 
2193 	/*
2194 	 * There may me missing entries in ds_next_clones_obj
2195 	 * due to a bug in a previous version of the code.
2196 	 * Only trust it if it has the right number of entries.
2197 	 */
2198 	if (ds->ds_phys->ds_next_clones_obj != 0) {
2199 		ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
2200 		    &count));
2201 	}
2202 	if (count != ds->ds_phys->ds_num_children - 1) {
2203 		goto fail;
2204 	}
2205 	for (zap_cursor_init(&zc, mos, ds->ds_phys->ds_next_clones_obj);
2206 	    zap_cursor_retrieve(&zc, &za) == 0;
2207 	    zap_cursor_advance(&zc)) {
2208 		dsl_dataset_t *clone;
2209 		char buf[ZFS_MAXNAMELEN];
2210 		/*
2211 		 * Even though we hold the dp_config_rwlock, the dataset
2212 		 * may fail to open, returning ENOENT.  If there is a
2213 		 * thread concurrently attempting to destroy this
2214 		 * dataset, it will have the ds_rwlock held for
2215 		 * RW_WRITER.  Our call to dsl_dataset_hold_obj() ->
2216 		 * dsl_dataset_hold_ref() will fail its
2217 		 * rw_tryenter(&ds->ds_rwlock, RW_READER), drop the
2218 		 * dp_config_rwlock, and wait for the destroy progress
2219 		 * and signal ds_exclusive_cv.  If the destroy was
2220 		 * successful, we will see that
2221 		 * DSL_DATASET_IS_DESTROYED(), and return ENOENT.
2222 		 */
2223 		if (dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
2224 		    za.za_first_integer, FTAG, &clone) != 0)
2225 			continue;
2226 		dsl_dir_name(clone->ds_dir, buf);
2227 		VERIFY(nvlist_add_boolean(val, buf) == 0);
2228 		dsl_dataset_rele(clone, FTAG);
2229 	}
2230 	zap_cursor_fini(&zc);
2231 	VERIFY(nvlist_add_nvlist(propval, ZPROP_VALUE, val) == 0);
2232 	VERIFY(nvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
2233 	    propval) == 0);
2234 fail:
2235 	nvlist_free(val);
2236 	nvlist_free(propval);
2237 	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2238 }
2239 
2240 void
2241 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
2242 {
2243 	uint64_t refd, avail, uobjs, aobjs, ratio;
2244 
2245 	ratio = ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2246 	    (ds->ds_phys->ds_uncompressed_bytes * 100 /
2247 	    ds->ds_phys->ds_compressed_bytes);
2248 
2249 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO, ratio);
2250 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
2251 	    ds->ds_phys->ds_uncompressed_bytes);
2252 
2253 	if (dsl_dataset_is_snapshot(ds)) {
2254 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, ratio);
2255 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2256 		    ds->ds_phys->ds_unique_bytes);
2257 		get_clones_stat(ds, nv);
2258 	} else {
2259 		dsl_dir_stats(ds->ds_dir, nv);
2260 	}
2261 
2262 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
2263 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
2264 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
2265 
2266 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
2267 	    ds->ds_phys->ds_creation_time);
2268 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2269 	    ds->ds_phys->ds_creation_txg);
2270 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2271 	    ds->ds_quota);
2272 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2273 	    ds->ds_reserved);
2274 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2275 	    ds->ds_phys->ds_guid);
2276 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2277 	    ds->ds_phys->ds_unique_bytes);
2278 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2279 	    ds->ds_object);
2280 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2281 	    ds->ds_userrefs);
2282 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2283 	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2284 
2285 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
2286 		uint64_t written, comp, uncomp;
2287 		dsl_pool_t *dp = ds->ds_dir->dd_pool;
2288 		dsl_dataset_t *prev;
2289 
2290 		rw_enter(&dp->dp_config_rwlock, RW_READER);
2291 		int err = dsl_dataset_hold_obj(dp,
2292 		    ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
2293 		rw_exit(&dp->dp_config_rwlock);
2294 		if (err == 0) {
2295 			err = dsl_dataset_space_written(prev, ds, &written,
2296 			    &comp, &uncomp);
2297 			dsl_dataset_rele(prev, FTAG);
2298 			if (err == 0) {
2299 				dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
2300 				    written);
2301 			}
2302 		}
2303 	}
2304 }
2305 
2306 void
2307 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2308 {
2309 	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2310 	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2311 	stat->dds_guid = ds->ds_phys->ds_guid;
2312 	stat->dds_origin[0] = '\0';
2313 	if (dsl_dataset_is_snapshot(ds)) {
2314 		stat->dds_is_snapshot = B_TRUE;
2315 		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2316 	} else {
2317 		stat->dds_is_snapshot = B_FALSE;
2318 		stat->dds_num_clones = 0;
2319 
2320 		rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2321 		if (dsl_dir_is_clone(ds->ds_dir)) {
2322 			dsl_dataset_t *ods;
2323 
2324 			VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2325 			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2326 			dsl_dataset_name(ods, stat->dds_origin);
2327 			dsl_dataset_drop_ref(ods, FTAG);
2328 		}
2329 		rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2330 	}
2331 }
2332 
2333 uint64_t
2334 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2335 {
2336 	return (ds->ds_fsid_guid);
2337 }
2338 
2339 void
2340 dsl_dataset_space(dsl_dataset_t *ds,
2341     uint64_t *refdbytesp, uint64_t *availbytesp,
2342     uint64_t *usedobjsp, uint64_t *availobjsp)
2343 {
2344 	*refdbytesp = ds->ds_phys->ds_referenced_bytes;
2345 	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2346 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2347 		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2348 	if (ds->ds_quota != 0) {
2349 		/*
2350 		 * Adjust available bytes according to refquota
2351 		 */
2352 		if (*refdbytesp < ds->ds_quota)
2353 			*availbytesp = MIN(*availbytesp,
2354 			    ds->ds_quota - *refdbytesp);
2355 		else
2356 			*availbytesp = 0;
2357 	}
2358 	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2359 	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
2360 }
2361 
2362 boolean_t
2363 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2364 {
2365 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2366 
2367 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2368 	    dsl_pool_sync_context(dp));
2369 	if (ds->ds_prev == NULL)
2370 		return (B_FALSE);
2371 	if (ds->ds_phys->ds_bp.blk_birth >
2372 	    ds->ds_prev->ds_phys->ds_creation_txg) {
2373 		objset_t *os, *os_prev;
2374 		/*
2375 		 * It may be that only the ZIL differs, because it was
2376 		 * reset in the head.  Don't count that as being
2377 		 * modified.
2378 		 */
2379 		if (dmu_objset_from_ds(ds, &os) != 0)
2380 			return (B_TRUE);
2381 		if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0)
2382 			return (B_TRUE);
2383 		return (bcmp(&os->os_phys->os_meta_dnode,
2384 		    &os_prev->os_phys->os_meta_dnode,
2385 		    sizeof (os->os_phys->os_meta_dnode)) != 0);
2386 	}
2387 	return (B_FALSE);
2388 }
2389 
2390 /* ARGSUSED */
2391 static int
2392 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2393 {
2394 	dsl_dataset_t *ds = arg1;
2395 	char *newsnapname = arg2;
2396 	dsl_dir_t *dd = ds->ds_dir;
2397 	dsl_dataset_t *hds;
2398 	uint64_t val;
2399 	int err;
2400 
2401 	err = dsl_dataset_hold_obj(dd->dd_pool,
2402 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2403 	if (err)
2404 		return (err);
2405 
2406 	/* new name better not be in use */
2407 	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2408 	dsl_dataset_rele(hds, FTAG);
2409 
2410 	if (err == 0)
2411 		err = EEXIST;
2412 	else if (err == ENOENT)
2413 		err = 0;
2414 
2415 	/* dataset name + 1 for the "@" + the new snapshot name must fit */
2416 	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2417 		err = ENAMETOOLONG;
2418 
2419 	return (err);
2420 }
2421 
2422 static void
2423 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2424 {
2425 	dsl_dataset_t *ds = arg1;
2426 	const char *newsnapname = arg2;
2427 	dsl_dir_t *dd = ds->ds_dir;
2428 	objset_t *mos = dd->dd_pool->dp_meta_objset;
2429 	dsl_dataset_t *hds;
2430 	int err;
2431 
2432 	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2433 
2434 	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2435 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2436 
2437 	VERIFY(0 == dsl_dataset_get_snapname(ds));
2438 	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2439 	ASSERT0(err);
2440 	mutex_enter(&ds->ds_lock);
2441 	(void) strcpy(ds->ds_snapname, newsnapname);
2442 	mutex_exit(&ds->ds_lock);
2443 	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2444 	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2445 	ASSERT0(err);
2446 
2447 	spa_history_log_internal_ds(ds, "rename", tx,
2448 	    "-> @%s", newsnapname);
2449 	dsl_dataset_rele(hds, FTAG);
2450 }
2451 
2452 struct renamesnaparg {
2453 	dsl_sync_task_group_t *dstg;
2454 	char failed[MAXPATHLEN];
2455 	char *oldsnap;
2456 	char *newsnap;
2457 };
2458 
2459 static int
2460 dsl_snapshot_rename_one(const char *name, void *arg)
2461 {
2462 	struct renamesnaparg *ra = arg;
2463 	dsl_dataset_t *ds = NULL;
2464 	char *snapname;
2465 	int err;
2466 
2467 	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2468 	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2469 
2470 	/*
2471 	 * For recursive snapshot renames the parent won't be changing
2472 	 * so we just pass name for both the to/from argument.
2473 	 */
2474 	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2475 	if (err != 0) {
2476 		strfree(snapname);
2477 		return (err == ENOENT ? 0 : err);
2478 	}
2479 
2480 #ifdef _KERNEL
2481 	/*
2482 	 * For all filesystems undergoing rename, we'll need to unmount it.
2483 	 */
2484 	(void) zfs_unmount_snap(snapname, NULL);
2485 #endif
2486 	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2487 	strfree(snapname);
2488 	if (err != 0)
2489 		return (err == ENOENT ? 0 : err);
2490 
2491 	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2492 	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2493 
2494 	return (0);
2495 }
2496 
2497 static int
2498 dsl_recursive_rename(char *oldname, const char *newname)
2499 {
2500 	int err;
2501 	struct renamesnaparg *ra;
2502 	dsl_sync_task_t *dst;
2503 	spa_t *spa;
2504 	char *cp, *fsname = spa_strdup(oldname);
2505 	int len = strlen(oldname) + 1;
2506 
2507 	/* truncate the snapshot name to get the fsname */
2508 	cp = strchr(fsname, '@');
2509 	*cp = '\0';
2510 
2511 	err = spa_open(fsname, &spa, FTAG);
2512 	if (err) {
2513 		kmem_free(fsname, len);
2514 		return (err);
2515 	}
2516 	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2517 	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2518 
2519 	ra->oldsnap = strchr(oldname, '@') + 1;
2520 	ra->newsnap = strchr(newname, '@') + 1;
2521 	*ra->failed = '\0';
2522 
2523 	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2524 	    DS_FIND_CHILDREN);
2525 	kmem_free(fsname, len);
2526 
2527 	if (err == 0) {
2528 		err = dsl_sync_task_group_wait(ra->dstg);
2529 	}
2530 
2531 	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2532 	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2533 		dsl_dataset_t *ds = dst->dst_arg1;
2534 		if (dst->dst_err) {
2535 			dsl_dir_name(ds->ds_dir, ra->failed);
2536 			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
2537 			(void) strlcat(ra->failed, ra->newsnap,
2538 			    sizeof (ra->failed));
2539 		}
2540 		dsl_dataset_rele(ds, ra->dstg);
2541 	}
2542 
2543 	if (err)
2544 		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2545 
2546 	dsl_sync_task_group_destroy(ra->dstg);
2547 	kmem_free(ra, sizeof (struct renamesnaparg));
2548 	spa_close(spa, FTAG);
2549 	return (err);
2550 }
2551 
2552 static int
2553 dsl_valid_rename(const char *oldname, void *arg)
2554 {
2555 	int delta = *(int *)arg;
2556 
2557 	if (strlen(oldname) + delta >= MAXNAMELEN)
2558 		return (ENAMETOOLONG);
2559 
2560 	return (0);
2561 }
2562 
2563 #pragma weak dmu_objset_rename = dsl_dataset_rename
2564 int
2565 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2566 {
2567 	dsl_dir_t *dd;
2568 	dsl_dataset_t *ds;
2569 	const char *tail;
2570 	int err;
2571 
2572 	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2573 	if (err)
2574 		return (err);
2575 
2576 	if (tail == NULL) {
2577 		int delta = strlen(newname) - strlen(oldname);
2578 
2579 		/* if we're growing, validate child name lengths */
2580 		if (delta > 0)
2581 			err = dmu_objset_find(oldname, dsl_valid_rename,
2582 			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2583 
2584 		if (err == 0)
2585 			err = dsl_dir_rename(dd, newname);
2586 		dsl_dir_close(dd, FTAG);
2587 		return (err);
2588 	}
2589 
2590 	if (tail[0] != '@') {
2591 		/* the name ended in a nonexistent component */
2592 		dsl_dir_close(dd, FTAG);
2593 		return (ENOENT);
2594 	}
2595 
2596 	dsl_dir_close(dd, FTAG);
2597 
2598 	/* new name must be snapshot in same filesystem */
2599 	tail = strchr(newname, '@');
2600 	if (tail == NULL)
2601 		return (EINVAL);
2602 	tail++;
2603 	if (strncmp(oldname, newname, tail - newname) != 0)
2604 		return (EXDEV);
2605 
2606 	if (recursive) {
2607 		err = dsl_recursive_rename(oldname, newname);
2608 	} else {
2609 		err = dsl_dataset_hold(oldname, FTAG, &ds);
2610 		if (err)
2611 			return (err);
2612 
2613 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2614 		    dsl_dataset_snapshot_rename_check,
2615 		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2616 
2617 		dsl_dataset_rele(ds, FTAG);
2618 	}
2619 
2620 	return (err);
2621 }
2622 
2623 struct promotenode {
2624 	list_node_t link;
2625 	dsl_dataset_t *ds;
2626 };
2627 
2628 struct promotearg {
2629 	list_t shared_snaps, origin_snaps, clone_snaps;
2630 	dsl_dataset_t *origin_origin;
2631 	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2632 	char *err_ds;
2633 };
2634 
2635 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2636 static boolean_t snaplist_unstable(list_t *l);
2637 
2638 static int
2639 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2640 {
2641 	dsl_dataset_t *hds = arg1;
2642 	struct promotearg *pa = arg2;
2643 	struct promotenode *snap = list_head(&pa->shared_snaps);
2644 	dsl_dataset_t *origin_ds = snap->ds;
2645 	int err;
2646 	uint64_t unused;
2647 
2648 	/* Check that it is a real clone */
2649 	if (!dsl_dir_is_clone(hds->ds_dir))
2650 		return (EINVAL);
2651 
2652 	/* Since this is so expensive, don't do the preliminary check */
2653 	if (!dmu_tx_is_syncing(tx))
2654 		return (0);
2655 
2656 	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2657 		return (EXDEV);
2658 
2659 	/* compute origin's new unique space */
2660 	snap = list_tail(&pa->clone_snaps);
2661 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2662 	dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2663 	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2664 	    &pa->unique, &unused, &unused);
2665 
2666 	/*
2667 	 * Walk the snapshots that we are moving
2668 	 *
2669 	 * Compute space to transfer.  Consider the incremental changes
2670 	 * to used for each snapshot:
2671 	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2672 	 * So each snapshot gave birth to:
2673 	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2674 	 * So a sequence would look like:
2675 	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2676 	 * Which simplifies to:
2677 	 * uN + kN + kN-1 + ... + k1 + k0
2678 	 * Note however, if we stop before we reach the ORIGIN we get:
2679 	 * uN + kN + kN-1 + ... + kM - uM-1
2680 	 */
2681 	pa->used = origin_ds->ds_phys->ds_referenced_bytes;
2682 	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2683 	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2684 	for (snap = list_head(&pa->shared_snaps); snap;
2685 	    snap = list_next(&pa->shared_snaps, snap)) {
2686 		uint64_t val, dlused, dlcomp, dluncomp;
2687 		dsl_dataset_t *ds = snap->ds;
2688 
2689 		/* Check that the snapshot name does not conflict */
2690 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2691 		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2692 		if (err == 0) {
2693 			err = EEXIST;
2694 			goto out;
2695 		}
2696 		if (err != ENOENT)
2697 			goto out;
2698 
2699 		/* The very first snapshot does not have a deadlist */
2700 		if (ds->ds_phys->ds_prev_snap_obj == 0)
2701 			continue;
2702 
2703 		dsl_deadlist_space(&ds->ds_deadlist,
2704 		    &dlused, &dlcomp, &dluncomp);
2705 		pa->used += dlused;
2706 		pa->comp += dlcomp;
2707 		pa->uncomp += dluncomp;
2708 	}
2709 
2710 	/*
2711 	 * If we are a clone of a clone then we never reached ORIGIN,
2712 	 * so we need to subtract out the clone origin's used space.
2713 	 */
2714 	if (pa->origin_origin) {
2715 		pa->used -= pa->origin_origin->ds_phys->ds_referenced_bytes;
2716 		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2717 		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2718 	}
2719 
2720 	/* Check that there is enough space here */
2721 	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2722 	    pa->used);
2723 	if (err)
2724 		return (err);
2725 
2726 	/*
2727 	 * Compute the amounts of space that will be used by snapshots
2728 	 * after the promotion (for both origin and clone).  For each,
2729 	 * it is the amount of space that will be on all of their
2730 	 * deadlists (that was not born before their new origin).
2731 	 */
2732 	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2733 		uint64_t space;
2734 
2735 		/*
2736 		 * Note, typically this will not be a clone of a clone,
2737 		 * so dd_origin_txg will be < TXG_INITIAL, so
2738 		 * these snaplist_space() -> dsl_deadlist_space_range()
2739 		 * calls will be fast because they do not have to
2740 		 * iterate over all bps.
2741 		 */
2742 		snap = list_head(&pa->origin_snaps);
2743 		err = snaplist_space(&pa->shared_snaps,
2744 		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2745 		if (err)
2746 			return (err);
2747 
2748 		err = snaplist_space(&pa->clone_snaps,
2749 		    snap->ds->ds_dir->dd_origin_txg, &space);
2750 		if (err)
2751 			return (err);
2752 		pa->cloneusedsnap += space;
2753 	}
2754 	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2755 		err = snaplist_space(&pa->origin_snaps,
2756 		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2757 		if (err)
2758 			return (err);
2759 	}
2760 
2761 	return (0);
2762 out:
2763 	pa->err_ds =  snap->ds->ds_snapname;
2764 	return (err);
2765 }
2766 
2767 static void
2768 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2769 {
2770 	dsl_dataset_t *hds = arg1;
2771 	struct promotearg *pa = arg2;
2772 	struct promotenode *snap = list_head(&pa->shared_snaps);
2773 	dsl_dataset_t *origin_ds = snap->ds;
2774 	dsl_dataset_t *origin_head;
2775 	dsl_dir_t *dd = hds->ds_dir;
2776 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2777 	dsl_dir_t *odd = NULL;
2778 	uint64_t oldnext_obj;
2779 	int64_t delta;
2780 
2781 	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2782 
2783 	snap = list_head(&pa->origin_snaps);
2784 	origin_head = snap->ds;
2785 
2786 	/*
2787 	 * We need to explicitly open odd, since origin_ds's dd will be
2788 	 * changing.
2789 	 */
2790 	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2791 	    NULL, FTAG, &odd));
2792 
2793 	/* change origin's next snap */
2794 	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2795 	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2796 	snap = list_tail(&pa->clone_snaps);
2797 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2798 	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2799 
2800 	/* change the origin's next clone */
2801 	if (origin_ds->ds_phys->ds_next_clones_obj) {
2802 		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2803 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2804 		    origin_ds->ds_phys->ds_next_clones_obj,
2805 		    oldnext_obj, tx));
2806 	}
2807 
2808 	/* change origin */
2809 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
2810 	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2811 	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2812 	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2813 	dmu_buf_will_dirty(odd->dd_dbuf, tx);
2814 	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2815 	origin_head->ds_dir->dd_origin_txg =
2816 	    origin_ds->ds_phys->ds_creation_txg;
2817 
2818 	/* change dd_clone entries */
2819 	if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2820 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2821 		    odd->dd_phys->dd_clones, hds->ds_object, tx));
2822 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2823 		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
2824 		    hds->ds_object, tx));
2825 
2826 		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
2827 		    pa->origin_origin->ds_dir->dd_phys->dd_clones,
2828 		    origin_head->ds_object, tx));
2829 		if (dd->dd_phys->dd_clones == 0) {
2830 			dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset,
2831 			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
2832 		}
2833 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2834 		    dd->dd_phys->dd_clones, origin_head->ds_object, tx));
2835 
2836 	}
2837 
2838 	/* move snapshots to this dir */
2839 	for (snap = list_head(&pa->shared_snaps); snap;
2840 	    snap = list_next(&pa->shared_snaps, snap)) {
2841 		dsl_dataset_t *ds = snap->ds;
2842 
2843 		/* unregister props as dsl_dir is changing */
2844 		if (ds->ds_objset) {
2845 			dmu_objset_evict(ds->ds_objset);
2846 			ds->ds_objset = NULL;
2847 		}
2848 		/* move snap name entry */
2849 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2850 		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2851 		    ds->ds_snapname, tx));
2852 		VERIFY(0 == zap_add(dp->dp_meta_objset,
2853 		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2854 		    8, 1, &ds->ds_object, tx));
2855 
2856 		/* change containing dsl_dir */
2857 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2858 		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2859 		ds->ds_phys->ds_dir_obj = dd->dd_object;
2860 		ASSERT3P(ds->ds_dir, ==, odd);
2861 		dsl_dir_close(ds->ds_dir, ds);
2862 		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2863 		    NULL, ds, &ds->ds_dir));
2864 
2865 		/* move any clone references */
2866 		if (ds->ds_phys->ds_next_clones_obj &&
2867 		    spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
2868 			zap_cursor_t zc;
2869 			zap_attribute_t za;
2870 
2871 			for (zap_cursor_init(&zc, dp->dp_meta_objset,
2872 			    ds->ds_phys->ds_next_clones_obj);
2873 			    zap_cursor_retrieve(&zc, &za) == 0;
2874 			    zap_cursor_advance(&zc)) {
2875 				dsl_dataset_t *cnds;
2876 				uint64_t o;
2877 
2878 				if (za.za_first_integer == oldnext_obj) {
2879 					/*
2880 					 * We've already moved the
2881 					 * origin's reference.
2882 					 */
2883 					continue;
2884 				}
2885 
2886 				VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
2887 				    za.za_first_integer, FTAG, &cnds));
2888 				o = cnds->ds_dir->dd_phys->dd_head_dataset_obj;
2889 
2890 				VERIFY3U(zap_remove_int(dp->dp_meta_objset,
2891 				    odd->dd_phys->dd_clones, o, tx), ==, 0);
2892 				VERIFY3U(zap_add_int(dp->dp_meta_objset,
2893 				    dd->dd_phys->dd_clones, o, tx), ==, 0);
2894 				dsl_dataset_rele(cnds, FTAG);
2895 			}
2896 			zap_cursor_fini(&zc);
2897 		}
2898 
2899 		ASSERT0(dsl_prop_numcb(ds));
2900 	}
2901 
2902 	/*
2903 	 * Change space accounting.
2904 	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2905 	 * both be valid, or both be 0 (resulting in delta == 0).  This
2906 	 * is true for each of {clone,origin} independently.
2907 	 */
2908 
2909 	delta = pa->cloneusedsnap -
2910 	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2911 	ASSERT3S(delta, >=, 0);
2912 	ASSERT3U(pa->used, >=, delta);
2913 	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2914 	dsl_dir_diduse_space(dd, DD_USED_HEAD,
2915 	    pa->used - delta, pa->comp, pa->uncomp, tx);
2916 
2917 	delta = pa->originusedsnap -
2918 	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2919 	ASSERT3S(delta, <=, 0);
2920 	ASSERT3U(pa->used, >=, -delta);
2921 	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2922 	dsl_dir_diduse_space(odd, DD_USED_HEAD,
2923 	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2924 
2925 	origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2926 
2927 	/* log history record */
2928 	spa_history_log_internal_ds(hds, "promote", tx, "");
2929 
2930 	dsl_dir_close(odd, FTAG);
2931 }
2932 
2933 static char *snaplist_tag = "snaplist";
2934 /*
2935  * Make a list of dsl_dataset_t's for the snapshots between first_obj
2936  * (exclusive) and last_obj (inclusive).  The list will be in reverse
2937  * order (last_obj will be the list_head()).  If first_obj == 0, do all
2938  * snapshots back to this dataset's origin.
2939  */
2940 static int
2941 snaplist_make(dsl_pool_t *dp, boolean_t own,
2942     uint64_t first_obj, uint64_t last_obj, list_t *l)
2943 {
2944 	uint64_t obj = last_obj;
2945 
2946 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2947 
2948 	list_create(l, sizeof (struct promotenode),
2949 	    offsetof(struct promotenode, link));
2950 
2951 	while (obj != first_obj) {
2952 		dsl_dataset_t *ds;
2953 		struct promotenode *snap;
2954 		int err;
2955 
2956 		if (own) {
2957 			err = dsl_dataset_own_obj(dp, obj,
2958 			    0, snaplist_tag, &ds);
2959 			if (err == 0)
2960 				dsl_dataset_make_exclusive(ds, snaplist_tag);
2961 		} else {
2962 			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2963 		}
2964 		if (err == ENOENT) {
2965 			/* lost race with snapshot destroy */
2966 			struct promotenode *last = list_tail(l);
2967 			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2968 			obj = last->ds->ds_phys->ds_prev_snap_obj;
2969 			continue;
2970 		} else if (err) {
2971 			return (err);
2972 		}
2973 
2974 		if (first_obj == 0)
2975 			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2976 
2977 		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2978 		snap->ds = ds;
2979 		list_insert_tail(l, snap);
2980 		obj = ds->ds_phys->ds_prev_snap_obj;
2981 	}
2982 
2983 	return (0);
2984 }
2985 
2986 static int
2987 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2988 {
2989 	struct promotenode *snap;
2990 
2991 	*spacep = 0;
2992 	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2993 		uint64_t used, comp, uncomp;
2994 		dsl_deadlist_space_range(&snap->ds->ds_deadlist,
2995 		    mintxg, UINT64_MAX, &used, &comp, &uncomp);
2996 		*spacep += used;
2997 	}
2998 	return (0);
2999 }
3000 
3001 static void
3002 snaplist_destroy(list_t *l, boolean_t own)
3003 {
3004 	struct promotenode *snap;
3005 
3006 	if (!l || !list_link_active(&l->list_head))
3007 		return;
3008 
3009 	while ((snap = list_tail(l)) != NULL) {
3010 		list_remove(l, snap);
3011 		if (own)
3012 			dsl_dataset_disown(snap->ds, snaplist_tag);
3013 		else
3014 			dsl_dataset_rele(snap->ds, snaplist_tag);
3015 		kmem_free(snap, sizeof (struct promotenode));
3016 	}
3017 	list_destroy(l);
3018 }
3019 
3020 /*
3021  * Promote a clone.  Nomenclature note:
3022  * "clone" or "cds": the original clone which is being promoted
3023  * "origin" or "ods": the snapshot which is originally clone's origin
3024  * "origin head" or "ohds": the dataset which is the head
3025  * (filesystem/volume) for the origin
3026  * "origin origin": the origin of the origin's filesystem (typically
3027  * NULL, indicating that the clone is not a clone of a clone).
3028  */
3029 int
3030 dsl_dataset_promote(const char *name, char *conflsnap)
3031 {
3032 	dsl_dataset_t *ds;
3033 	dsl_dir_t *dd;
3034 	dsl_pool_t *dp;
3035 	dmu_object_info_t doi;
3036 	struct promotearg pa = { 0 };
3037 	struct promotenode *snap;
3038 	int err;
3039 
3040 	err = dsl_dataset_hold(name, FTAG, &ds);
3041 	if (err)
3042 		return (err);
3043 	dd = ds->ds_dir;
3044 	dp = dd->dd_pool;
3045 
3046 	err = dmu_object_info(dp->dp_meta_objset,
3047 	    ds->ds_phys->ds_snapnames_zapobj, &doi);
3048 	if (err) {
3049 		dsl_dataset_rele(ds, FTAG);
3050 		return (err);
3051 	}
3052 
3053 	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
3054 		dsl_dataset_rele(ds, FTAG);
3055 		return (EINVAL);
3056 	}
3057 
3058 	/*
3059 	 * We are going to inherit all the snapshots taken before our
3060 	 * origin (i.e., our new origin will be our parent's origin).
3061 	 * Take ownership of them so that we can rename them into our
3062 	 * namespace.
3063 	 */
3064 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3065 
3066 	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
3067 	    &pa.shared_snaps);
3068 	if (err != 0)
3069 		goto out;
3070 
3071 	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
3072 	if (err != 0)
3073 		goto out;
3074 
3075 	snap = list_head(&pa.shared_snaps);
3076 	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
3077 	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
3078 	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
3079 	if (err != 0)
3080 		goto out;
3081 
3082 	if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) {
3083 		err = dsl_dataset_hold_obj(dp,
3084 		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
3085 		    FTAG, &pa.origin_origin);
3086 		if (err != 0)
3087 			goto out;
3088 	}
3089 
3090 out:
3091 	rw_exit(&dp->dp_config_rwlock);
3092 
3093 	/*
3094 	 * Add in 128x the snapnames zapobj size, since we will be moving
3095 	 * a bunch of snapnames to the promoted ds, and dirtying their
3096 	 * bonus buffers.
3097 	 */
3098 	if (err == 0) {
3099 		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
3100 		    dsl_dataset_promote_sync, ds, &pa,
3101 		    2 + 2 * doi.doi_physical_blocks_512);
3102 		if (err && pa.err_ds && conflsnap)
3103 			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
3104 	}
3105 
3106 	snaplist_destroy(&pa.shared_snaps, B_TRUE);
3107 	snaplist_destroy(&pa.clone_snaps, B_FALSE);
3108 	snaplist_destroy(&pa.origin_snaps, B_FALSE);
3109 	if (pa.origin_origin)
3110 		dsl_dataset_rele(pa.origin_origin, FTAG);
3111 	dsl_dataset_rele(ds, FTAG);
3112 	return (err);
3113 }
3114 
3115 struct cloneswaparg {
3116 	dsl_dataset_t *cds; /* clone dataset */
3117 	dsl_dataset_t *ohds; /* origin's head dataset */
3118 	boolean_t force;
3119 	int64_t unused_refres_delta; /* change in unconsumed refreservation */
3120 };
3121 
3122 /* ARGSUSED */
3123 static int
3124 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
3125 {
3126 	struct cloneswaparg *csa = arg1;
3127 
3128 	/* they should both be heads */
3129 	if (dsl_dataset_is_snapshot(csa->cds) ||
3130 	    dsl_dataset_is_snapshot(csa->ohds))
3131 		return (EINVAL);
3132 
3133 	/* the branch point should be just before them */
3134 	if (csa->cds->ds_prev != csa->ohds->ds_prev)
3135 		return (EINVAL);
3136 
3137 	/* cds should be the clone (unless they are unrelated) */
3138 	if (csa->cds->ds_prev != NULL &&
3139 	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
3140 	    csa->ohds->ds_object !=
3141 	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
3142 		return (EINVAL);
3143 
3144 	/* the clone should be a child of the origin */
3145 	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
3146 		return (EINVAL);
3147 
3148 	/* ohds shouldn't be modified unless 'force' */
3149 	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
3150 		return (ETXTBSY);
3151 
3152 	/* adjust amount of any unconsumed refreservation */
3153 	csa->unused_refres_delta =
3154 	    (int64_t)MIN(csa->ohds->ds_reserved,
3155 	    csa->ohds->ds_phys->ds_unique_bytes) -
3156 	    (int64_t)MIN(csa->ohds->ds_reserved,
3157 	    csa->cds->ds_phys->ds_unique_bytes);
3158 
3159 	if (csa->unused_refres_delta > 0 &&
3160 	    csa->unused_refres_delta >
3161 	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
3162 		return (ENOSPC);
3163 
3164 	if (csa->ohds->ds_quota != 0 &&
3165 	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
3166 		return (EDQUOT);
3167 
3168 	return (0);
3169 }
3170 
3171 /* ARGSUSED */
3172 static void
3173 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3174 {
3175 	struct cloneswaparg *csa = arg1;
3176 	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
3177 
3178 	ASSERT(csa->cds->ds_reserved == 0);
3179 	ASSERT(csa->ohds->ds_quota == 0 ||
3180 	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
3181 
3182 	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
3183 	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
3184 
3185 	if (csa->cds->ds_objset != NULL) {
3186 		dmu_objset_evict(csa->cds->ds_objset);
3187 		csa->cds->ds_objset = NULL;
3188 	}
3189 
3190 	if (csa->ohds->ds_objset != NULL) {
3191 		dmu_objset_evict(csa->ohds->ds_objset);
3192 		csa->ohds->ds_objset = NULL;
3193 	}
3194 
3195 	/*
3196 	 * Reset origin's unique bytes, if it exists.
3197 	 */
3198 	if (csa->cds->ds_prev) {
3199 		dsl_dataset_t *origin = csa->cds->ds_prev;
3200 		uint64_t comp, uncomp;
3201 
3202 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
3203 		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3204 		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
3205 		    &origin->ds_phys->ds_unique_bytes, &comp, &uncomp);
3206 	}
3207 
3208 	/* swap blkptrs */
3209 	{
3210 		blkptr_t tmp;
3211 		tmp = csa->ohds->ds_phys->ds_bp;
3212 		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
3213 		csa->cds->ds_phys->ds_bp = tmp;
3214 	}
3215 
3216 	/* set dd_*_bytes */
3217 	{
3218 		int64_t dused, dcomp, duncomp;
3219 		uint64_t cdl_used, cdl_comp, cdl_uncomp;
3220 		uint64_t odl_used, odl_comp, odl_uncomp;
3221 
3222 		ASSERT3U(csa->cds->ds_dir->dd_phys->
3223 		    dd_used_breakdown[DD_USED_SNAP], ==, 0);
3224 
3225 		dsl_deadlist_space(&csa->cds->ds_deadlist,
3226 		    &cdl_used, &cdl_comp, &cdl_uncomp);
3227 		dsl_deadlist_space(&csa->ohds->ds_deadlist,
3228 		    &odl_used, &odl_comp, &odl_uncomp);
3229 
3230 		dused = csa->cds->ds_phys->ds_referenced_bytes + cdl_used -
3231 		    (csa->ohds->ds_phys->ds_referenced_bytes + odl_used);
3232 		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
3233 		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
3234 		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
3235 		    cdl_uncomp -
3236 		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
3237 
3238 		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
3239 		    dused, dcomp, duncomp, tx);
3240 		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
3241 		    -dused, -dcomp, -duncomp, tx);
3242 
3243 		/*
3244 		 * The difference in the space used by snapshots is the
3245 		 * difference in snapshot space due to the head's
3246 		 * deadlist (since that's the only thing that's
3247 		 * changing that affects the snapused).
3248 		 */
3249 		dsl_deadlist_space_range(&csa->cds->ds_deadlist,
3250 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3251 		    &cdl_used, &cdl_comp, &cdl_uncomp);
3252 		dsl_deadlist_space_range(&csa->ohds->ds_deadlist,
3253 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX,
3254 		    &odl_used, &odl_comp, &odl_uncomp);
3255 		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
3256 		    DD_USED_HEAD, DD_USED_SNAP, tx);
3257 	}
3258 
3259 	/* swap ds_*_bytes */
3260 	SWITCH64(csa->ohds->ds_phys->ds_referenced_bytes,
3261 	    csa->cds->ds_phys->ds_referenced_bytes);
3262 	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
3263 	    csa->cds->ds_phys->ds_compressed_bytes);
3264 	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
3265 	    csa->cds->ds_phys->ds_uncompressed_bytes);
3266 	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
3267 	    csa->cds->ds_phys->ds_unique_bytes);
3268 
3269 	/* apply any parent delta for change in unconsumed refreservation */
3270 	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
3271 	    csa->unused_refres_delta, 0, 0, tx);
3272 
3273 	/*
3274 	 * Swap deadlists.
3275 	 */
3276 	dsl_deadlist_close(&csa->cds->ds_deadlist);
3277 	dsl_deadlist_close(&csa->ohds->ds_deadlist);
3278 	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
3279 	    csa->cds->ds_phys->ds_deadlist_obj);
3280 	dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
3281 	    csa->cds->ds_phys->ds_deadlist_obj);
3282 	dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
3283 	    csa->ohds->ds_phys->ds_deadlist_obj);
3284 
3285 	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
3286 
3287 	spa_history_log_internal_ds(csa->cds, "clone swap", tx,
3288 	    "parent=%s", csa->ohds->ds_dir->dd_myname);
3289 }
3290 
3291 /*
3292  * Swap 'clone' with its origin head datasets.  Used at the end of "zfs
3293  * recv" into an existing fs to swizzle the file system to the new
3294  * version, and by "zfs rollback".  Can also be used to swap two
3295  * independent head datasets if neither has any snapshots.
3296  */
3297 int
3298 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
3299     boolean_t force)
3300 {
3301 	struct cloneswaparg csa;
3302 	int error;
3303 
3304 	ASSERT(clone->ds_owner);
3305 	ASSERT(origin_head->ds_owner);
3306 retry:
3307 	/*
3308 	 * Need exclusive access for the swap. If we're swapping these
3309 	 * datasets back after an error, we already hold the locks.
3310 	 */
3311 	if (!RW_WRITE_HELD(&clone->ds_rwlock))
3312 		rw_enter(&clone->ds_rwlock, RW_WRITER);
3313 	if (!RW_WRITE_HELD(&origin_head->ds_rwlock) &&
3314 	    !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
3315 		rw_exit(&clone->ds_rwlock);
3316 		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
3317 		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
3318 			rw_exit(&origin_head->ds_rwlock);
3319 			goto retry;
3320 		}
3321 	}
3322 	csa.cds = clone;
3323 	csa.ohds = origin_head;
3324 	csa.force = force;
3325 	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
3326 	    dsl_dataset_clone_swap_check,
3327 	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
3328 	return (error);
3329 }
3330 
3331 /*
3332  * Given a pool name and a dataset object number in that pool,
3333  * return the name of that dataset.
3334  */
3335 int
3336 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
3337 {
3338 	spa_t *spa;
3339 	dsl_pool_t *dp;
3340 	dsl_dataset_t *ds;
3341 	int error;
3342 
3343 	if ((error = spa_open(pname, &spa, FTAG)) != 0)
3344 		return (error);
3345 	dp = spa_get_dsl(spa);
3346 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3347 	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3348 		dsl_dataset_name(ds, buf);
3349 		dsl_dataset_rele(ds, FTAG);
3350 	}
3351 	rw_exit(&dp->dp_config_rwlock);
3352 	spa_close(spa, FTAG);
3353 
3354 	return (error);
3355 }
3356 
3357 int
3358 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3359     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3360 {
3361 	int error = 0;
3362 
3363 	ASSERT3S(asize, >, 0);
3364 
3365 	/*
3366 	 * *ref_rsrv is the portion of asize that will come from any
3367 	 * unconsumed refreservation space.
3368 	 */
3369 	*ref_rsrv = 0;
3370 
3371 	mutex_enter(&ds->ds_lock);
3372 	/*
3373 	 * Make a space adjustment for reserved bytes.
3374 	 */
3375 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3376 		ASSERT3U(*used, >=,
3377 		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3378 		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3379 		*ref_rsrv =
3380 		    asize - MIN(asize, parent_delta(ds, asize + inflight));
3381 	}
3382 
3383 	if (!check_quota || ds->ds_quota == 0) {
3384 		mutex_exit(&ds->ds_lock);
3385 		return (0);
3386 	}
3387 	/*
3388 	 * If they are requesting more space, and our current estimate
3389 	 * is over quota, they get to try again unless the actual
3390 	 * on-disk is over quota and there are no pending changes (which
3391 	 * may free up space for us).
3392 	 */
3393 	if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
3394 		if (inflight > 0 ||
3395 		    ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
3396 			error = ERESTART;
3397 		else
3398 			error = EDQUOT;
3399 	}
3400 	mutex_exit(&ds->ds_lock);
3401 
3402 	return (error);
3403 }
3404 
3405 /* ARGSUSED */
3406 static int
3407 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3408 {
3409 	dsl_dataset_t *ds = arg1;
3410 	dsl_prop_setarg_t *psa = arg2;
3411 	int err;
3412 
3413 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3414 		return (ENOTSUP);
3415 
3416 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3417 		return (err);
3418 
3419 	if (psa->psa_effective_value == 0)
3420 		return (0);
3421 
3422 	if (psa->psa_effective_value < ds->ds_phys->ds_referenced_bytes ||
3423 	    psa->psa_effective_value < ds->ds_reserved)
3424 		return (ENOSPC);
3425 
3426 	return (0);
3427 }
3428 
3429 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3430 
3431 void
3432 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3433 {
3434 	dsl_dataset_t *ds = arg1;
3435 	dsl_prop_setarg_t *psa = arg2;
3436 	uint64_t effective_value = psa->psa_effective_value;
3437 
3438 	dsl_prop_set_sync(ds, psa, tx);
3439 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3440 
3441 	if (ds->ds_quota != effective_value) {
3442 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3443 		ds->ds_quota = effective_value;
3444 	}
3445 }
3446 
3447 int
3448 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3449 {
3450 	dsl_dataset_t *ds;
3451 	dsl_prop_setarg_t psa;
3452 	int err;
3453 
3454 	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3455 
3456 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3457 	if (err)
3458 		return (err);
3459 
3460 	/*
3461 	 * If someone removes a file, then tries to set the quota, we
3462 	 * want to make sure the file freeing takes effect.
3463 	 */
3464 	txg_wait_open(ds->ds_dir->dd_pool, 0);
3465 
3466 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3467 	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3468 	    ds, &psa, 0);
3469 
3470 	dsl_dataset_rele(ds, FTAG);
3471 	return (err);
3472 }
3473 
3474 static int
3475 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3476 {
3477 	dsl_dataset_t *ds = arg1;
3478 	dsl_prop_setarg_t *psa = arg2;
3479 	uint64_t effective_value;
3480 	uint64_t unique;
3481 	int err;
3482 
3483 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3484 	    SPA_VERSION_REFRESERVATION)
3485 		return (ENOTSUP);
3486 
3487 	if (dsl_dataset_is_snapshot(ds))
3488 		return (EINVAL);
3489 
3490 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3491 		return (err);
3492 
3493 	effective_value = psa->psa_effective_value;
3494 
3495 	/*
3496 	 * If we are doing the preliminary check in open context, the
3497 	 * space estimates may be inaccurate.
3498 	 */
3499 	if (!dmu_tx_is_syncing(tx))
3500 		return (0);
3501 
3502 	mutex_enter(&ds->ds_lock);
3503 	if (!DS_UNIQUE_IS_ACCURATE(ds))
3504 		dsl_dataset_recalc_head_uniq(ds);
3505 	unique = ds->ds_phys->ds_unique_bytes;
3506 	mutex_exit(&ds->ds_lock);
3507 
3508 	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3509 		uint64_t delta = MAX(unique, effective_value) -
3510 		    MAX(unique, ds->ds_reserved);
3511 
3512 		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3513 			return (ENOSPC);
3514 		if (ds->ds_quota > 0 &&
3515 		    effective_value > ds->ds_quota)
3516 			return (ENOSPC);
3517 	}
3518 
3519 	return (0);
3520 }
3521 
3522 static void
3523 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3524 {
3525 	dsl_dataset_t *ds = arg1;
3526 	dsl_prop_setarg_t *psa = arg2;
3527 	uint64_t effective_value = psa->psa_effective_value;
3528 	uint64_t unique;
3529 	int64_t delta;
3530 
3531 	dsl_prop_set_sync(ds, psa, tx);
3532 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3533 
3534 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
3535 
3536 	mutex_enter(&ds->ds_dir->dd_lock);
3537 	mutex_enter(&ds->ds_lock);
3538 	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3539 	unique = ds->ds_phys->ds_unique_bytes;
3540 	delta = MAX(0, (int64_t)(effective_value - unique)) -
3541 	    MAX(0, (int64_t)(ds->ds_reserved - unique));
3542 	ds->ds_reserved = effective_value;
3543 	mutex_exit(&ds->ds_lock);
3544 
3545 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3546 	mutex_exit(&ds->ds_dir->dd_lock);
3547 }
3548 
3549 int
3550 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3551     uint64_t reservation)
3552 {
3553 	dsl_dataset_t *ds;
3554 	dsl_prop_setarg_t psa;
3555 	int err;
3556 
3557 	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3558 	    &reservation);
3559 
3560 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3561 	if (err)
3562 		return (err);
3563 
3564 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3565 	    dsl_dataset_set_reservation_check,
3566 	    dsl_dataset_set_reservation_sync, ds, &psa, 0);
3567 
3568 	dsl_dataset_rele(ds, FTAG);
3569 	return (err);
3570 }
3571 
3572 typedef struct zfs_hold_cleanup_arg {
3573 	dsl_pool_t *dp;
3574 	uint64_t dsobj;
3575 	char htag[MAXNAMELEN];
3576 } zfs_hold_cleanup_arg_t;
3577 
3578 static void
3579 dsl_dataset_user_release_onexit(void *arg)
3580 {
3581 	zfs_hold_cleanup_arg_t *ca = arg;
3582 
3583 	(void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag,
3584 	    B_TRUE);
3585 	kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
3586 }
3587 
3588 void
3589 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag,
3590     minor_t minor)
3591 {
3592 	zfs_hold_cleanup_arg_t *ca;
3593 
3594 	ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP);
3595 	ca->dp = ds->ds_dir->dd_pool;
3596 	ca->dsobj = ds->ds_object;
3597 	(void) strlcpy(ca->htag, htag, sizeof (ca->htag));
3598 	VERIFY3U(0, ==, zfs_onexit_add_cb(minor,
3599 	    dsl_dataset_user_release_onexit, ca, NULL));
3600 }
3601 
3602 /*
3603  * If you add new checks here, you may need to add
3604  * additional checks to the "temporary" case in
3605  * snapshot_check() in dmu_objset.c.
3606  */
3607 static int
3608 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3609 {
3610 	dsl_dataset_t *ds = arg1;
3611 	struct dsl_ds_holdarg *ha = arg2;
3612 	const char *htag = ha->htag;
3613 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3614 	int error = 0;
3615 
3616 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3617 		return (ENOTSUP);
3618 
3619 	if (!dsl_dataset_is_snapshot(ds))
3620 		return (EINVAL);
3621 
3622 	/* tags must be unique */
3623 	mutex_enter(&ds->ds_lock);
3624 	if (ds->ds_phys->ds_userrefs_obj) {
3625 		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3626 		    8, 1, tx);
3627 		if (error == 0)
3628 			error = EEXIST;
3629 		else if (error == ENOENT)
3630 			error = 0;
3631 	}
3632 	mutex_exit(&ds->ds_lock);
3633 
3634 	if (error == 0 && ha->temphold &&
3635 	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3636 		error = E2BIG;
3637 
3638 	return (error);
3639 }
3640 
3641 void
3642 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3643 {
3644 	dsl_dataset_t *ds = arg1;
3645 	struct dsl_ds_holdarg *ha = arg2;
3646 	const char *htag = ha->htag;
3647 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3648 	objset_t *mos = dp->dp_meta_objset;
3649 	uint64_t now = gethrestime_sec();
3650 	uint64_t zapobj;
3651 
3652 	mutex_enter(&ds->ds_lock);
3653 	if (ds->ds_phys->ds_userrefs_obj == 0) {
3654 		/*
3655 		 * This is the first user hold for this dataset.  Create
3656 		 * the userrefs zap object.
3657 		 */
3658 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3659 		zapobj = ds->ds_phys->ds_userrefs_obj =
3660 		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3661 	} else {
3662 		zapobj = ds->ds_phys->ds_userrefs_obj;
3663 	}
3664 	ds->ds_userrefs++;
3665 	mutex_exit(&ds->ds_lock);
3666 
3667 	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3668 
3669 	if (ha->temphold) {
3670 		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3671 		    htag, &now, tx));
3672 	}
3673 
3674 	spa_history_log_internal_ds(ds, "hold", tx,
3675 	    "tag = %s temp = %d holds now = %llu",
3676 	    htag, (int)ha->temphold, ds->ds_userrefs);
3677 }
3678 
3679 static int
3680 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3681 {
3682 	struct dsl_ds_holdarg *ha = arg;
3683 	dsl_dataset_t *ds;
3684 	int error;
3685 	char *name;
3686 
3687 	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
3688 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3689 	error = dsl_dataset_hold(name, ha->dstg, &ds);
3690 	strfree(name);
3691 	if (error == 0) {
3692 		ha->gotone = B_TRUE;
3693 		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3694 		    dsl_dataset_user_hold_sync, ds, ha, 0);
3695 	} else if (error == ENOENT && ha->recursive) {
3696 		error = 0;
3697 	} else {
3698 		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3699 	}
3700 	return (error);
3701 }
3702 
3703 int
3704 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag,
3705     boolean_t temphold)
3706 {
3707 	struct dsl_ds_holdarg *ha;
3708 	int error;
3709 
3710 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3711 	ha->htag = htag;
3712 	ha->temphold = temphold;
3713 	error = dsl_sync_task_do(ds->ds_dir->dd_pool,
3714 	    dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync,
3715 	    ds, ha, 0);
3716 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3717 
3718 	return (error);
3719 }
3720 
3721 int
3722 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3723     boolean_t recursive, boolean_t temphold, int cleanup_fd)
3724 {
3725 	struct dsl_ds_holdarg *ha;
3726 	dsl_sync_task_t *dst;
3727 	spa_t *spa;
3728 	int error;
3729 	minor_t minor = 0;
3730 
3731 	if (cleanup_fd != -1) {
3732 		/* Currently we only support cleanup-on-exit of tempholds. */
3733 		if (!temphold)
3734 			return (EINVAL);
3735 		error = zfs_onexit_fd_hold(cleanup_fd, &minor);
3736 		if (error)
3737 			return (error);
3738 	}
3739 
3740 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3741 
3742 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3743 
3744 	error = spa_open(dsname, &spa, FTAG);
3745 	if (error) {
3746 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3747 		if (cleanup_fd != -1)
3748 			zfs_onexit_fd_rele(cleanup_fd);
3749 		return (error);
3750 	}
3751 
3752 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3753 	ha->htag = htag;
3754 	ha->snapname = snapname;
3755 	ha->recursive = recursive;
3756 	ha->temphold = temphold;
3757 
3758 	if (recursive) {
3759 		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3760 		    ha, DS_FIND_CHILDREN);
3761 	} else {
3762 		error = dsl_dataset_user_hold_one(dsname, ha);
3763 	}
3764 	if (error == 0)
3765 		error = dsl_sync_task_group_wait(ha->dstg);
3766 
3767 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3768 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3769 		dsl_dataset_t *ds = dst->dst_arg1;
3770 
3771 		if (dst->dst_err) {
3772 			dsl_dataset_name(ds, ha->failed);
3773 			*strchr(ha->failed, '@') = '\0';
3774 		} else if (error == 0 && minor != 0 && temphold) {
3775 			/*
3776 			 * If this hold is to be released upon process exit,
3777 			 * register that action now.
3778 			 */
3779 			dsl_register_onexit_hold_cleanup(ds, htag, minor);
3780 		}
3781 		dsl_dataset_rele(ds, ha->dstg);
3782 	}
3783 
3784 	if (error == 0 && recursive && !ha->gotone)
3785 		error = ENOENT;
3786 
3787 	if (error)
3788 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3789 
3790 	dsl_sync_task_group_destroy(ha->dstg);
3791 
3792 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3793 	spa_close(spa, FTAG);
3794 	if (cleanup_fd != -1)
3795 		zfs_onexit_fd_rele(cleanup_fd);
3796 	return (error);
3797 }
3798 
3799 struct dsl_ds_releasearg {
3800 	dsl_dataset_t *ds;
3801 	const char *htag;
3802 	boolean_t own;		/* do we own or just hold ds? */
3803 };
3804 
3805 static int
3806 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3807     boolean_t *might_destroy)
3808 {
3809 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3810 	uint64_t zapobj;
3811 	uint64_t tmp;
3812 	int error;
3813 
3814 	*might_destroy = B_FALSE;
3815 
3816 	mutex_enter(&ds->ds_lock);
3817 	zapobj = ds->ds_phys->ds_userrefs_obj;
3818 	if (zapobj == 0) {
3819 		/* The tag can't possibly exist */
3820 		mutex_exit(&ds->ds_lock);
3821 		return (ESRCH);
3822 	}
3823 
3824 	/* Make sure the tag exists */
3825 	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3826 	if (error) {
3827 		mutex_exit(&ds->ds_lock);
3828 		if (error == ENOENT)
3829 			error = ESRCH;
3830 		return (error);
3831 	}
3832 
3833 	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3834 	    DS_IS_DEFER_DESTROY(ds))
3835 		*might_destroy = B_TRUE;
3836 
3837 	mutex_exit(&ds->ds_lock);
3838 	return (0);
3839 }
3840 
3841 static int
3842 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3843 {
3844 	struct dsl_ds_releasearg *ra = arg1;
3845 	dsl_dataset_t *ds = ra->ds;
3846 	boolean_t might_destroy;
3847 	int error;
3848 
3849 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3850 		return (ENOTSUP);
3851 
3852 	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3853 	if (error)
3854 		return (error);
3855 
3856 	if (might_destroy) {
3857 		struct dsl_ds_destroyarg dsda = {0};
3858 
3859 		if (dmu_tx_is_syncing(tx)) {
3860 			/*
3861 			 * If we're not prepared to remove the snapshot,
3862 			 * we can't allow the release to happen right now.
3863 			 */
3864 			if (!ra->own)
3865 				return (EBUSY);
3866 		}
3867 		dsda.ds = ds;
3868 		dsda.releasing = B_TRUE;
3869 		return (dsl_dataset_destroy_check(&dsda, tag, tx));
3870 	}
3871 
3872 	return (0);
3873 }
3874 
3875 static void
3876 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3877 {
3878 	struct dsl_ds_releasearg *ra = arg1;
3879 	dsl_dataset_t *ds = ra->ds;
3880 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3881 	objset_t *mos = dp->dp_meta_objset;
3882 	uint64_t zapobj;
3883 	uint64_t refs;
3884 	int error;
3885 
3886 	mutex_enter(&ds->ds_lock);
3887 	ds->ds_userrefs--;
3888 	refs = ds->ds_userrefs;
3889 	mutex_exit(&ds->ds_lock);
3890 	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3891 	VERIFY(error == 0 || error == ENOENT);
3892 	zapobj = ds->ds_phys->ds_userrefs_obj;
3893 	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3894 
3895 	spa_history_log_internal_ds(ds, "release", tx,
3896 	    "tag = %s refs now = %lld", ra->htag, (longlong_t)refs);
3897 
3898 	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3899 	    DS_IS_DEFER_DESTROY(ds)) {
3900 		struct dsl_ds_destroyarg dsda = {0};
3901 
3902 		ASSERT(ra->own);
3903 		dsda.ds = ds;
3904 		dsda.releasing = B_TRUE;
3905 		/* We already did the destroy_check */
3906 		dsl_dataset_destroy_sync(&dsda, tag, tx);
3907 	}
3908 }
3909 
3910 static int
3911 dsl_dataset_user_release_one(const char *dsname, void *arg)
3912 {
3913 	struct dsl_ds_holdarg *ha = arg;
3914 	struct dsl_ds_releasearg *ra;
3915 	dsl_dataset_t *ds;
3916 	int error;
3917 	void *dtag = ha->dstg;
3918 	char *name;
3919 	boolean_t own = B_FALSE;
3920 	boolean_t might_destroy;
3921 
3922 	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3923 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3924 	error = dsl_dataset_hold(name, dtag, &ds);
3925 	strfree(name);
3926 	if (error == ENOENT && ha->recursive)
3927 		return (0);
3928 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3929 	if (error)
3930 		return (error);
3931 
3932 	ha->gotone = B_TRUE;
3933 
3934 	ASSERT(dsl_dataset_is_snapshot(ds));
3935 
3936 	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3937 	if (error) {
3938 		dsl_dataset_rele(ds, dtag);
3939 		return (error);
3940 	}
3941 
3942 	if (might_destroy) {
3943 #ifdef _KERNEL
3944 		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3945 		error = zfs_unmount_snap(name, NULL);
3946 		strfree(name);
3947 		if (error) {
3948 			dsl_dataset_rele(ds, dtag);
3949 			return (error);
3950 		}
3951 #endif
3952 		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3953 			dsl_dataset_rele(ds, dtag);
3954 			return (EBUSY);
3955 		} else {
3956 			own = B_TRUE;
3957 			dsl_dataset_make_exclusive(ds, dtag);
3958 		}
3959 	}
3960 
3961 	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3962 	ra->ds = ds;
3963 	ra->htag = ha->htag;
3964 	ra->own = own;
3965 	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3966 	    dsl_dataset_user_release_sync, ra, dtag, 0);
3967 
3968 	return (0);
3969 }
3970 
3971 int
3972 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3973     boolean_t recursive)
3974 {
3975 	struct dsl_ds_holdarg *ha;
3976 	dsl_sync_task_t *dst;
3977 	spa_t *spa;
3978 	int error;
3979 
3980 top:
3981 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3982 
3983 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3984 
3985 	error = spa_open(dsname, &spa, FTAG);
3986 	if (error) {
3987 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3988 		return (error);
3989 	}
3990 
3991 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3992 	ha->htag = htag;
3993 	ha->snapname = snapname;
3994 	ha->recursive = recursive;
3995 	if (recursive) {
3996 		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3997 		    ha, DS_FIND_CHILDREN);
3998 	} else {
3999 		error = dsl_dataset_user_release_one(dsname, ha);
4000 	}
4001 	if (error == 0)
4002 		error = dsl_sync_task_group_wait(ha->dstg);
4003 
4004 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
4005 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
4006 		struct dsl_ds_releasearg *ra = dst->dst_arg1;
4007 		dsl_dataset_t *ds = ra->ds;
4008 
4009 		if (dst->dst_err)
4010 			dsl_dataset_name(ds, ha->failed);
4011 
4012 		if (ra->own)
4013 			dsl_dataset_disown(ds, ha->dstg);
4014 		else
4015 			dsl_dataset_rele(ds, ha->dstg);
4016 
4017 		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
4018 	}
4019 
4020 	if (error == 0 && recursive && !ha->gotone)
4021 		error = ENOENT;
4022 
4023 	if (error && error != EBUSY)
4024 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
4025 
4026 	dsl_sync_task_group_destroy(ha->dstg);
4027 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
4028 	spa_close(spa, FTAG);
4029 
4030 	/*
4031 	 * We can get EBUSY if we were racing with deferred destroy and
4032 	 * dsl_dataset_user_release_check() hadn't done the necessary
4033 	 * open context setup.  We can also get EBUSY if we're racing
4034 	 * with destroy and that thread is the ds_owner.  Either way
4035 	 * the busy condition should be transient, and we should retry
4036 	 * the release operation.
4037 	 */
4038 	if (error == EBUSY)
4039 		goto top;
4040 
4041 	return (error);
4042 }
4043 
4044 /*
4045  * Called at spa_load time (with retry == B_FALSE) to release a stale
4046  * temporary user hold. Also called by the onexit code (with retry == B_TRUE).
4047  */
4048 int
4049 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
4050     boolean_t retry)
4051 {
4052 	dsl_dataset_t *ds;
4053 	char *snap;
4054 	char *name;
4055 	int namelen;
4056 	int error;
4057 
4058 	do {
4059 		rw_enter(&dp->dp_config_rwlock, RW_READER);
4060 		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
4061 		rw_exit(&dp->dp_config_rwlock);
4062 		if (error)
4063 			return (error);
4064 		namelen = dsl_dataset_namelen(ds)+1;
4065 		name = kmem_alloc(namelen, KM_SLEEP);
4066 		dsl_dataset_name(ds, name);
4067 		dsl_dataset_rele(ds, FTAG);
4068 
4069 		snap = strchr(name, '@');
4070 		*snap = '\0';
4071 		++snap;
4072 		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
4073 		kmem_free(name, namelen);
4074 
4075 		/*
4076 		 * The object can't have been destroyed because we have a hold,
4077 		 * but it might have been renamed, resulting in ENOENT.  Retry
4078 		 * if we've been requested to do so.
4079 		 *
4080 		 * It would be nice if we could use the dsobj all the way
4081 		 * through and avoid ENOENT entirely.  But we might need to
4082 		 * unmount the snapshot, and there's currently no way to lookup
4083 		 * a vfsp using a ZFS object id.
4084 		 */
4085 	} while ((error == ENOENT) && retry);
4086 
4087 	return (error);
4088 }
4089 
4090 int
4091 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
4092 {
4093 	dsl_dataset_t *ds;
4094 	int err;
4095 
4096 	err = dsl_dataset_hold(dsname, FTAG, &ds);
4097 	if (err)
4098 		return (err);
4099 
4100 	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
4101 	if (ds->ds_phys->ds_userrefs_obj != 0) {
4102 		zap_attribute_t *za;
4103 		zap_cursor_t zc;
4104 
4105 		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
4106 		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
4107 		    ds->ds_phys->ds_userrefs_obj);
4108 		    zap_cursor_retrieve(&zc, za) == 0;
4109 		    zap_cursor_advance(&zc)) {
4110 			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
4111 			    za->za_first_integer));
4112 		}
4113 		zap_cursor_fini(&zc);
4114 		kmem_free(za, sizeof (zap_attribute_t));
4115 	}
4116 	dsl_dataset_rele(ds, FTAG);
4117 	return (0);
4118 }
4119 
4120 /*
4121  * Note, this function is used as the callback for dmu_objset_find().  We
4122  * always return 0 so that we will continue to find and process
4123  * inconsistent datasets, even if we encounter an error trying to
4124  * process one of them.
4125  */
4126 /* ARGSUSED */
4127 int
4128 dsl_destroy_inconsistent(const char *dsname, void *arg)
4129 {
4130 	dsl_dataset_t *ds;
4131 
4132 	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
4133 		if (DS_IS_INCONSISTENT(ds))
4134 			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
4135 		else
4136 			dsl_dataset_disown(ds, FTAG);
4137 	}
4138 	return (0);
4139 }
4140 
4141 /*
4142  * Return (in *usedp) the amount of space written in new that is not
4143  * present in oldsnap.  New may be a snapshot or the head.  Old must be
4144  * a snapshot before new, in new's filesystem (or its origin).  If not then
4145  * fail and return EINVAL.
4146  *
4147  * The written space is calculated by considering two components:  First, we
4148  * ignore any freed space, and calculate the written as new's used space
4149  * minus old's used space.  Next, we add in the amount of space that was freed
4150  * between the two snapshots, thus reducing new's used space relative to old's.
4151  * Specifically, this is the space that was born before old->ds_creation_txg,
4152  * and freed before new (ie. on new's deadlist or a previous deadlist).
4153  *
4154  * space freed                         [---------------------]
4155  * snapshots                       ---O-------O--------O-------O------
4156  *                                         oldsnap            new
4157  */
4158 int
4159 dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
4160     uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4161 {
4162 	int err = 0;
4163 	uint64_t snapobj;
4164 	dsl_pool_t *dp = new->ds_dir->dd_pool;
4165 
4166 	*usedp = 0;
4167 	*usedp += new->ds_phys->ds_referenced_bytes;
4168 	*usedp -= oldsnap->ds_phys->ds_referenced_bytes;
4169 
4170 	*compp = 0;
4171 	*compp += new->ds_phys->ds_compressed_bytes;
4172 	*compp -= oldsnap->ds_phys->ds_compressed_bytes;
4173 
4174 	*uncompp = 0;
4175 	*uncompp += new->ds_phys->ds_uncompressed_bytes;
4176 	*uncompp -= oldsnap->ds_phys->ds_uncompressed_bytes;
4177 
4178 	rw_enter(&dp->dp_config_rwlock, RW_READER);
4179 	snapobj = new->ds_object;
4180 	while (snapobj != oldsnap->ds_object) {
4181 		dsl_dataset_t *snap;
4182 		uint64_t used, comp, uncomp;
4183 
4184 		if (snapobj == new->ds_object) {
4185 			snap = new;
4186 		} else {
4187 			err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
4188 			if (err != 0)
4189 				break;
4190 		}
4191 
4192 		if (snap->ds_phys->ds_prev_snap_txg ==
4193 		    oldsnap->ds_phys->ds_creation_txg) {
4194 			/*
4195 			 * The blocks in the deadlist can not be born after
4196 			 * ds_prev_snap_txg, so get the whole deadlist space,
4197 			 * which is more efficient (especially for old-format
4198 			 * deadlists).  Unfortunately the deadlist code
4199 			 * doesn't have enough information to make this
4200 			 * optimization itself.
4201 			 */
4202 			dsl_deadlist_space(&snap->ds_deadlist,
4203 			    &used, &comp, &uncomp);
4204 		} else {
4205 			dsl_deadlist_space_range(&snap->ds_deadlist,
4206 			    0, oldsnap->ds_phys->ds_creation_txg,
4207 			    &used, &comp, &uncomp);
4208 		}
4209 		*usedp += used;
4210 		*compp += comp;
4211 		*uncompp += uncomp;
4212 
4213 		/*
4214 		 * If we get to the beginning of the chain of snapshots
4215 		 * (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
4216 		 * was not a snapshot of/before new.
4217 		 */
4218 		snapobj = snap->ds_phys->ds_prev_snap_obj;
4219 		if (snap != new)
4220 			dsl_dataset_rele(snap, FTAG);
4221 		if (snapobj == 0) {
4222 			err = EINVAL;
4223 			break;
4224 		}
4225 
4226 	}
4227 	rw_exit(&dp->dp_config_rwlock);
4228 	return (err);
4229 }
4230 
4231 /*
4232  * Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
4233  * lastsnap, and all snapshots in between are deleted.
4234  *
4235  * blocks that would be freed            [---------------------------]
4236  * snapshots                       ---O-------O--------O-------O--------O
4237  *                                        firstsnap        lastsnap
4238  *
4239  * This is the set of blocks that were born after the snap before firstsnap,
4240  * (birth > firstsnap->prev_snap_txg) and died before the snap after the
4241  * last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
4242  * We calculate this by iterating over the relevant deadlists (from the snap
4243  * after lastsnap, backward to the snap after firstsnap), summing up the
4244  * space on the deadlist that was born after the snap before firstsnap.
4245  */
4246 int
4247 dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
4248     dsl_dataset_t *lastsnap,
4249     uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
4250 {
4251 	int err = 0;
4252 	uint64_t snapobj;
4253 	dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
4254 
4255 	ASSERT(dsl_dataset_is_snapshot(firstsnap));
4256 	ASSERT(dsl_dataset_is_snapshot(lastsnap));
4257 
4258 	/*
4259 	 * Check that the snapshots are in the same dsl_dir, and firstsnap
4260 	 * is before lastsnap.
4261 	 */
4262 	if (firstsnap->ds_dir != lastsnap->ds_dir ||
4263 	    firstsnap->ds_phys->ds_creation_txg >
4264 	    lastsnap->ds_phys->ds_creation_txg)
4265 		return (EINVAL);
4266 
4267 	*usedp = *compp = *uncompp = 0;
4268 
4269 	rw_enter(&dp->dp_config_rwlock, RW_READER);
4270 	snapobj = lastsnap->ds_phys->ds_next_snap_obj;
4271 	while (snapobj != firstsnap->ds_object) {
4272 		dsl_dataset_t *ds;
4273 		uint64_t used, comp, uncomp;
4274 
4275 		err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
4276 		if (err != 0)
4277 			break;
4278 
4279 		dsl_deadlist_space_range(&ds->ds_deadlist,
4280 		    firstsnap->ds_phys->ds_prev_snap_txg, UINT64_MAX,
4281 		    &used, &comp, &uncomp);
4282 		*usedp += used;
4283 		*compp += comp;
4284 		*uncompp += uncomp;
4285 
4286 		snapobj = ds->ds_phys->ds_prev_snap_obj;
4287 		ASSERT3U(snapobj, !=, 0);
4288 		dsl_dataset_rele(ds, FTAG);
4289 	}
4290 	rw_exit(&dp->dp_config_rwlock);
4291 	return (err);
4292 }
4293