xref: /titanic_51/usr/src/uts/common/fs/zfs/dsl_dataset.c (revision 3be32c0f0acac4f6258b029f1a27a16a7ec65bb0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/dmu_objset.h>
26 #include <sys/dsl_dataset.h>
27 #include <sys/dsl_dir.h>
28 #include <sys/dsl_prop.h>
29 #include <sys/dsl_synctask.h>
30 #include <sys/dmu_traverse.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/arc.h>
33 #include <sys/zio.h>
34 #include <sys/zap.h>
35 #include <sys/unique.h>
36 #include <sys/zfs_context.h>
37 #include <sys/zfs_ioctl.h>
38 #include <sys/spa.h>
39 #include <sys/zfs_znode.h>
40 #include <sys/zvol.h>
41 #include <sys/dsl_scan.h>
42 
43 /*
44  * Enable/disable prefetching of dedup-ed blocks which are going to be freed.
45  */
46 int zfs_dedup_prefetch = 1;
47 
48 static char *dsl_reaper = "the grim reaper";
49 
50 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
51 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
52 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
53 
54 #define	DS_REF_MAX	(1ULL << 62)
55 
56 #define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE
57 
58 #define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
59 
60 
61 /*
62  * Figure out how much of this delta should be propagated to the dsl_dir
63  * layer.  If there's a refreservation, that space has already been
64  * partially accounted for in our ancestors.
65  */
66 static int64_t
67 parent_delta(dsl_dataset_t *ds, int64_t delta)
68 {
69 	uint64_t old_bytes, new_bytes;
70 
71 	if (ds->ds_reserved == 0)
72 		return (delta);
73 
74 	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
75 	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
76 
77 	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
78 	return (new_bytes - old_bytes);
79 }
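/*
 * A worked example with hypothetical numbers: if ds_reserved = 100,
 * ds_unique_bytes = 80, and delta = +30, then old_bytes = MAX(80, 100) = 100
 * and new_bytes = MAX(110, 100) = 110, so only 10 of the 30 bytes are
 * propagated to the dsl_dir; the other 20 were already covered by the
 * refreservation charged to our ancestors.
 */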
80 
81 void
82 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
83 {
84 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
85 	int compressed = BP_GET_PSIZE(bp);
86 	int uncompressed = BP_GET_UCSIZE(bp);
87 	int64_t delta;
88 
89 	dprintf_bp(bp, "ds=%p", ds);
90 
91 	ASSERT(dmu_tx_is_syncing(tx));
92 	/* It could have been compressed away to nothing */
93 	if (BP_IS_HOLE(bp))
94 		return;
95 	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
96 	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
97 	if (ds == NULL) {
98 		/*
99 		 * Account for the meta-objset space in its placeholder
100 		 * dsl_dir.
101 		 */
102 		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
103 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
104 		    used, compressed, uncompressed, tx);
105 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
106 		return;
107 	}
108 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
109 
110 	mutex_enter(&ds->ds_dir->dd_lock);
111 	mutex_enter(&ds->ds_lock);
112 	delta = parent_delta(ds, used);
113 	ds->ds_phys->ds_used_bytes += used;
114 	ds->ds_phys->ds_compressed_bytes += compressed;
115 	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
116 	ds->ds_phys->ds_unique_bytes += used;
117 	mutex_exit(&ds->ds_lock);
118 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
119 	    compressed, uncompressed, tx);
120 	dsl_dir_transfer_space(ds->ds_dir, used - delta,
121 	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
122 	mutex_exit(&ds->ds_dir->dd_lock);
123 }
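/*
 * Continuing the hypothetical example above: of the 30 bytes born, only
 * parent_delta()'s 10 bytes are newly charged to the dsl_dir as DD_USED_HEAD;
 * the remaining 20 are transferred from DD_USED_REFRSRV to DD_USED_HEAD,
 * since that space was already being charged against the refreservation.
 */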
124 
125 int
126 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
127     boolean_t async)
128 {
129 	if (BP_IS_HOLE(bp))
130 		return (0);
131 
132 	ASSERT(dmu_tx_is_syncing(tx));
133 	ASSERT(bp->blk_birth <= tx->tx_txg);
134 
135 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
136 	int compressed = BP_GET_PSIZE(bp);
137 	int uncompressed = BP_GET_UCSIZE(bp);
138 
139 	ASSERT(used > 0);
140 	if (ds == NULL) {
141 		/*
142 		 * Account for the meta-objset space in its placeholder
143 		 * dataset.
144 		 */
145 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
146 
147 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
148 		    -used, -compressed, -uncompressed, tx);
149 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
150 		return (used);
151 	}
152 	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
153 
154 	ASSERT(!dsl_dataset_is_snapshot(ds));
155 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
156 
157 	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
158 		int64_t delta;
159 
160 		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
161 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
162 
163 		mutex_enter(&ds->ds_dir->dd_lock);
164 		mutex_enter(&ds->ds_lock);
165 		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
166 		    !DS_UNIQUE_IS_ACCURATE(ds));
167 		delta = parent_delta(ds, -used);
168 		ds->ds_phys->ds_unique_bytes -= used;
169 		mutex_exit(&ds->ds_lock);
170 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
171 		    delta, -compressed, -uncompressed, tx);
172 		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
173 		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
174 		mutex_exit(&ds->ds_dir->dd_lock);
175 	} else {
176 		dprintf_bp(bp, "putting on dead list: %s", "");
177 		if (async) {
178 			/*
179 			 * We are here as part of zio's write done callback,
180 			 * which means we're a zio interrupt thread.  We can't
181 			 * call bplist_enqueue() now because it may block
182 			 * waiting for I/O.  Instead, put bp on the deferred
183 			 * queue and let dsl_pool_sync() finish the job.
184 			 */
185 			bplist_enqueue_deferred(&ds->ds_deadlist, bp);
186 		} else {
187 			VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
188 		}
189 		ASSERT3U(ds->ds_prev->ds_object, ==,
190 		    ds->ds_phys->ds_prev_snap_obj);
191 		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
192 		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
193 		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
194 		    ds->ds_object && bp->blk_birth >
195 		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
196 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
197 			mutex_enter(&ds->ds_prev->ds_lock);
198 			ds->ds_prev->ds_phys->ds_unique_bytes += used;
199 			mutex_exit(&ds->ds_prev->ds_lock);
200 		}
201 		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
202 			dsl_dir_transfer_space(ds->ds_dir, used,
203 			    DD_USED_HEAD, DD_USED_SNAP, tx);
204 		}
205 	}
206 	mutex_enter(&ds->ds_lock);
207 	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
208 	ds->ds_phys->ds_used_bytes -= used;
209 	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
210 	ds->ds_phys->ds_compressed_bytes -= compressed;
211 	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
212 	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
213 	mutex_exit(&ds->ds_lock);
214 
215 	return (used);
216 }
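/*
 * To summarize the two paths above: blocks born after the previous snapshot
 * are unique to this dataset and are freed immediately; older blocks are
 * still referenced by a snapshot, so they go on the deadlist instead, and
 * (if born after the dir's origin) their space is reclassified from
 * DD_USED_HEAD to DD_USED_SNAP rather than freed.
 */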
217 
218 uint64_t
219 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
220 {
221 	uint64_t trysnap = 0;
222 
223 	if (ds == NULL)
224 		return (0);
225 	/*
226 	 * The snapshot creation could fail, but that would cause an
227 	 * incorrect FALSE return, which would only result in an
228 	 * overestimation of the amount of space that an operation would
229 	 * consume, which is OK.
230 	 *
231 	 * There's also a small window where we could miss a pending
232 	 * snapshot, because we could set the sync task in the quiescing
233 	 * phase.  So this should only be used as a guess.
234 	 */
235 	if (ds->ds_trysnap_txg >
236 	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
237 		trysnap = ds->ds_trysnap_txg;
238 	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
239 }
240 
241 boolean_t
242 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp,
243     uint64_t blk_birth)
244 {
245 	if (blk_birth <= dsl_dataset_prev_snap_txg(ds))
246 		return (B_FALSE);
247 
248 	if (zfs_dedup_prefetch && bp && BP_GET_DEDUP(bp))
249 		ddt_prefetch(dsl_dataset_get_spa(ds), bp);
250 
251 	return (B_TRUE);
252 }
253 
254 /* ARGSUSED */
255 static void
256 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
257 {
258 	dsl_dataset_t *ds = dsv;
259 
260 	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
261 
262 	unique_remove(ds->ds_fsid_guid);
263 
264 	if (ds->ds_objset != NULL)
265 		dmu_objset_evict(ds->ds_objset);
266 
267 	if (ds->ds_prev) {
268 		dsl_dataset_drop_ref(ds->ds_prev, ds);
269 		ds->ds_prev = NULL;
270 	}
271 
272 	bplist_close(&ds->ds_deadlist);
273 	if (ds->ds_dir)
274 		dsl_dir_close(ds->ds_dir, ds);
275 
276 	ASSERT(!list_link_active(&ds->ds_synced_link));
277 
278 	mutex_destroy(&ds->ds_lock);
279 	mutex_destroy(&ds->ds_recvlock);
280 	mutex_destroy(&ds->ds_opening_lock);
281 	rw_destroy(&ds->ds_rwlock);
282 	cv_destroy(&ds->ds_exclusive_cv);
283 	bplist_fini(&ds->ds_deadlist);
284 
285 	kmem_free(ds, sizeof (dsl_dataset_t));
286 }
287 
288 static int
289 dsl_dataset_get_snapname(dsl_dataset_t *ds)
290 {
291 	dsl_dataset_phys_t *headphys;
292 	int err;
293 	dmu_buf_t *headdbuf;
294 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
295 	objset_t *mos = dp->dp_meta_objset;
296 
297 	if (ds->ds_snapname[0])
298 		return (0);
299 	if (ds->ds_phys->ds_next_snap_obj == 0)
300 		return (0);
301 
302 	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
303 	    FTAG, &headdbuf);
304 	if (err)
305 		return (err);
306 	headphys = headdbuf->db_data;
307 	err = zap_value_search(dp->dp_meta_objset,
308 	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
309 	dmu_buf_rele(headdbuf, FTAG);
310 	return (err);
311 }
312 
313 static int
314 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
315 {
316 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
317 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
318 	matchtype_t mt;
319 	int err;
320 
321 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
322 		mt = MT_FIRST;
323 	else
324 		mt = MT_EXACT;
325 
326 	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
327 	    value, mt, NULL, 0, NULL);
328 	if (err == ENOTSUP && mt == MT_FIRST)
329 		err = zap_lookup(mos, snapobj, name, 8, 1, value);
330 	return (err);
331 }
332 
333 static int
334 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
335 {
336 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
337 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
338 	matchtype_t mt;
339 	int err;
340 
341 	dsl_dir_snap_cmtime_update(ds->ds_dir);
342 
343 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
344 		mt = MT_FIRST;
345 	else
346 		mt = MT_EXACT;
347 
348 	err = zap_remove_norm(mos, snapobj, name, mt, tx);
349 	if (err == ENOTSUP && mt == MT_FIRST)
350 		err = zap_remove(mos, snapobj, name, tx);
351 	return (err);
352 }
353 
354 static int
355 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
356     dsl_dataset_t **dsp)
357 {
358 	objset_t *mos = dp->dp_meta_objset;
359 	dmu_buf_t *dbuf;
360 	dsl_dataset_t *ds;
361 	int err;
362 
363 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
364 	    dsl_pool_sync_context(dp));
365 
366 	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
367 	if (err)
368 		return (err);
369 	ds = dmu_buf_get_user(dbuf);
370 	if (ds == NULL) {
371 		dsl_dataset_t *winner;
372 
373 		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
374 		ds->ds_dbuf = dbuf;
375 		ds->ds_object = dsobj;
376 		ds->ds_phys = dbuf->db_data;
377 
378 		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
379 		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
380 		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
381 		rw_init(&ds->ds_rwlock, 0, 0, 0);
382 		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
383 		bplist_init(&ds->ds_deadlist);
384 
385 		err = bplist_open(&ds->ds_deadlist,
386 		    mos, ds->ds_phys->ds_deadlist_obj);
387 		if (err == 0) {
388 			err = dsl_dir_open_obj(dp,
389 			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
390 		}
391 		if (err) {
392 			/*
393 			 * we don't really need to close the bplist if we
394 			 * just opened it.
395 			 */
396 			mutex_destroy(&ds->ds_lock);
397 			mutex_destroy(&ds->ds_recvlock);
398 			mutex_destroy(&ds->ds_opening_lock);
399 			rw_destroy(&ds->ds_rwlock);
400 			cv_destroy(&ds->ds_exclusive_cv);
401 			bplist_fini(&ds->ds_deadlist);
402 			kmem_free(ds, sizeof (dsl_dataset_t));
403 			dmu_buf_rele(dbuf, tag);
404 			return (err);
405 		}
406 
407 		if (!dsl_dataset_is_snapshot(ds)) {
408 			ds->ds_snapname[0] = '\0';
409 			if (ds->ds_phys->ds_prev_snap_obj) {
410 				err = dsl_dataset_get_ref(dp,
411 				    ds->ds_phys->ds_prev_snap_obj,
412 				    ds, &ds->ds_prev);
413 			}
414 		} else {
415 			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
416 				err = dsl_dataset_get_snapname(ds);
417 			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
418 				err = zap_count(
419 				    ds->ds_dir->dd_pool->dp_meta_objset,
420 				    ds->ds_phys->ds_userrefs_obj,
421 				    &ds->ds_userrefs);
422 			}
423 		}
424 
425 		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
426 			/*
427 			 * In sync context, we're called with either no lock
428 			 * or with the write lock.  If we're not syncing,
429 			 * we're always called with the read lock held.
430 			 */
431 			boolean_t need_lock =
432 			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
433 			    dsl_pool_sync_context(dp);
434 
435 			if (need_lock)
436 				rw_enter(&dp->dp_config_rwlock, RW_READER);
437 
438 			err = dsl_prop_get_ds(ds,
439 			    "refreservation", sizeof (uint64_t), 1,
440 			    &ds->ds_reserved, NULL);
441 			if (err == 0) {
442 				err = dsl_prop_get_ds(ds,
443 				    "refquota", sizeof (uint64_t), 1,
444 				    &ds->ds_quota, NULL);
445 			}
446 
447 			if (need_lock)
448 				rw_exit(&dp->dp_config_rwlock);
449 		} else {
450 			ds->ds_reserved = ds->ds_quota = 0;
451 		}
452 
453 		if (err == 0) {
454 			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
455 			    dsl_dataset_evict);
456 		}
457 		if (err || winner) {
458 			bplist_close(&ds->ds_deadlist);
459 			if (ds->ds_prev)
460 				dsl_dataset_drop_ref(ds->ds_prev, ds);
461 			dsl_dir_close(ds->ds_dir, ds);
462 			mutex_destroy(&ds->ds_lock);
463 			mutex_destroy(&ds->ds_recvlock);
464 			mutex_destroy(&ds->ds_opening_lock);
465 			rw_destroy(&ds->ds_rwlock);
466 			cv_destroy(&ds->ds_exclusive_cv);
467 			bplist_fini(&ds->ds_deadlist);
468 			kmem_free(ds, sizeof (dsl_dataset_t));
469 			if (err) {
470 				dmu_buf_rele(dbuf, tag);
471 				return (err);
472 			}
473 			ds = winner;
474 		} else {
475 			ds->ds_fsid_guid =
476 			    unique_insert(ds->ds_phys->ds_fsid_guid);
477 		}
478 	}
479 	ASSERT3P(ds->ds_dbuf, ==, dbuf);
480 	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
481 	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
482 	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
483 	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
484 	mutex_enter(&ds->ds_lock);
485 	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
486 		mutex_exit(&ds->ds_lock);
487 		dmu_buf_rele(ds->ds_dbuf, tag);
488 		return (ENOENT);
489 	}
490 	mutex_exit(&ds->ds_lock);
491 	*dsp = ds;
492 	return (0);
493 }
494 
495 static int
496 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
497 {
498 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
499 
500 	/*
501 	 * In syncing context we don't want to take the rwlock: there
502 	 * may be an existing writer waiting for sync phase to
503 	 * finish.  We don't need to worry about such writers, since
504 	 * sync phase is single-threaded, so the writer can't be
505 	 * doing anything while we are active.
506 	 */
507 	if (dsl_pool_sync_context(dp)) {
508 		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
509 		return (0);
510 	}
511 
512 	/*
513 	 * Normal users will hold the ds_rwlock as a READER until they
514 	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
515 	 * drop their READER lock after they set the ds_owner field.
516 	 *
517 	 * If the dataset is being destroyed, the destroy thread will
518 	 * obtain a WRITER lock for exclusive access after it's done its
519 	 * open-context work and then change the ds_owner to
520 	 * dsl_reaper once destruction is assured.  So threads
521 	 * may block here temporarily, until the "destructibility" of
522 	 * the dataset is determined.
523 	 */
524 	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
525 	mutex_enter(&ds->ds_lock);
526 	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
527 		rw_exit(&dp->dp_config_rwlock);
528 		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
529 		if (DSL_DATASET_IS_DESTROYED(ds)) {
530 			mutex_exit(&ds->ds_lock);
531 			dsl_dataset_drop_ref(ds, tag);
532 			rw_enter(&dp->dp_config_rwlock, RW_READER);
533 			return (ENOENT);
534 		}
535 		/*
536 		 * The dp_config_rwlock lives above the ds_lock. And
537 		 * we need to check DSL_DATASET_IS_DESTROYED() while
538 		 * holding the ds_lock, so we have to drop and reacquire
539 		 * the ds_lock here.
540 		 */
541 		mutex_exit(&ds->ds_lock);
542 		rw_enter(&dp->dp_config_rwlock, RW_READER);
543 		mutex_enter(&ds->ds_lock);
544 	}
545 	mutex_exit(&ds->ds_lock);
546 	return (0);
547 }
548 
549 int
550 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
551     dsl_dataset_t **dsp)
552 {
553 	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
554 
555 	if (err)
556 		return (err);
557 	return (dsl_dataset_hold_ref(*dsp, tag));
558 }
559 
560 int
561 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
562     void *tag, dsl_dataset_t **dsp)
563 {
564 	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
565 	if (err)
566 		return (err);
567 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
568 		dsl_dataset_rele(*dsp, tag);
569 		*dsp = NULL;
570 		return (EBUSY);
571 	}
572 	return (0);
573 }
574 
575 int
576 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
577 {
578 	dsl_dir_t *dd;
579 	dsl_pool_t *dp;
580 	const char *snapname;
581 	uint64_t obj;
582 	int err = 0;
583 
584 	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
585 	if (err)
586 		return (err);
587 
588 	dp = dd->dd_pool;
589 	obj = dd->dd_phys->dd_head_dataset_obj;
590 	rw_enter(&dp->dp_config_rwlock, RW_READER);
591 	if (obj)
592 		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
593 	else
594 		err = ENOENT;
595 	if (err)
596 		goto out;
597 
598 	err = dsl_dataset_hold_ref(*dsp, tag);
599 
600 	/* we may be looking for a snapshot */
601 	if (err == 0 && snapname != NULL) {
602 		dsl_dataset_t *ds = NULL;
603 
604 		if (*snapname++ != '@') {
605 			dsl_dataset_rele(*dsp, tag);
606 			err = ENOENT;
607 			goto out;
608 		}
609 
610 		dprintf("looking for snapshot '%s'\n", snapname);
611 		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
612 		if (err == 0)
613 			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
614 		dsl_dataset_rele(*dsp, tag);
615 
616 		ASSERT3U((err == 0), ==, (ds != NULL));
617 
618 		if (ds) {
619 			mutex_enter(&ds->ds_lock);
620 			if (ds->ds_snapname[0] == 0)
621 				(void) strlcpy(ds->ds_snapname, snapname,
622 				    sizeof (ds->ds_snapname));
623 			mutex_exit(&ds->ds_lock);
624 			err = dsl_dataset_hold_ref(ds, tag);
625 			*dsp = err ? NULL : ds;
626 		}
627 	}
628 out:
629 	rw_exit(&dp->dp_config_rwlock);
630 	dsl_dir_close(dd, FTAG);
631 	return (err);
632 }
633 
634 int
635 dsl_dataset_own(const char *name, boolean_t inconsistentok,
636     void *tag, dsl_dataset_t **dsp)
637 {
638 	int err = dsl_dataset_hold(name, tag, dsp);
639 	if (err)
640 		return (err);
641 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
642 		dsl_dataset_rele(*dsp, tag);
643 		return (EBUSY);
644 	}
645 	return (0);
646 }
647 
648 void
649 dsl_dataset_name(dsl_dataset_t *ds, char *name)
650 {
651 	if (ds == NULL) {
652 		(void) strcpy(name, "mos");
653 	} else {
654 		dsl_dir_name(ds->ds_dir, name);
655 		VERIFY(0 == dsl_dataset_get_snapname(ds));
656 		if (ds->ds_snapname[0]) {
657 			(void) strcat(name, "@");
658 			/*
659 			 * We use a "recursive" mutex so that we
660 			 * can call dprintf_ds() with ds_lock held.
661 			 */
662 			if (!MUTEX_HELD(&ds->ds_lock)) {
663 				mutex_enter(&ds->ds_lock);
664 				(void) strcat(name, ds->ds_snapname);
665 				mutex_exit(&ds->ds_lock);
666 			} else {
667 				(void) strcat(name, ds->ds_snapname);
668 			}
669 		}
670 	}
671 }
672 
673 static int
674 dsl_dataset_namelen(dsl_dataset_t *ds)
675 {
676 	int result;
677 
678 	if (ds == NULL) {
679 		result = 3;	/* "mos" */
680 	} else {
681 		result = dsl_dir_namelen(ds->ds_dir);
682 		VERIFY(0 == dsl_dataset_get_snapname(ds));
683 		if (ds->ds_snapname[0]) {
684 			++result;	/* adding one for the @-sign */
685 			if (!MUTEX_HELD(&ds->ds_lock)) {
686 				mutex_enter(&ds->ds_lock);
687 				result += strlen(ds->ds_snapname);
688 				mutex_exit(&ds->ds_lock);
689 			} else {
690 				result += strlen(ds->ds_snapname);
691 			}
692 		}
693 	}
694 
695 	return (result);
696 }
697 
698 void
699 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
700 {
701 	dmu_buf_rele(ds->ds_dbuf, tag);
702 }
703 
704 void
705 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
706 {
707 	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
708 		rw_exit(&ds->ds_rwlock);
709 	}
710 	dsl_dataset_drop_ref(ds, tag);
711 }
712 
713 void
714 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
715 {
716 	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
717 	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
718 
719 	mutex_enter(&ds->ds_lock);
720 	ds->ds_owner = NULL;
721 	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
722 		rw_exit(&ds->ds_rwlock);
723 		cv_broadcast(&ds->ds_exclusive_cv);
724 	}
725 	mutex_exit(&ds->ds_lock);
726 	if (ds->ds_dbuf)
727 		dsl_dataset_drop_ref(ds, tag);
728 	else
729 		dsl_dataset_evict(ds->ds_dbuf, ds);
730 }
731 
732 boolean_t
733 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
734 {
735 	boolean_t gotit = FALSE;
736 
737 	mutex_enter(&ds->ds_lock);
738 	if (ds->ds_owner == NULL &&
739 	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
740 		ds->ds_owner = tag;
741 		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
742 			rw_exit(&ds->ds_rwlock);
743 		gotit = TRUE;
744 	}
745 	mutex_exit(&ds->ds_lock);
746 	return (gotit);
747 }
748 
749 void
750 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
751 {
752 	ASSERT3P(owner, ==, ds->ds_owner);
753 	if (!RW_WRITE_HELD(&ds->ds_rwlock))
754 		rw_enter(&ds->ds_rwlock, RW_WRITER);
755 }
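/*
 * Sketch of the typical open-context lifecycle implied by the routines above:
 * hold (ref + ds_rwlock as READER) ... rele, or own (hold + tryown, which
 * sets ds_owner and drops the READER lock) ... disown.
 * dsl_dataset_make_exclusive() then upgrades an owner to the WRITER lock
 * before destruction.
 */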
756 
757 uint64_t
758 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
759     uint64_t flags, dmu_tx_t *tx)
760 {
761 	dsl_pool_t *dp = dd->dd_pool;
762 	dmu_buf_t *dbuf;
763 	dsl_dataset_phys_t *dsphys;
764 	uint64_t dsobj;
765 	objset_t *mos = dp->dp_meta_objset;
766 
767 	if (origin == NULL)
768 		origin = dp->dp_origin_snap;
769 
770 	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
771 	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
772 	ASSERT(dmu_tx_is_syncing(tx));
773 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
774 
775 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
776 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
777 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
778 	dmu_buf_will_dirty(dbuf, tx);
779 	dsphys = dbuf->db_data;
780 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
781 	dsphys->ds_dir_obj = dd->dd_object;
782 	dsphys->ds_flags = flags;
783 	dsphys->ds_fsid_guid = unique_create();
784 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
785 	    sizeof (dsphys->ds_guid));
786 	dsphys->ds_snapnames_zapobj =
787 	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
788 	    DMU_OT_NONE, 0, tx);
789 	dsphys->ds_creation_time = gethrestime_sec();
790 	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
791 	dsphys->ds_deadlist_obj =
792 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
793 
794 	if (origin) {
795 		dsphys->ds_prev_snap_obj = origin->ds_object;
796 		dsphys->ds_prev_snap_txg =
797 		    origin->ds_phys->ds_creation_txg;
798 		dsphys->ds_used_bytes =
799 		    origin->ds_phys->ds_used_bytes;
800 		dsphys->ds_compressed_bytes =
801 		    origin->ds_phys->ds_compressed_bytes;
802 		dsphys->ds_uncompressed_bytes =
803 		    origin->ds_phys->ds_uncompressed_bytes;
804 		dsphys->ds_bp = origin->ds_phys->ds_bp;
805 		dsphys->ds_flags |= origin->ds_phys->ds_flags;
806 
807 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
808 		origin->ds_phys->ds_num_children++;
809 
810 		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
811 			if (origin->ds_phys->ds_next_clones_obj == 0) {
812 				origin->ds_phys->ds_next_clones_obj =
813 				    zap_create(mos,
814 				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
815 			}
816 			VERIFY(0 == zap_add_int(mos,
817 			    origin->ds_phys->ds_next_clones_obj,
818 			    dsobj, tx));
819 		}
820 
821 		dmu_buf_will_dirty(dd->dd_dbuf, tx);
822 		dd->dd_phys->dd_origin_obj = origin->ds_object;
823 	}
824 
825 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
826 		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
827 
828 	dmu_buf_rele(dbuf, FTAG);
829 
830 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
831 	dd->dd_phys->dd_head_dataset_obj = dsobj;
832 
833 	return (dsobj);
834 }
835 
836 uint64_t
837 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
838     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
839 {
840 	dsl_pool_t *dp = pdd->dd_pool;
841 	uint64_t dsobj, ddobj;
842 	dsl_dir_t *dd;
843 
844 	ASSERT(lastname[0] != '@');
845 
846 	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
847 	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
848 
849 	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
850 
851 	dsl_deleg_set_create_perms(dd, tx, cr);
852 
853 	dsl_dir_close(dd, FTAG);
854 
855 	return (dsobj);
856 }
857 
858 struct destroyarg {
859 	dsl_sync_task_group_t *dstg;
860 	char *snapname;
861 	char *failed;
862 	boolean_t defer;
863 };
864 
865 static int
866 dsl_snapshot_destroy_one(const char *name, void *arg)
867 {
868 	struct destroyarg *da = arg;
869 	dsl_dataset_t *ds;
870 	int err;
871 	char *dsname;
872 
873 	dsname = kmem_asprintf("%s@%s", name, da->snapname);
874 	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
875 	strfree(dsname);
876 	if (err == 0) {
877 		struct dsl_ds_destroyarg *dsda;
878 
879 		dsl_dataset_make_exclusive(ds, da->dstg);
880 		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
881 		dsda->ds = ds;
882 		dsda->defer = da->defer;
883 		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
884 		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
885 	} else if (err == ENOENT) {
886 		err = 0;
887 	} else {
888 		(void) strcpy(da->failed, name);
889 	}
890 	return (err);
891 }
892 
893 /*
894  * Destroy 'snapname' in all descendants of 'fsname'.
895  */
896 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
897 int
898 dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
899 {
900 	int err;
901 	struct destroyarg da;
902 	dsl_sync_task_t *dst;
903 	spa_t *spa;
904 
905 	err = spa_open(fsname, &spa, FTAG);
906 	if (err)
907 		return (err);
908 	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
909 	da.snapname = snapname;
910 	da.failed = fsname;
911 	da.defer = defer;
912 
913 	err = dmu_objset_find(fsname,
914 	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
915 
916 	if (err == 0)
917 		err = dsl_sync_task_group_wait(da.dstg);
918 
919 	for (dst = list_head(&da.dstg->dstg_tasks); dst;
920 	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
921 		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
922 		dsl_dataset_t *ds = dsda->ds;
923 
924 		/*
925 		 * Return the file system name that triggered the error.
926 		 */
927 		if (dst->dst_err) {
928 			dsl_dataset_name(ds, fsname);
929 			*strchr(fsname, '@') = '\0';
930 		}
931 		ASSERT3P(dsda->rm_origin, ==, NULL);
932 		dsl_dataset_disown(ds, da.dstg);
933 		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
934 	}
935 
936 	dsl_sync_task_group_destroy(da.dstg);
937 	spa_close(spa, FTAG);
938 	return (err);
939 }
940 
941 static boolean_t
942 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
943 {
944 	boolean_t might_destroy = B_FALSE;
945 
946 	mutex_enter(&ds->ds_lock);
947 	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
948 	    DS_IS_DEFER_DESTROY(ds))
949 		might_destroy = B_TRUE;
950 	mutex_exit(&ds->ds_lock);
951 
952 	return (might_destroy);
953 }
954 
955 /*
956  * If we're removing a clone, and these three conditions are true:
957  *	1) the clone's origin has no other children
958  *	2) the clone's origin has no user references
959  *	3) the clone's origin has been marked for deferred destruction
960  * Then, prepare to remove the origin as part of this sync task group.
961  */
962 static int
963 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
964 {
965 	dsl_dataset_t *ds = dsda->ds;
966 	dsl_dataset_t *origin = ds->ds_prev;
967 
968 	if (dsl_dataset_might_destroy_origin(origin)) {
969 		char *name;
970 		int namelen;
971 		int error;
972 
973 		namelen = dsl_dataset_namelen(origin) + 1;
974 		name = kmem_alloc(namelen, KM_SLEEP);
975 		dsl_dataset_name(origin, name);
976 #ifdef _KERNEL
977 		error = zfs_unmount_snap(name, NULL);
978 		if (error) {
979 			kmem_free(name, namelen);
980 			return (error);
981 		}
982 #endif
983 		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
984 		kmem_free(name, namelen);
985 		if (error)
986 			return (error);
987 		dsda->rm_origin = origin;
988 		dsl_dataset_make_exclusive(origin, tag);
989 	}
990 
991 	return (0);
992 }
993 
994 /*
995  * ds must be opened as OWNER.  On return (whether successful or not),
996  * ds will be closed and caller can no longer dereference it.
997  */
998 int
999 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
1000 {
1001 	int err;
1002 	dsl_sync_task_group_t *dstg;
1003 	objset_t *os;
1004 	dsl_dir_t *dd;
1005 	uint64_t obj;
1006 	struct dsl_ds_destroyarg dsda = { 0 };
1007 	dsl_dataset_t dummy_ds = { 0 };
1008 
1009 	dsda.ds = ds;
1010 
1011 	if (dsl_dataset_is_snapshot(ds)) {
1012 		/* Destroying a snapshot is simpler */
1013 		dsl_dataset_make_exclusive(ds, tag);
1014 
1015 		dsda.defer = defer;
1016 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1017 		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1018 		    &dsda, tag, 0);
1019 		ASSERT3P(dsda.rm_origin, ==, NULL);
1020 		goto out;
1021 	} else if (defer) {
1022 		err = EINVAL;
1023 		goto out;
1024 	}
1025 
1026 	dd = ds->ds_dir;
1027 	dummy_ds.ds_dir = dd;
1028 	dummy_ds.ds_object = ds->ds_object;
1029 
1030 	/*
1031 	 * Check for errors and mark this ds as inconsistent, in
1032 	 * case we crash while freeing the objects.
1033 	 */
1034 	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1035 	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1036 	if (err)
1037 		goto out;
1038 
1039 	err = dmu_objset_from_ds(ds, &os);
1040 	if (err)
1041 		goto out;
1042 
1043 	/*
1044 	 * remove the objects in open context, so that we won't
1045 	 * have too much to do in syncing context.
1046 	 */
1047 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1048 	    ds->ds_phys->ds_prev_snap_txg)) {
1049 		/*
1050 		 * Ignore errors; if there is not enough disk space
1051 		 * we will deal with it in dsl_dataset_destroy_sync().
1052 		 */
1053 		(void) dmu_free_object(os, obj);
1054 	}
1055 
1056 	/*
1057 	 * We need to sync out all in-flight IO before we try to evict
1058 	 * (the dataset evict func is trying to clear the cached entries
1059 	 * for this dataset in the ARC).
1060 	 */
1061 	txg_wait_synced(dd->dd_pool, 0);
1062 
1063 	/*
1064 	 * If we managed to free all the objects in open
1065 	 * context, the user space accounting should be zero.
1066 	 */
1067 	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1068 	    dmu_objset_userused_enabled(os)) {
1069 		uint64_t count;
1070 
1071 		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1072 		    count == 0);
1073 		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1074 		    count == 0);
1075 	}
1076 
1077 	if (err != ESRCH)
1078 		goto out;
1079 
1080 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1081 	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1082 	rw_exit(&dd->dd_pool->dp_config_rwlock);
1083 
1084 	if (err)
1085 		goto out;
1086 
1087 	/*
1088 	 * Blow away the dsl_dir + head dataset.
1089 	 */
1090 	dsl_dataset_make_exclusive(ds, tag);
1091 	/*
1092 	 * If we're removing a clone, we might also need to remove its
1093 	 * origin.
1094 	 */
1095 	do {
1096 		dsda.need_prep = B_FALSE;
1097 		if (dsl_dir_is_clone(dd)) {
1098 			err = dsl_dataset_origin_rm_prep(&dsda, tag);
1099 			if (err) {
1100 				dsl_dir_close(dd, FTAG);
1101 				goto out;
1102 			}
1103 		}
1104 
1105 		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1106 		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1107 		    dsl_dataset_destroy_sync, &dsda, tag, 0);
1108 		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1109 		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1110 		err = dsl_sync_task_group_wait(dstg);
1111 		dsl_sync_task_group_destroy(dstg);
1112 
1113 		/*
1114 		 * We could be racing against 'zfs release' or 'zfs destroy -d'
1115 		 * on the origin snap, in which case we can get EBUSY if we
1116 		 * needed to destroy the origin snap but were not ready to
1117 		 * do so.
1118 		 */
1119 		if (dsda.need_prep) {
1120 			ASSERT(err == EBUSY);
1121 			ASSERT(dsl_dir_is_clone(dd));
1122 			ASSERT(dsda.rm_origin == NULL);
1123 		}
1124 	} while (dsda.need_prep);
1125 
1126 	if (dsda.rm_origin != NULL)
1127 		dsl_dataset_disown(dsda.rm_origin, tag);
1128 
1129 	/* if it is successful, dsl_dir_destroy_sync will close the dd */
1130 	if (err)
1131 		dsl_dir_close(dd, FTAG);
1132 out:
1133 	dsl_dataset_disown(ds, tag);
1134 	return (err);
1135 }
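/*
 * In outline, head-dataset destroy is staged: the destroy_begin sync task
 * marks the dataset inconsistent, the bulk of the objects are freed in open
 * context above, and only the final dsl_dataset/dsl_dir teardown (plus, for
 * clones, possibly the deferred-destroy origin) runs as a sync task group.
 */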
1136 
1137 blkptr_t *
1138 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1139 {
1140 	return (&ds->ds_phys->ds_bp);
1141 }
1142 
1143 void
1144 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1145 {
1146 	ASSERT(dmu_tx_is_syncing(tx));
1147 	/* If it's the meta-objset, set dp_meta_rootbp */
1148 	if (ds == NULL) {
1149 		tx->tx_pool->dp_meta_rootbp = *bp;
1150 	} else {
1151 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1152 		ds->ds_phys->ds_bp = *bp;
1153 	}
1154 }
1155 
1156 spa_t *
1157 dsl_dataset_get_spa(dsl_dataset_t *ds)
1158 {
1159 	return (ds->ds_dir->dd_pool->dp_spa);
1160 }
1161 
1162 void
1163 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1164 {
1165 	dsl_pool_t *dp;
1166 
1167 	if (ds == NULL) /* this is the meta-objset */
1168 		return;
1169 
1170 	ASSERT(ds->ds_objset != NULL);
1171 
1172 	if (ds->ds_phys->ds_next_snap_obj != 0)
1173 		panic("dirtying snapshot!");
1174 
1175 	dp = ds->ds_dir->dd_pool;
1176 
1177 	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1178 		/* up the hold count until we can be written out */
1179 		dmu_buf_add_ref(ds->ds_dbuf, ds);
1180 	}
1181 }
1182 
1183 /*
1184  * The unique space in the head dataset can be calculated by subtracting
1185  * the space used in the most recent snapshot, that is still being used
1186  * in this file system, from the space currently in use.  To figure out
1187  * the space in the most recent snapshot still in use, we need to take
1188  * the total space used in the snapshot and subtract out the space that
1189  * has been freed up since the snapshot was taken.
1190  */
1191 static void
1192 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1193 {
1194 	uint64_t mrs_used;
1195 	uint64_t dlused, dlcomp, dluncomp;
1196 
1197 	ASSERT(!dsl_dataset_is_snapshot(ds));
1198 
1199 	if (ds->ds_phys->ds_prev_snap_obj != 0)
1200 		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1201 	else
1202 		mrs_used = 0;
1203 
1204 	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
1205 	    &dluncomp));
1206 
1207 	ASSERT3U(dlused, <=, mrs_used);
1208 	ds->ds_phys->ds_unique_bytes =
1209 	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1210 
1211 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1212 	    SPA_VERSION_UNIQUE_ACCURATE)
1213 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1214 }
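/*
 * A hypothetical example of the calculation above: if the head currently
 * uses 500, the most recent snapshot used 300 when taken, and 120 of those
 * blocks have since been freed onto the head's deadlist, the snapshot still
 * shares 300 - 120 = 180 with the head, so ds_unique_bytes = 500 - 180 = 320.
 */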
1215 
1216 struct killarg {
1217 	dsl_dataset_t *ds;
1218 	dmu_tx_t *tx;
1219 };
1220 
1221 /* ARGSUSED */
1222 static int
1223 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1224     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1225 {
1226 	struct killarg *ka = arg;
1227 	dmu_tx_t *tx = ka->tx;
1228 
1229 	if (bp == NULL)
1230 		return (0);
1231 
1232 	if (zb->zb_level == ZB_ZIL_LEVEL) {
1233 		ASSERT(zilog != NULL);
1234 		/*
1235 		 * It's a block in the intent log.  It has no
1236 		 * accounting, so just free it.
1237 		 */
1238 		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1239 	} else {
1240 		ASSERT(zilog == NULL);
1241 		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1242 		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1243 	}
1244 
1245 	return (0);
1246 }
1247 
1248 /* ARGSUSED */
1249 static int
1250 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1251 {
1252 	dsl_dataset_t *ds = arg1;
1253 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1254 	uint64_t count;
1255 	int err;
1256 
1257 	/*
1258 	 * Can't delete a head dataset if there are snapshots of it.
1259 	 * (Except if the only snapshots are from the branch we cloned
1260 	 * from.)
1261 	 */
1262 	if (ds->ds_prev != NULL &&
1263 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1264 		return (EBUSY);
1265 
1266 	/*
1267 	 * This is really a dsl_dir thing, but check it here so that
1268 	 * we'll be less likely to leave this dataset inconsistent &
1269 	 * nearly destroyed.
1270 	 */
1271 	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1272 	if (err)
1273 		return (err);
1274 	if (count != 0)
1275 		return (EEXIST);
1276 
1277 	return (0);
1278 }
1279 
1280 /* ARGSUSED */
1281 static void
1282 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1283 {
1284 	dsl_dataset_t *ds = arg1;
1285 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1286 
1287 	/* Mark it as inconsistent on-disk, in case we crash */
1288 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1289 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1290 
1291 	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1292 	    "dataset = %llu", ds->ds_object);
1293 }
1294 
1295 static int
1296 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1297     dmu_tx_t *tx)
1298 {
1299 	dsl_dataset_t *ds = dsda->ds;
1300 	dsl_dataset_t *ds_prev = ds->ds_prev;
1301 
1302 	if (dsl_dataset_might_destroy_origin(ds_prev)) {
1303 		struct dsl_ds_destroyarg ndsda = {0};
1304 
1305 		/*
1306 		 * If we're not prepared to remove the origin, don't remove
1307 		 * the clone either.
1308 		 */
1309 		if (dsda->rm_origin == NULL) {
1310 			dsda->need_prep = B_TRUE;
1311 			return (EBUSY);
1312 		}
1313 
1314 		ndsda.ds = ds_prev;
1315 		ndsda.is_origin_rm = B_TRUE;
1316 		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1317 	}
1318 
1319 	/*
1320 	 * If we're not going to remove the origin after all,
1321 	 * undo the open context setup.
1322 	 */
1323 	if (dsda->rm_origin != NULL) {
1324 		dsl_dataset_disown(dsda->rm_origin, tag);
1325 		dsda->rm_origin = NULL;
1326 	}
1327 
1328 	return (0);
1329 }
1330 
1331 /* ARGSUSED */
1332 int
1333 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1334 {
1335 	struct dsl_ds_destroyarg *dsda = arg1;
1336 	dsl_dataset_t *ds = dsda->ds;
1337 
1338 	/* we have an owner hold, so no one else can destroy us */
1339 	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1340 
1341 	/*
1342 	 * Only allow deferred destroy on pools that support it.
1343 	 * NOTE: deferred destroy is only supported on snapshots.
1344 	 */
1345 	if (dsda->defer) {
1346 		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1347 		    SPA_VERSION_USERREFS)
1348 			return (ENOTSUP);
1349 		ASSERT(dsl_dataset_is_snapshot(ds));
1350 		return (0);
1351 	}
1352 
1353 	/*
1354 	 * Can't delete a head dataset if there are snapshots of it.
1355 	 * (Except if the only snapshots are from the branch we cloned
1356 	 * from.)
1357 	 */
1358 	if (ds->ds_prev != NULL &&
1359 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1360 		return (EBUSY);
1361 
1362 	/*
1363 	 * If we made changes this txg, traverse_dataset won't find
1364 	 * them.  Try again.
1365 	 */
1366 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1367 		return (EAGAIN);
1368 
1369 	if (dsl_dataset_is_snapshot(ds)) {
1370 		/*
1371 		 * If this snapshot has an elevated user reference count,
1372 		 * we can't destroy it yet.
1373 		 */
1374 		if (ds->ds_userrefs > 0 && !dsda->releasing)
1375 			return (EBUSY);
1376 
1377 		mutex_enter(&ds->ds_lock);
1378 		/*
1379 		 * Can't delete a branch point. However, if we're destroying
1380 		 * a clone and removing its origin due to it having a user
1381 		 * hold count of 0 and having been marked for deferred destroy,
1382 		 * it's OK for the origin to have a single clone.
1383 		 */
1384 		if (ds->ds_phys->ds_num_children >
1385 		    (dsda->is_origin_rm ? 2 : 1)) {
1386 			mutex_exit(&ds->ds_lock);
1387 			return (EEXIST);
1388 		}
1389 		mutex_exit(&ds->ds_lock);
1390 	} else if (dsl_dir_is_clone(ds->ds_dir)) {
1391 		return (dsl_dataset_origin_check(dsda, arg2, tx));
1392 	}
1393 
1394 	/* XXX we should do some i/o error checking... */
1395 	return (0);
1396 }
1397 
1398 struct refsarg {
1399 	kmutex_t lock;
1400 	boolean_t gone;
1401 	kcondvar_t cv;
1402 };
1403 
1404 /* ARGSUSED */
1405 static void
1406 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1407 {
1408 	struct refsarg *arg = argv;
1409 
1410 	mutex_enter(&arg->lock);
1411 	arg->gone = TRUE;
1412 	cv_signal(&arg->cv);
1413 	mutex_exit(&arg->lock);
1414 }
1415 
1416 static void
1417 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1418 {
1419 	struct refsarg arg;
1420 
1421 	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1422 	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1423 	arg.gone = FALSE;
1424 	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1425 	    dsl_dataset_refs_gone);
1426 	dmu_buf_rele(ds->ds_dbuf, tag);
1427 	mutex_enter(&arg.lock);
1428 	while (!arg.gone)
1429 		cv_wait(&arg.cv, &arg.lock);
1430 	ASSERT(arg.gone);
1431 	mutex_exit(&arg.lock);
1432 	ds->ds_dbuf = NULL;
1433 	ds->ds_phys = NULL;
1434 	mutex_destroy(&arg.lock);
1435 	cv_destroy(&arg.cv);
1436 }
1437 
1438 static void
1439 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1440 {
1441 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1442 	uint64_t count;
1443 	int err;
1444 
1445 	ASSERT(ds->ds_phys->ds_num_children >= 2);
1446 	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1447 	/*
1448 	 * The err should not be ENOENT, but a bug in a previous version
1449 	 * of the code could cause upgrade_clones_cb() to not set
1450 	 * ds_next_snap_obj when it should, leading to a missing entry.
1451 	 * If we knew that the pool was created after
1452 	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1453 	 * ENOENT.  However, at least we can check that we don't have
1454 	 * too many entries in the next_clones_obj even after failing to
1455 	 * remove this one.
1456 	 */
1457 	if (err != ENOENT) {
1458 		VERIFY3U(err, ==, 0);
1459 	}
1460 	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1461 	    &count));
1462 	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1463 }
1464 
1465 void
1466 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1467 {
1468 	struct dsl_ds_destroyarg *dsda = arg1;
1469 	dsl_dataset_t *ds = dsda->ds;
1470 	int err;
1471 	int after_branch_point = FALSE;
1472 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1473 	objset_t *mos = dp->dp_meta_objset;
1474 	dsl_dataset_t *ds_prev = NULL;
1475 	uint64_t obj;
1476 
1477 	ASSERT(ds->ds_owner);
1478 	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1479 	ASSERT(ds->ds_prev == NULL ||
1480 	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1481 	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1482 
1483 	if (dsda->defer) {
1484 		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1485 		if (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1) {
1486 			dmu_buf_will_dirty(ds->ds_dbuf, tx);
1487 			ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1488 			return;
1489 		}
1490 	}
1491 
1492 	/* signal any waiters that this dataset is going away */
1493 	mutex_enter(&ds->ds_lock);
1494 	ds->ds_owner = dsl_reaper;
1495 	cv_broadcast(&ds->ds_exclusive_cv);
1496 	mutex_exit(&ds->ds_lock);
1497 
1498 	if (ds->ds_objset) {
1499 		dmu_objset_evict(ds->ds_objset);
1500 		ds->ds_objset = NULL;
1501 	}
1502 
1503 	/* Remove our reservation */
1504 	if (ds->ds_reserved != 0) {
1505 		dsl_prop_setarg_t psa;
1506 		uint64_t value = 0;
1507 
1508 		dsl_prop_setarg_init_uint64(&psa, "refreservation",
1509 		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1510 		    &value);
1511 		psa.psa_effective_value = 0;	/* predict default value */
1512 
1513 		dsl_dataset_set_reservation_sync(ds, &psa, tx);
1514 		ASSERT3U(ds->ds_reserved, ==, 0);
1515 	}
1516 
1517 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1518 
1519 	dsl_scan_ds_destroyed(ds, tx);
1520 
1521 	obj = ds->ds_object;
1522 
1523 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
1524 		if (ds->ds_prev) {
1525 			ds_prev = ds->ds_prev;
1526 		} else {
1527 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1528 			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1529 		}
1530 		after_branch_point =
1531 		    (ds_prev->ds_phys->ds_next_snap_obj != obj);
1532 
1533 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1534 		if (after_branch_point &&
1535 		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
1536 			remove_from_next_clones(ds_prev, obj, tx);
1537 			if (ds->ds_phys->ds_next_snap_obj != 0) {
1538 				VERIFY(0 == zap_add_int(mos,
1539 				    ds_prev->ds_phys->ds_next_clones_obj,
1540 				    ds->ds_phys->ds_next_snap_obj, tx));
1541 			}
1542 		}
1543 		if (after_branch_point &&
1544 		    ds->ds_phys->ds_next_snap_obj == 0) {
1545 			/* This clone is toast. */
1546 			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1547 			ds_prev->ds_phys->ds_num_children--;
1548 
1549 			/*
1550 			 * If the clone's origin has no other clones, no
1551 			 * user holds, and has been marked for deferred
1552 			 * deletion, then we should have done the necessary
1553 			 * destroy setup for it.
1554 			 */
1555 			if (ds_prev->ds_phys->ds_num_children == 1 &&
1556 			    ds_prev->ds_userrefs == 0 &&
1557 			    DS_IS_DEFER_DESTROY(ds_prev)) {
1558 				ASSERT3P(dsda->rm_origin, !=, NULL);
1559 			} else {
1560 				ASSERT3P(dsda->rm_origin, ==, NULL);
1561 			}
1562 		} else if (!after_branch_point) {
1563 			ds_prev->ds_phys->ds_next_snap_obj =
1564 			    ds->ds_phys->ds_next_snap_obj;
1565 		}
1566 	}
1567 
1568 	if (dsl_dataset_is_snapshot(ds)) {
1569 		blkptr_t bp;
1570 		zio_t *pio;
1571 		dsl_dataset_t *ds_next;
1572 		uint64_t itor = 0;
1573 		uint64_t old_unique;
1574 		int64_t used = 0, compressed = 0, uncompressed = 0;
1575 
1576 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1577 		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1578 		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1579 
1580 		old_unique = ds_next->ds_phys->ds_unique_bytes;
1581 
1582 		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1583 		ds_next->ds_phys->ds_prev_snap_obj =
1584 		    ds->ds_phys->ds_prev_snap_obj;
1585 		ds_next->ds_phys->ds_prev_snap_txg =
1586 		    ds->ds_phys->ds_prev_snap_txg;
1587 		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1588 		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1589 
1590 		pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1591 
1592 		/*
1593 		 * Transfer to our deadlist (which will become next's
1594 		 * new deadlist) any entries from next's current
1595 		 * deadlist which were born before prev, and free the
1596 		 * other entries.
1597 		 *
1598 		 * XXX we're doing this long task with the config lock held
1599 		 */
1600 		while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1601 			if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1602 				VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1603 				    &bp, tx));
1604 				if (ds_prev && !after_branch_point &&
1605 				    bp.blk_birth >
1606 				    ds_prev->ds_phys->ds_prev_snap_txg) {
1607 					ds_prev->ds_phys->ds_unique_bytes +=
1608 					    bp_get_dsize_sync(dp->dp_spa, &bp);
1609 				}
1610 			} else {
1611 				used += bp_get_dsize_sync(dp->dp_spa, &bp);
1612 				compressed += BP_GET_PSIZE(&bp);
1613 				uncompressed += BP_GET_UCSIZE(&bp);
1614 				dsl_free_sync(pio, dp, tx->tx_txg, &bp);
1615 			}
1616 		}
1617 		VERIFY3U(zio_wait(pio), ==, 0);
1618 		ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1619 
1620 		/* change snapused */
1621 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1622 		    -used, -compressed, -uncompressed, tx);
1623 
1624 		/* free next's deadlist */
1625 		bplist_close(&ds_next->ds_deadlist);
1626 		bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1627 
1628 		/* set next's deadlist to our deadlist */
1629 		bplist_close(&ds->ds_deadlist);
1630 		ds_next->ds_phys->ds_deadlist_obj =
1631 		    ds->ds_phys->ds_deadlist_obj;
1632 		VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1633 		    ds_next->ds_phys->ds_deadlist_obj));
1634 		ds->ds_phys->ds_deadlist_obj = 0;
1635 
1636 		if (dsl_dataset_is_snapshot(ds_next)) {
1637 			/*
1638 			 * Update next's unique to include blocks which
1639 			 * were previously shared by only this snapshot
1640 			 * and it.  Those blocks will be born after the
1641 			 * prev snap and before this snap, and will have
1642 			 * died after the next snap and before the one
1643 			 * after that (i.e. be on the snap after next's
1644 			 * deadlist).
1645 			 *
1646 			 * XXX we're doing this long task with the
1647 			 * config lock held
1648 			 */
1649 			dsl_dataset_t *ds_after_next;
1650 			uint64_t space;
1651 
1652 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1653 			    ds_next->ds_phys->ds_next_snap_obj,
1654 			    FTAG, &ds_after_next));
1655 
1656 			VERIFY(0 ==
1657 			    bplist_space_birthrange(&ds_after_next->ds_deadlist,
1658 			    ds->ds_phys->ds_prev_snap_txg,
1659 			    ds->ds_phys->ds_creation_txg, &space));
1660 			ds_next->ds_phys->ds_unique_bytes += space;
1661 
1662 			dsl_dataset_rele(ds_after_next, FTAG);
1663 			ASSERT3P(ds_next->ds_prev, ==, NULL);
1664 		} else {
1665 			ASSERT3P(ds_next->ds_prev, ==, ds);
1666 			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1667 			ds_next->ds_prev = NULL;
1668 			if (ds_prev) {
1669 				VERIFY(0 == dsl_dataset_get_ref(dp,
1670 				    ds->ds_phys->ds_prev_snap_obj,
1671 				    ds_next, &ds_next->ds_prev));
1672 			}
1673 
1674 			dsl_dataset_recalc_head_uniq(ds_next);
1675 
1676 			/*
1677 			 * Reduce the amount of our unconsumed refreservation
1678 			 * being charged to our parent by the amount of
1679 			 * new unique data we have gained.
1680 			 */
1681 			if (old_unique < ds_next->ds_reserved) {
1682 				int64_t mrsdelta;
1683 				uint64_t new_unique =
1684 				    ds_next->ds_phys->ds_unique_bytes;
1685 
1686 				ASSERT(old_unique <= new_unique);
1687 				mrsdelta = MIN(new_unique - old_unique,
1688 				    ds_next->ds_reserved - old_unique);
1689 				dsl_dir_diduse_space(ds->ds_dir,
1690 				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1691 			}
1692 		}
1693 		dsl_dataset_rele(ds_next, FTAG);
1694 	} else {
1695 		/*
1696 		 * There's no next snapshot, so this is a head dataset.
1697 		 * Destroy the deadlist.  Unless it's a clone, the
1698 		 * deadlist should be empty.  (If it's a clone, it's
1699 		 * safe to ignore the deadlist contents.)
1700 		 */
1701 		struct killarg ka;
1702 
1703 		ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1704 		bplist_close(&ds->ds_deadlist);
1705 		bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1706 		ds->ds_phys->ds_deadlist_obj = 0;
1707 
1708 		/*
1709 		 * Free everything that we point to (that's born after
1710 		 * the previous snapshot, if we are a clone)
1711 		 *
1712 		 * NB: this should be very quick, because we already
1713 		 * freed all the objects in open context.
1714 		 */
1715 		ka.ds = ds;
1716 		ka.tx = tx;
1717 		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1718 		    TRAVERSE_POST, kill_blkptr, &ka);
1719 		ASSERT3U(err, ==, 0);
1720 		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1721 		    ds->ds_phys->ds_unique_bytes == 0);
1722 
1723 		if (ds->ds_prev != NULL) {
1724 			dsl_dataset_rele(ds->ds_prev, ds);
1725 			ds->ds_prev = ds_prev = NULL;
1726 		}
1727 	}
1728 
1729 	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1730 		/* Erase the link in the dir */
1731 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1732 		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1733 		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1734 		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1735 		ASSERT(err == 0);
1736 	} else {
1737 		/* remove from snapshot namespace */
1738 		dsl_dataset_t *ds_head;
1739 		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1740 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1741 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1742 		VERIFY(0 == dsl_dataset_get_snapname(ds));
1743 #ifdef ZFS_DEBUG
1744 		{
1745 			uint64_t val;
1746 
1747 			err = dsl_dataset_snap_lookup(ds_head,
1748 			    ds->ds_snapname, &val);
1749 			ASSERT3U(err, ==, 0);
1750 			ASSERT3U(val, ==, obj);
1751 		}
1752 #endif
1753 		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1754 		ASSERT(err == 0);
1755 		dsl_dataset_rele(ds_head, FTAG);
1756 	}
1757 
1758 	if (ds_prev && ds->ds_prev != ds_prev)
1759 		dsl_dataset_rele(ds_prev, FTAG);
1760 
1761 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1762 	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1763 	    "dataset = %llu", ds->ds_object);
1764 
1765 	if (ds->ds_phys->ds_next_clones_obj != 0) {
1766 		uint64_t count;
1767 		ASSERT(0 == zap_count(mos,
1768 		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1769 		VERIFY(0 == dmu_object_free(mos,
1770 		    ds->ds_phys->ds_next_clones_obj, tx));
1771 	}
1772 	if (ds->ds_phys->ds_props_obj != 0)
1773 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1774 	if (ds->ds_phys->ds_userrefs_obj != 0)
1775 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1776 	dsl_dir_close(ds->ds_dir, ds);
1777 	ds->ds_dir = NULL;
1778 	dsl_dataset_drain_refs(ds, tag);
1779 	VERIFY(0 == dmu_object_free(mos, obj, tx));
1780 
1781 	if (dsda->rm_origin) {
1782 		/*
1783 		 * Remove the origin of the clone we just destroyed.
1784 		 */
1785 		struct dsl_ds_destroyarg ndsda = {0};
1786 
1787 		ndsda.ds = dsda->rm_origin;
1788 		dsl_dataset_destroy_sync(&ndsda, tag, tx);
1789 	}
1790 }
1791 
1792 static int
1793 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1794 {
1795 	uint64_t asize;
1796 
1797 	if (!dmu_tx_is_syncing(tx))
1798 		return (0);
1799 
1800 	/*
1801 	 * If there's an fs-only reservation, any blocks that might become
1802 	 * owned by the snapshot dataset must be accommodated by space
1803 	 * outside of the reservation.
1804 	 */
1805 	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1806 	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1807 	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1808 		return (ENOSPC);
1809 
1810 	/*
1811 	 * Propagate any reserved space for this snapshot to other
1812 	 * snapshot checks in this sync group.
1813 	 */
1814 	if (asize > 0)
1815 		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1816 
1817 	return (0);
1818 }
1819 
1820 int
1821 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1822 {
1823 	dsl_dataset_t *ds = arg1;
1824 	const char *snapname = arg2;
1825 	int err;
1826 	uint64_t value;
1827 
1828 	/*
1829 	 * We don't allow multiple snapshots of the same txg.  If there
1830 	 * is already one, try again.
1831 	 */
1832 	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1833 		return (EAGAIN);
1834 
1835 	/*
1836 	 * Check for a conflicting snapshot name.
1837 	 */
1838 	err = dsl_dataset_snap_lookup(ds, snapname, &value);
1839 	if (err == 0)
1840 		return (EEXIST);
1841 	if (err != ENOENT)
1842 		return (err);
1843 
1844 	/*
1845 	 * Check that the snapshot's full name is not too long: the dataset
1846 	 * name's length + 1 for the @-sign + the snapshot name's length.
1847 	 */
1848 	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1849 		return (ENAMETOOLONG);
1850 
1851 	err = dsl_dataset_snapshot_reserve_space(ds, tx);
1852 	if (err)
1853 		return (err);
1854 
1855 	ds->ds_trysnap_txg = tx->tx_txg;
1856 	return (0);
1857 }
1858 
1859 void
1860 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1861 {
1862 	dsl_dataset_t *ds = arg1;
1863 	const char *snapname = arg2;
1864 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1865 	dmu_buf_t *dbuf;
1866 	dsl_dataset_phys_t *dsphys;
1867 	uint64_t dsobj, crtxg;
1868 	objset_t *mos = dp->dp_meta_objset;
1869 	int err;
1870 
1871 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1872 
1873 	/*
1874 	 * The origin's ds_creation_txg has to be < TXG_INITIAL
1875 	 */
1876 	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1877 		crtxg = 1;
1878 	else
1879 		crtxg = tx->tx_txg;
1880 
1881 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1882 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1883 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1884 	dmu_buf_will_dirty(dbuf, tx);
1885 	dsphys = dbuf->db_data;
1886 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
1887 	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1888 	dsphys->ds_fsid_guid = unique_create();
1889 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1890 	    sizeof (dsphys->ds_guid));
1891 	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
1892 	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
1893 	dsphys->ds_next_snap_obj = ds->ds_object;
1894 	dsphys->ds_num_children = 1;
1895 	dsphys->ds_creation_time = gethrestime_sec();
1896 	dsphys->ds_creation_txg = crtxg;
1897 	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
1898 	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
1899 	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
1900 	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
1901 	dsphys->ds_flags = ds->ds_phys->ds_flags;
1902 	dsphys->ds_bp = ds->ds_phys->ds_bp;
1903 	dmu_buf_rele(dbuf, FTAG);
1904 
1905 	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
1906 	if (ds->ds_prev) {
1907 		uint64_t next_clones_obj =
1908 		    ds->ds_prev->ds_phys->ds_next_clones_obj;
1909 		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
1910 		    ds->ds_object ||
1911 		    ds->ds_prev->ds_phys->ds_num_children > 1);
1912 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1913 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1914 			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1915 			    ds->ds_prev->ds_phys->ds_creation_txg);
1916 			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
1917 		} else if (next_clones_obj != 0) {
1918 			remove_from_next_clones(ds->ds_prev,
1919 			    dsphys->ds_next_snap_obj, tx);
1920 			VERIFY3U(0, ==, zap_add_int(mos,
1921 			    next_clones_obj, dsobj, tx));
1922 		}
1923 	}
1924 
1925 	/*
1926 	 * If we have a reference-reservation on this dataset, we will
1927 	 * need to increase the amount of refreservation being charged
1928 	 * since our unique space is going to zero.
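	 *
	 * For example (made-up numbers): with refreservation=10G and
	 * ds_unique_bytes=4G, the snapshot takes ownership of that 4G, so
	 * delta = MIN(4G, 10G) = 4G is charged to DD_USED_REFRSRV below.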
1929 	 */
1930 	if (ds->ds_reserved) {
1931 		int64_t delta;
1932 		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
1933 		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1934 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
1935 		    delta, 0, 0, tx);
1936 	}
1937 
1938 	bplist_close(&ds->ds_deadlist);
1939 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1940 	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
1941 	ds->ds_phys->ds_prev_snap_obj = dsobj;
1942 	ds->ds_phys->ds_prev_snap_txg = crtxg;
1943 	ds->ds_phys->ds_unique_bytes = 0;
1944 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1945 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1946 	ds->ds_phys->ds_deadlist_obj =
1947 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1948 	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1949 	    ds->ds_phys->ds_deadlist_obj));
1950 
1951 	dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
1952 	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
1953 	    snapname, 8, 1, &dsobj, tx);
1954 	ASSERT(err == 0);
1955 
1956 	if (ds->ds_prev)
1957 		dsl_dataset_drop_ref(ds->ds_prev, ds);
1958 	VERIFY(0 == dsl_dataset_get_ref(dp,
1959 	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
1960 
1961 	dsl_scan_ds_snapshotted(ds, tx);
1962 
1963 	dsl_dir_snap_cmtime_update(ds->ds_dir);
1964 
1965 	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
1966 	    "dataset = %llu", dsobj);
1967 }
1968 
1969 void
1970 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
1971 {
1972 	ASSERT(dmu_tx_is_syncing(tx));
1973 	ASSERT(ds->ds_objset != NULL);
1974 	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
1975 
1976 	/*
1977 	 * in case we had to change ds_fsid_guid when we opened it,
1978 	 * sync it out now.
1979 	 */
1980 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1981 	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
1982 
1983 	dsl_dir_dirty(ds->ds_dir, tx);
1984 	dmu_objset_sync(ds->ds_objset, zio, tx);
1985 }
1986 
1987 void
1988 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1989 {
1990 	uint64_t refd, avail, uobjs, aobjs;
1991 
1992 	dsl_dir_stats(ds->ds_dir, nv);
1993 
1994 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1995 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1996 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1997 
1998 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1999 	    ds->ds_phys->ds_creation_time);
2000 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
2001 	    ds->ds_phys->ds_creation_txg);
2002 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
2003 	    ds->ds_quota);
2004 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
2005 	    ds->ds_reserved);
2006 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
2007 	    ds->ds_phys->ds_guid);
2008 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
2009 	    ds->ds_phys->ds_unique_bytes);
2010 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
2011 	    ds->ds_object);
2012 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2013 	    ds->ds_userrefs);
2014 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2015 	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2016 
2017 	if (ds->ds_phys->ds_next_snap_obj) {
2018 		/*
2019 		 * This is a snapshot; override the dd's space used with
2020 		 * our unique space and compression ratio.
2021 		 */
2022 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2023 		    ds->ds_phys->ds_unique_bytes);
2024 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2025 		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2026 		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
2027 		    ds->ds_phys->ds_compressed_bytes));
2028 	}
2029 }
2030 
2031 void
2032 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2033 {
2034 	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2035 	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2036 	stat->dds_guid = ds->ds_phys->ds_guid;
2037 	if (ds->ds_phys->ds_next_snap_obj) {
2038 		stat->dds_is_snapshot = B_TRUE;
2039 		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2040 	} else {
2041 		stat->dds_is_snapshot = B_FALSE;
2042 		stat->dds_num_clones = 0;
2043 	}
2044 
2045 	/* clone origin is really a dsl_dir thing... */
2046 	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2047 	if (dsl_dir_is_clone(ds->ds_dir)) {
2048 		dsl_dataset_t *ods;
2049 
2050 		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2051 		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2052 		dsl_dataset_name(ods, stat->dds_origin);
2053 		dsl_dataset_drop_ref(ods, FTAG);
2054 	} else {
2055 		stat->dds_origin[0] = '\0';
2056 	}
2057 	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2058 }
2059 
2060 uint64_t
2061 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2062 {
2063 	return (ds->ds_fsid_guid);
2064 }
2065 
2066 void
2067 dsl_dataset_space(dsl_dataset_t *ds,
2068     uint64_t *refdbytesp, uint64_t *availbytesp,
2069     uint64_t *usedobjsp, uint64_t *availobjsp)
2070 {
2071 	*refdbytesp = ds->ds_phys->ds_used_bytes;
2072 	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2073 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2074 		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2075 	if (ds->ds_quota != 0) {
2076 		/*
2077 		 * Adjust available bytes according to refquota
2078 		 */
2079 		if (*refdbytesp < ds->ds_quota)
2080 			*availbytesp = MIN(*availbytesp,
2081 			    ds->ds_quota - *refdbytesp);
2082 		else
2083 			*availbytesp = 0;
2084 	}
2085 	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2086 	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
2087 }
2088 
2089 boolean_t
2090 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2091 {
2092 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2093 
2094 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2095 	    dsl_pool_sync_context(dp));
2096 	if (ds->ds_prev == NULL)
2097 		return (B_FALSE);
2098 	if (ds->ds_phys->ds_bp.blk_birth >
2099 	    ds->ds_prev->ds_phys->ds_creation_txg)
2100 		return (B_TRUE);
2101 	return (B_FALSE);
2102 }
2103 
2104 /* ARGSUSED */
2105 static int
2106 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2107 {
2108 	dsl_dataset_t *ds = arg1;
2109 	char *newsnapname = arg2;
2110 	dsl_dir_t *dd = ds->ds_dir;
2111 	dsl_dataset_t *hds;
2112 	uint64_t val;
2113 	int err;
2114 
2115 	err = dsl_dataset_hold_obj(dd->dd_pool,
2116 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2117 	if (err)
2118 		return (err);
2119 
2120 	/* new name better not be in use */
2121 	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2122 	dsl_dataset_rele(hds, FTAG);
2123 
2124 	if (err == 0)
2125 		err = EEXIST;
2126 	else if (err == ENOENT)
2127 		err = 0;
2128 
2129 	/* dataset name + 1 for the "@" + the new snapshot name must fit */
2130 	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2131 		err = ENAMETOOLONG;
2132 
2133 	return (err);
2134 }
2135 
2136 static void
2137 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2138 {
2139 	dsl_dataset_t *ds = arg1;
2140 	const char *newsnapname = arg2;
2141 	dsl_dir_t *dd = ds->ds_dir;
2142 	objset_t *mos = dd->dd_pool->dp_meta_objset;
2143 	dsl_dataset_t *hds;
2144 	int err;
2145 
2146 	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2147 
2148 	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2149 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2150 
2151 	VERIFY(0 == dsl_dataset_get_snapname(ds));
2152 	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2153 	ASSERT3U(err, ==, 0);
2154 	mutex_enter(&ds->ds_lock);
2155 	(void) strcpy(ds->ds_snapname, newsnapname);
2156 	mutex_exit(&ds->ds_lock);
2157 	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2158 	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2159 	ASSERT3U(err, ==, 0);
2160 
2161 	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2162 	    "dataset = %llu", ds->ds_object);
2163 	dsl_dataset_rele(hds, FTAG);
2164 }
2165 
2166 struct renamesnaparg {
2167 	dsl_sync_task_group_t *dstg;
2168 	char failed[MAXPATHLEN];
2169 	char *oldsnap;
2170 	char *newsnap;
2171 };
2172 
2173 static int
2174 dsl_snapshot_rename_one(const char *name, void *arg)
2175 {
2176 	struct renamesnaparg *ra = arg;
2177 	dsl_dataset_t *ds = NULL;
2178 	char *snapname;
2179 	int err;
2180 
2181 	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2182 	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2183 
2184 	/*
2185 	 * For recursive snapshot renames the parent won't be changing
2186 	 * so we just pass name for both the to/from arguments.
2187 	 */
2188 	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2189 	if (err != 0) {
2190 		strfree(snapname);
2191 		return (err == ENOENT ? 0 : err);
2192 	}
2193 
2194 #ifdef _KERNEL
2195 	/*
2196 	 * For each filesystem included in the rename, we must unmount its snapshot.
2197 	 */
2198 	(void) zfs_unmount_snap(snapname, NULL);
2199 #endif
2200 	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2201 	strfree(snapname);
2202 	if (err != 0)
2203 		return (err == ENOENT ? 0 : err);
2204 
2205 	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2206 	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2207 
2208 	return (0);
2209 }
2210 
2211 static int
2212 dsl_recursive_rename(char *oldname, const char *newname)
2213 {
2214 	int err;
2215 	struct renamesnaparg *ra;
2216 	dsl_sync_task_t *dst;
2217 	spa_t *spa;
2218 	char *cp, *fsname = spa_strdup(oldname);
2219 	int len = strlen(oldname) + 1;
2220 
2221 	/* truncate the snapshot name to get the fsname */
2222 	cp = strchr(fsname, '@');
2223 	*cp = '\0';
2224 
2225 	err = spa_open(fsname, &spa, FTAG);
2226 	if (err) {
2227 		kmem_free(fsname, len);
2228 		return (err);
2229 	}
2230 	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2231 	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2232 
2233 	ra->oldsnap = strchr(oldname, '@') + 1;
2234 	ra->newsnap = strchr(newname, '@') + 1;
2235 	*ra->failed = '\0';
2236 
2237 	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2238 	    DS_FIND_CHILDREN);
2239 	kmem_free(fsname, len);
2240 
2241 	if (err == 0) {
2242 		err = dsl_sync_task_group_wait(ra->dstg);
2243 	}
2244 
2245 	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2246 	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2247 		dsl_dataset_t *ds = dst->dst_arg1;
2248 		if (dst->dst_err) {
2249 			dsl_dir_name(ds->ds_dir, ra->failed);
2250 			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
2251 			(void) strlcat(ra->failed, ra->newsnap,
2252 			    sizeof (ra->failed));
2253 		}
2254 		dsl_dataset_rele(ds, ra->dstg);
2255 	}
2256 
2257 	if (err)
2258 		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2259 
2260 	dsl_sync_task_group_destroy(ra->dstg);
2261 	kmem_free(ra, sizeof (struct renamesnaparg));
2262 	spa_close(spa, FTAG);
2263 	return (err);
2264 }
2265 
2266 static int
2267 dsl_valid_rename(const char *oldname, void *arg)
2268 {
2269 	int delta = *(int *)arg;
2270 
2271 	if (strlen(oldname) + delta >= MAXNAMELEN)
2272 		return (ENAMETOOLONG);
2273 
2274 	return (0);
2275 }
2276 
2277 #pragma weak dmu_objset_rename = dsl_dataset_rename
2278 int
2279 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2280 {
2281 	dsl_dir_t *dd;
2282 	dsl_dataset_t *ds;
2283 	const char *tail;
2284 	int err;
2285 
2286 	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2287 	if (err)
2288 		return (err);
2289 
2290 	if (tail == NULL) {
2291 		int delta = strlen(newname) - strlen(oldname);
2292 
2293 		/* if we're growing, validate child name lengths */
2294 		if (delta > 0)
2295 			err = dmu_objset_find(oldname, dsl_valid_rename,
2296 			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2297 
2298 		if (err == 0)
2299 			err = dsl_dir_rename(dd, newname);
2300 		dsl_dir_close(dd, FTAG);
2301 		return (err);
2302 	}
2303 
2304 	if (tail[0] != '@') {
2305 		/* the name ended in a nonexistent component */
2306 		dsl_dir_close(dd, FTAG);
2307 		return (ENOENT);
2308 	}
2309 
2310 	dsl_dir_close(dd, FTAG);
2311 
2312 	/* new name must be snapshot in same filesystem */
2313 	tail = strchr(newname, '@');
2314 	if (tail == NULL)
2315 		return (EINVAL);
2316 	tail++;
2317 	if (strncmp(oldname, newname, tail - newname) != 0)
2318 		return (EXDEV);
2319 
2320 	if (recursive) {
2321 		err = dsl_recursive_rename(oldname, newname);
2322 	} else {
2323 		err = dsl_dataset_hold(oldname, FTAG, &ds);
2324 		if (err)
2325 			return (err);
2326 
2327 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2328 		    dsl_dataset_snapshot_rename_check,
2329 		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2330 
2331 		dsl_dataset_rele(ds, FTAG);
2332 	}
2333 
2334 	return (err);
2335 }
2336 
2337 struct promotenode {
2338 	list_node_t link;
2339 	dsl_dataset_t *ds;
2340 };
2341 
2342 struct promotearg {
2343 	list_t shared_snaps, origin_snaps, clone_snaps;
2344 	dsl_dataset_t *origin_origin;
2345 	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2346 	char *err_ds;
2347 };
2348 
2349 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2350 static boolean_t snaplist_unstable(list_t *l);
2351 
2352 static int
2353 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2354 {
2355 	dsl_dataset_t *hds = arg1;
2356 	struct promotearg *pa = arg2;
2357 	struct promotenode *snap = list_head(&pa->shared_snaps);
2358 	dsl_dataset_t *origin_ds = snap->ds;
2359 	int err;
2360 
2361 	/* Check that it is a real clone */
2362 	if (!dsl_dir_is_clone(hds->ds_dir))
2363 		return (EINVAL);
2364 
2365 	/* Since this is so expensive, don't do the preliminary check */
2366 	if (!dmu_tx_is_syncing(tx))
2367 		return (0);
2368 
2369 	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2370 		return (EXDEV);
2371 
2372 	/* compute origin's new unique space */
2373 	snap = list_tail(&pa->clone_snaps);
2374 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2375 	err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2376 	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
2377 	if (err)
2378 		return (err);
2379 
2380 	/*
2381 	 * Walk the snapshots that we are moving
2382 	 *
2383 	 * Compute space to transfer.  Consider the incremental changes
2384 	 * to used for each snapshot:
2385 	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2386 	 * So each snapshot gave birth to:
2387 	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2388 	 * So a sequence would look like:
2389 	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2390 	 * Which simplifies to:
2391 	 * uN + kN + k(N-1) + ... + k1 + k0
2392 	 * Note however, if we stop before we reach the ORIGIN we get:
2393 	 * uN + kN + k(N-1) + ... + kM - u(M-1)
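	 *
	 * Worked example (made-up numbers): for three snapshots with
	 * u0=1G, u1=3G, u2=4G and k0=0, k1=1G, k2=2G, the sum is
	 * (4-3+2) + (3-1+1) + (1-0+0) = 7G, which matches
	 * u2 + k2 + k1 + k0 = 4 + 2 + 1 + 0 = 7G.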
2394 	 */
2395 	pa->used = origin_ds->ds_phys->ds_used_bytes;
2396 	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2397 	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2398 	for (snap = list_head(&pa->shared_snaps); snap;
2399 	    snap = list_next(&pa->shared_snaps, snap)) {
2400 		uint64_t val, dlused, dlcomp, dluncomp;
2401 		dsl_dataset_t *ds = snap->ds;
2402 
2403 		/* Check that the snapshot name does not conflict */
2404 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2405 		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2406 		if (err == 0) {
2407 			err = EEXIST;
2408 			goto out;
2409 		}
2410 		if (err != ENOENT)
2411 			goto out;
2412 
2413 		/* The very first snapshot does not have a deadlist */
2414 		if (ds->ds_phys->ds_prev_snap_obj == 0)
2415 			continue;
2416 
2417 		if (err = bplist_space(&ds->ds_deadlist,
2418 		    &dlused, &dlcomp, &dluncomp))
2419 			goto out;
2420 		pa->used += dlused;
2421 		pa->comp += dlcomp;
2422 		pa->uncomp += dluncomp;
2423 	}
2424 
2425 	/*
2426 	 * If we are a clone of a clone then we never reached ORIGIN,
2427 	 * so we need to subtract out the clone origin's used space.
2428 	 */
2429 	if (pa->origin_origin) {
2430 		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2431 		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2432 		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2433 	}
2434 
2435 	/* Check that there is enough space here */
2436 	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2437 	    pa->used);
2438 	if (err)
2439 		return (err);
2440 
2441 	/*
2442 	 * Compute the amounts of space that will be used by snapshots
2443 	 * after the promotion (for both origin and clone).  For each,
2444 	 * it is the amount of space that will be on all of their
2445 	 * deadlists (that was not born before their new origin).
2446 	 */
2447 	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2448 		uint64_t space;
2449 
2450 		/*
2451 		 * Note, typically this will not be a clone of a clone,
2452 		 * so dd_origin_txg will be < TXG_INITIAL, so
2453 		 * these snaplist_space() -> bplist_space_birthrange()
2454 		 * calls will be fast because they do not have to
2455 		 * iterate over all bps.
2456 		 */
2457 		snap = list_head(&pa->origin_snaps);
2458 		err = snaplist_space(&pa->shared_snaps,
2459 		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2460 		if (err)
2461 			return (err);
2462 
2463 		err = snaplist_space(&pa->clone_snaps,
2464 		    snap->ds->ds_dir->dd_origin_txg, &space);
2465 		if (err)
2466 			return (err);
2467 		pa->cloneusedsnap += space;
2468 	}
2469 	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2470 		err = snaplist_space(&pa->origin_snaps,
2471 		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2472 		if (err)
2473 			return (err);
2474 	}
2475 
2476 	return (0);
2477 out:
2478 	pa->err_ds = snap->ds->ds_snapname;
2479 	return (err);
2480 }
2481 
2482 static void
2483 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2484 {
2485 	dsl_dataset_t *hds = arg1;
2486 	struct promotearg *pa = arg2;
2487 	struct promotenode *snap = list_head(&pa->shared_snaps);
2488 	dsl_dataset_t *origin_ds = snap->ds;
2489 	dsl_dataset_t *origin_head;
2490 	dsl_dir_t *dd = hds->ds_dir;
2491 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2492 	dsl_dir_t *odd = NULL;
2493 	uint64_t oldnext_obj;
2494 	int64_t delta;
2495 
2496 	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2497 
2498 	snap = list_head(&pa->origin_snaps);
2499 	origin_head = snap->ds;
2500 
2501 	/*
2502 	 * We need to explicitly open odd, since origin_ds's dd will be
2503 	 * changing.
2504 	 */
2505 	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2506 	    NULL, FTAG, &odd));
2507 
2508 	/* change origin's next snap */
2509 	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2510 	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2511 	snap = list_tail(&pa->clone_snaps);
2512 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2513 	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2514 
2515 	/* change the origin's next clone */
2516 	if (origin_ds->ds_phys->ds_next_clones_obj) {
2517 		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2518 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2519 		    origin_ds->ds_phys->ds_next_clones_obj,
2520 		    oldnext_obj, tx));
2521 	}
2522 
2523 	/* change origin */
2524 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
2525 	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2526 	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2527 	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2528 	dmu_buf_will_dirty(odd->dd_dbuf, tx);
2529 	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2530 	origin_head->ds_dir->dd_origin_txg =
2531 	    origin_ds->ds_phys->ds_creation_txg;
2532 
2533 	/* move snapshots to this dir */
2534 	for (snap = list_head(&pa->shared_snaps); snap;
2535 	    snap = list_next(&pa->shared_snaps, snap)) {
2536 		dsl_dataset_t *ds = snap->ds;
2537 
2538 		/* unregister props as dsl_dir is changing */
2539 		if (ds->ds_objset) {
2540 			dmu_objset_evict(ds->ds_objset);
2541 			ds->ds_objset = NULL;
2542 		}
2543 		/* move snap name entry */
2544 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2545 		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2546 		    ds->ds_snapname, tx));
2547 		VERIFY(0 == zap_add(dp->dp_meta_objset,
2548 		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2549 		    8, 1, &ds->ds_object, tx));
2550 		/* change containing dsl_dir */
2551 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2552 		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2553 		ds->ds_phys->ds_dir_obj = dd->dd_object;
2554 		ASSERT3P(ds->ds_dir, ==, odd);
2555 		dsl_dir_close(ds->ds_dir, ds);
2556 		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2557 		    NULL, ds, &ds->ds_dir));
2558 
2559 		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2560 	}
2561 
2562 	/*
2563 	 * Change space accounting.
2564 	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2565 	 * both be valid, or both be 0 (resulting in delta == 0).  This
2566 	 * is true for each of {clone,origin} independently.
2567 	 */
2568 
2569 	delta = pa->cloneusedsnap -
2570 	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2571 	ASSERT3S(delta, >=, 0);
2572 	ASSERT3U(pa->used, >=, delta);
2573 	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2574 	dsl_dir_diduse_space(dd, DD_USED_HEAD,
2575 	    pa->used - delta, pa->comp, pa->uncomp, tx);
2576 
2577 	delta = pa->originusedsnap -
2578 	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2579 	ASSERT3S(delta, <=, 0);
2580 	ASSERT3U(pa->used, >=, -delta);
2581 	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2582 	dsl_dir_diduse_space(odd, DD_USED_HEAD,
2583 	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2584 
2585 	origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2586 
2587 	/* log history record */
2588 	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2589 	    "dataset = %llu", hds->ds_object);
2590 
2591 	dsl_dir_close(odd, FTAG);
2592 }
2593 
2594 static char *snaplist_tag = "snaplist";
2595 /*
2596  * Make a list of dsl_dataset_t's for the snapshots between first_obj
2597  * (exclusive) and last_obj (inclusive).  The list will be in reverse
2598  * order (last_obj will be the list_head()).  If first_obj == 0, do all
2599  * snapshots back to this dataset's origin.
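 *
 * For example, for a snapshot chain A -> B -> C (A oldest), calling this
 * with first_obj == A and last_obj == C yields a list whose head is C,
 * followed by B; A itself (the exclusive endpoint) is not included.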
2600  */
2601 static int
2602 snaplist_make(dsl_pool_t *dp, boolean_t own,
2603     uint64_t first_obj, uint64_t last_obj, list_t *l)
2604 {
2605 	uint64_t obj = last_obj;
2606 
2607 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2608 
2609 	list_create(l, sizeof (struct promotenode),
2610 	    offsetof(struct promotenode, link));
2611 
2612 	while (obj != first_obj) {
2613 		dsl_dataset_t *ds;
2614 		struct promotenode *snap;
2615 		int err;
2616 
2617 		if (own) {
2618 			err = dsl_dataset_own_obj(dp, obj,
2619 			    0, snaplist_tag, &ds);
2620 			if (err == 0)
2621 				dsl_dataset_make_exclusive(ds, snaplist_tag);
2622 		} else {
2623 			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2624 		}
2625 		if (err == ENOENT) {
2626 			/* lost race with snapshot destroy */
2627 			struct promotenode *last = list_tail(l);
2628 			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2629 			obj = last->ds->ds_phys->ds_prev_snap_obj;
2630 			continue;
2631 		} else if (err) {
2632 			return (err);
2633 		}
2634 
2635 		if (first_obj == 0)
2636 			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2637 
2638 		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2639 		snap->ds = ds;
2640 		list_insert_tail(l, snap);
2641 		obj = ds->ds_phys->ds_prev_snap_obj;
2642 	}
2643 
2644 	return (0);
2645 }
2646 
2647 static int
2648 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2649 {
2650 	struct promotenode *snap;
2651 
2652 	*spacep = 0;
2653 	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2654 		uint64_t used;
2655 		int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2656 		    mintxg, UINT64_MAX, &used);
2657 		if (err)
2658 			return (err);
2659 		*spacep += used;
2660 	}
2661 	return (0);
2662 }
2663 
2664 static void
2665 snaplist_destroy(list_t *l, boolean_t own)
2666 {
2667 	struct promotenode *snap;
2668 
2669 	if (!l || !list_link_active(&l->list_head))
2670 		return;
2671 
2672 	while ((snap = list_tail(l)) != NULL) {
2673 		list_remove(l, snap);
2674 		if (own)
2675 			dsl_dataset_disown(snap->ds, snaplist_tag);
2676 		else
2677 			dsl_dataset_rele(snap->ds, snaplist_tag);
2678 		kmem_free(snap, sizeof (struct promotenode));
2679 	}
2680 	list_destroy(l);
2681 }
2682 
2683 /*
2684  * Promote a clone.  Nomenclature note:
2685  * "clone" or "cds": the original clone which is being promoted
2686  * "origin" or "ods": the snapshot which is originally the clone's origin
2687  * "origin head" or "ohds": the dataset which is the head
2688  * (filesystem/volume) for the origin
2689  * "origin origin": the origin of the origin's filesystem (typically
2690  * NULL, indicating that the clone is not a clone of a clone).
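 *
 * For example, after "zfs clone pool/fs@snap pool/clone", promoting
 * pool/clone means cds is pool/clone, ods is pool/fs@snap, and ohds is
 * pool/fs (names are illustrative only).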
2691  */
2692 int
2693 dsl_dataset_promote(const char *name, char *conflsnap)
2694 {
2695 	dsl_dataset_t *ds;
2696 	dsl_dir_t *dd;
2697 	dsl_pool_t *dp;
2698 	dmu_object_info_t doi;
2699 	struct promotearg pa = { 0 };
2700 	struct promotenode *snap;
2701 	int err;
2702 
2703 	err = dsl_dataset_hold(name, FTAG, &ds);
2704 	if (err)
2705 		return (err);
2706 	dd = ds->ds_dir;
2707 	dp = dd->dd_pool;
2708 
2709 	err = dmu_object_info(dp->dp_meta_objset,
2710 	    ds->ds_phys->ds_snapnames_zapobj, &doi);
2711 	if (err) {
2712 		dsl_dataset_rele(ds, FTAG);
2713 		return (err);
2714 	}
2715 
2716 	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2717 		dsl_dataset_rele(ds, FTAG);
2718 		return (EINVAL);
2719 	}
2720 
2721 	/*
2722 	 * We are going to inherit all the snapshots taken before our
2723 	 * origin (i.e., our new origin will be our parent's origin).
2724 	 * Take ownership of them so that we can rename them into our
2725 	 * namespace.
2726 	 */
2727 	rw_enter(&dp->dp_config_rwlock, RW_READER);
2728 
2729 	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2730 	    &pa.shared_snaps);
2731 	if (err != 0)
2732 		goto out;
2733 
2734 	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2735 	if (err != 0)
2736 		goto out;
2737 
2738 	snap = list_head(&pa.shared_snaps);
2739 	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2740 	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2741 	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2742 	if (err != 0)
2743 		goto out;
2744 
2745 	if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2746 		err = dsl_dataset_own_obj(dp,
2747 		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
2748 		    0, FTAG, &pa.origin_origin);
2749 		if (err != 0)
2750 			goto out;
2751 	}
2752 
2753 out:
2754 	rw_exit(&dp->dp_config_rwlock);
2755 
2756 	/*
2757 	 * Add in 128x the snapnames zapobj size, since we will be moving
2758 	 * a bunch of snapnames to the promoted ds, and dirtying their
2759 	 * bonus buffers.
2760 	 */
2761 	if (err == 0) {
2762 		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2763 		    dsl_dataset_promote_sync, ds, &pa,
2764 		    2 + 2 * doi.doi_physical_blocks_512);
2765 		if (err && pa.err_ds && conflsnap)
2766 			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
2767 	}
2768 
2769 	snaplist_destroy(&pa.shared_snaps, B_TRUE);
2770 	snaplist_destroy(&pa.clone_snaps, B_FALSE);
2771 	snaplist_destroy(&pa.origin_snaps, B_FALSE);
2772 	if (pa.origin_origin)
2773 		dsl_dataset_disown(pa.origin_origin, FTAG);
2774 	dsl_dataset_rele(ds, FTAG);
2775 	return (err);
2776 }
2777 
2778 struct cloneswaparg {
2779 	dsl_dataset_t *cds; /* clone dataset */
2780 	dsl_dataset_t *ohds; /* origin's head dataset */
2781 	boolean_t force;
2782 	int64_t unused_refres_delta; /* change in unconsumed refreservation */
2783 };
2784 
2785 /* ARGSUSED */
2786 static int
2787 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2788 {
2789 	struct cloneswaparg *csa = arg1;
2790 
2791 	/* they should both be heads */
2792 	if (dsl_dataset_is_snapshot(csa->cds) ||
2793 	    dsl_dataset_is_snapshot(csa->ohds))
2794 		return (EINVAL);
2795 
2796 	/* the branch point should be just before them */
2797 	if (csa->cds->ds_prev != csa->ohds->ds_prev)
2798 		return (EINVAL);
2799 
2800 	/* cds should be the clone (unless they are unrelated) */
2801 	if (csa->cds->ds_prev != NULL &&
2802 	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
2803 	    csa->ohds->ds_object !=
2804 	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
2805 		return (EINVAL);
2806 
2807 	/* the clone should be a child of the origin */
2808 	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2809 		return (EINVAL);
2810 
2811 	/* ohds shouldn't be modified unless 'force' */
2812 	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2813 		return (ETXTBSY);
2814 
2815 	/* adjust amount of any unconsumed refreservation */
2816 	csa->unused_refres_delta =
2817 	    (int64_t)MIN(csa->ohds->ds_reserved,
2818 	    csa->ohds->ds_phys->ds_unique_bytes) -
2819 	    (int64_t)MIN(csa->ohds->ds_reserved,
2820 	    csa->cds->ds_phys->ds_unique_bytes);
2821 
2822 	if (csa->unused_refres_delta > 0 &&
2823 	    csa->unused_refres_delta >
2824 	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2825 		return (ENOSPC);
2826 
2827 	if (csa->ohds->ds_quota != 0 &&
2828 	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
2829 		return (EDQUOT);
2830 
2831 	return (0);
2832 }
2833 
2834 /* ARGSUSED */
2835 static void
2836 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2837 {
2838 	struct cloneswaparg *csa = arg1;
2839 	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2840 
2841 	ASSERT(csa->cds->ds_reserved == 0);
2842 	ASSERT(csa->ohds->ds_quota == 0 ||
2843 	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
2844 
2845 	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2846 	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2847 
2848 	if (csa->cds->ds_objset != NULL) {
2849 		dmu_objset_evict(csa->cds->ds_objset);
2850 		csa->cds->ds_objset = NULL;
2851 	}
2852 
2853 	if (csa->ohds->ds_objset != NULL) {
2854 		dmu_objset_evict(csa->ohds->ds_objset);
2855 		csa->ohds->ds_objset = NULL;
2856 	}
2857 
2858 	/*
2859 	 * Reset origin's unique bytes, if it exists.
2860 	 */
2861 	if (csa->cds->ds_prev) {
2862 		dsl_dataset_t *origin = csa->cds->ds_prev;
2863 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
2864 		VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2865 		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2866 		    &origin->ds_phys->ds_unique_bytes));
2867 	}
2868 
2869 	/* swap blkptrs */
2870 	{
2871 		blkptr_t tmp;
2872 		tmp = csa->ohds->ds_phys->ds_bp;
2873 		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2874 		csa->cds->ds_phys->ds_bp = tmp;
2875 	}
2876 
2877 	/* set dd_*_bytes */
2878 	{
2879 		int64_t dused, dcomp, duncomp;
2880 		uint64_t cdl_used, cdl_comp, cdl_uncomp;
2881 		uint64_t odl_used, odl_comp, odl_uncomp;
2882 
2883 		ASSERT3U(csa->cds->ds_dir->dd_phys->
2884 		    dd_used_breakdown[DD_USED_SNAP], ==, 0);
2885 
2886 		VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2887 		    &cdl_comp, &cdl_uncomp));
2888 		VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2889 		    &odl_comp, &odl_uncomp));
2890 
2891 		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2892 		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2893 		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2894 		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2895 		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2896 		    cdl_uncomp -
2897 		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2898 
2899 		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
2900 		    dused, dcomp, duncomp, tx);
2901 		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
2902 		    -dused, -dcomp, -duncomp, tx);
2903 
2904 		/*
2905 		 * The difference in the space used by snapshots is the
2906 		 * difference in snapshot space due to the head's
2907 		 * deadlist (since that's the only thing that's
2908 		 * changing that affects the snapused).
2909 		 */
2910 		VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2911 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &cdl_used));
2912 		VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
2913 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &odl_used));
2914 		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
2915 		    DD_USED_HEAD, DD_USED_SNAP, tx);
2916 	}
2917 
2918 #define	SWITCH64(x, y) \
2919 	{ \
2920 		uint64_t __tmp = (x); \
2921 		(x) = (y); \
2922 		(y) = __tmp; \
2923 	}
2924 
2925 	/* swap ds_*_bytes */
2926 	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2927 	    csa->cds->ds_phys->ds_used_bytes);
2928 	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2929 	    csa->cds->ds_phys->ds_compressed_bytes);
2930 	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2931 	    csa->cds->ds_phys->ds_uncompressed_bytes);
2932 	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2933 	    csa->cds->ds_phys->ds_unique_bytes);
2934 
2935 	/* apply any parent delta for change in unconsumed refreservation */
2936 	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
2937 	    csa->unused_refres_delta, 0, 0, tx);
2938 
2939 	/* swap deadlists */
2940 	bplist_close(&csa->cds->ds_deadlist);
2941 	bplist_close(&csa->ohds->ds_deadlist);
2942 	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2943 	    csa->cds->ds_phys->ds_deadlist_obj);
2944 	VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2945 	    csa->cds->ds_phys->ds_deadlist_obj));
2946 	VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2947 	    csa->ohds->ds_phys->ds_deadlist_obj));
2948 
2949 	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
2950 }
2951 
2952 /*
2953  * Swap 'clone' with its origin head dataset.  Used at the end of "zfs
2954  * recv" into an existing fs to swizzle the file system to the new
2955  * version, and by "zfs rollback".  Can also be used to swap two
2956  * independent head datasets if neither has any snapshots.
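 *
 * A minimal usage sketch (illustrative; both datasets must already be
 * owned by the caller, e.g. via dsl_dataset_own()):
 *
 *	error = dsl_dataset_clone_swap(clone_ds, head_ds, B_FALSE);
 *
 * where clone_ds and head_ds are hypothetical dsl_dataset_t pointers.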
2957  */
2958 int
2959 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2960     boolean_t force)
2961 {
2962 	struct cloneswaparg csa;
2963 	int error;
2964 
2965 	ASSERT(clone->ds_owner);
2966 	ASSERT(origin_head->ds_owner);
2967 retry:
2968 	/* Need exclusive access for the swap */
2969 	rw_enter(&clone->ds_rwlock, RW_WRITER);
2970 	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2971 		rw_exit(&clone->ds_rwlock);
2972 		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2973 		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2974 			rw_exit(&origin_head->ds_rwlock);
2975 			goto retry;
2976 		}
2977 	}
2978 	csa.cds = clone;
2979 	csa.ohds = origin_head;
2980 	csa.force = force;
2981 	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2982 	    dsl_dataset_clone_swap_check,
2983 	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2984 	return (error);
2985 }
2986 
2987 /*
2988  * Given a pool name and a dataset object number in that pool,
2989  * return the name of that dataset.
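 *
 * A minimal usage sketch (illustrative names only):
 *
 *	char name[MAXNAMELEN];
 *	error = dsl_dsobj_to_dsname(poolname, dsobj, name);
 *
 * where poolname and dsobj identify a hypothetical pool and dataset
 * object number; on success, name holds the dataset's full name.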
2990  */
2991 int
2992 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2993 {
2994 	spa_t *spa;
2995 	dsl_pool_t *dp;
2996 	dsl_dataset_t *ds;
2997 	int error;
2998 
2999 	if ((error = spa_open(pname, &spa, FTAG)) != 0)
3000 		return (error);
3001 	dp = spa_get_dsl(spa);
3002 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3003 	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
3004 		dsl_dataset_name(ds, buf);
3005 		dsl_dataset_rele(ds, FTAG);
3006 	}
3007 	rw_exit(&dp->dp_config_rwlock);
3008 	spa_close(spa, FTAG);
3009 
3010 	return (error);
3011 }
3012 
3013 int
3014 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3015     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3016 {
3017 	int error = 0;
3018 
3019 	ASSERT3S(asize, >, 0);
3020 
3021 	/*
3022 	 * *ref_rsrv is the portion of asize that will come from any
3023 	 * unconsumed refreservation space.
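	 *
	 * For example (made-up numbers): with refreservation=10G,
	 * ds_unique_bytes=9G and a 4G request (asize=4G, inflight=0),
	 * parent_delta() below reports a 3G increase for the parent, so
	 * 1G of the request is covered by unconsumed refreservation
	 * (*ref_rsrv = 4G - MIN(4G, 3G) = 1G).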
3024 	 */
3025 	*ref_rsrv = 0;
3026 
3027 	mutex_enter(&ds->ds_lock);
3028 	/*
3029 	 * Make a space adjustment for reserved bytes.
3030 	 */
3031 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3032 		ASSERT3U(*used, >=,
3033 		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3034 		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3035 		*ref_rsrv =
3036 		    asize - MIN(asize, parent_delta(ds, asize + inflight));
3037 	}
3038 
3039 	if (!check_quota || ds->ds_quota == 0) {
3040 		mutex_exit(&ds->ds_lock);
3041 		return (0);
3042 	}
3043 	/*
3044 	 * If they are requesting more space, and our current estimate
3045 	 * is over quota, they get to try again unless the actual
3046 	 * on-disk is over quota and there are no pending changes (which
3047 	 * may free up space for us).
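	 *
	 * For example (made-up numbers): with refquota=100G, 95G used
	 * on disk and 10G in flight, the estimate (105G) is over quota
	 * but the on-disk usage is not, so ERESTART is returned and the
	 * caller retries after pending frees have had a chance to sync.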
3048 	 */
3049 	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3050 		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3051 			error = ERESTART;
3052 		else
3053 			error = EDQUOT;
3054 	}
3055 	mutex_exit(&ds->ds_lock);
3056 
3057 	return (error);
3058 }
3059 
3060 /* ARGSUSED */
3061 static int
3062 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3063 {
3064 	dsl_dataset_t *ds = arg1;
3065 	dsl_prop_setarg_t *psa = arg2;
3066 	int err;
3067 
3068 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3069 		return (ENOTSUP);
3070 
3071 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3072 		return (err);
3073 
3074 	if (psa->psa_effective_value == 0)
3075 		return (0);
3076 
3077 	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3078 	    psa->psa_effective_value < ds->ds_reserved)
3079 		return (ENOSPC);
3080 
3081 	return (0);
3082 }
3083 
3084 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3085 
3086 void
3087 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3088 {
3089 	dsl_dataset_t *ds = arg1;
3090 	dsl_prop_setarg_t *psa = arg2;
3091 	uint64_t effective_value = psa->psa_effective_value;
3092 
3093 	dsl_prop_set_sync(ds, psa, tx);
3094 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3095 
3096 	if (ds->ds_quota != effective_value) {
3097 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3098 		ds->ds_quota = effective_value;
3099 
3100 		spa_history_log_internal(LOG_DS_REFQUOTA,
3101 		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3102 		    (longlong_t)ds->ds_quota, ds->ds_object);
3103 	}
3104 }
3105 
3106 int
3107 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3108 {
3109 	dsl_dataset_t *ds;
3110 	dsl_prop_setarg_t psa;
3111 	int err;
3112 
3113 	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3114 
3115 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3116 	if (err)
3117 		return (err);
3118 
3119 	/*
3120 	 * If someone removes a file, then tries to set the quota, we
3121 	 * want to make sure the file freeing takes effect.
3122 	 */
3123 	txg_wait_open(ds->ds_dir->dd_pool, 0);
3124 
3125 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3126 	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3127 	    ds, &psa, 0);
3128 
3129 	dsl_dataset_rele(ds, FTAG);
3130 	return (err);
3131 }
3132 
3133 static int
3134 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3135 {
3136 	dsl_dataset_t *ds = arg1;
3137 	dsl_prop_setarg_t *psa = arg2;
3138 	uint64_t effective_value;
3139 	uint64_t unique;
3140 	int err;
3141 
3142 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3143 	    SPA_VERSION_REFRESERVATION)
3144 		return (ENOTSUP);
3145 
3146 	if (dsl_dataset_is_snapshot(ds))
3147 		return (EINVAL);
3148 
3149 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3150 		return (err);
3151 
3152 	effective_value = psa->psa_effective_value;
3153 
3154 	/*
3155 	 * If we are doing the preliminary check in open context, the
3156 	 * space estimates may be inaccurate.
3157 	 */
3158 	if (!dmu_tx_is_syncing(tx))
3159 		return (0);
3160 
3161 	mutex_enter(&ds->ds_lock);
3162 	if (!DS_UNIQUE_IS_ACCURATE(ds))
3163 		dsl_dataset_recalc_head_uniq(ds);
3164 	unique = ds->ds_phys->ds_unique_bytes;
3165 	mutex_exit(&ds->ds_lock);
3166 
3167 	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3168 		uint64_t delta = MAX(unique, effective_value) -
3169 		    MAX(unique, ds->ds_reserved);
3170 
3171 		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3172 			return (ENOSPC);
3173 		if (ds->ds_quota > 0 &&
3174 		    effective_value > ds->ds_quota)
3175 			return (ENOSPC);
3176 	}
3177 
3178 	return (0);
3179 }
3180 
3181 static void
3182 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3183 {
3184 	dsl_dataset_t *ds = arg1;
3185 	dsl_prop_setarg_t *psa = arg2;
3186 	uint64_t effective_value = psa->psa_effective_value;
3187 	uint64_t unique;
3188 	int64_t delta;
3189 
3190 	dsl_prop_set_sync(ds, psa, tx);
3191 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3192 
3193 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
3194 
3195 	mutex_enter(&ds->ds_dir->dd_lock);
3196 	mutex_enter(&ds->ds_lock);
3197 	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3198 	unique = ds->ds_phys->ds_unique_bytes;
3199 	delta = MAX(0, (int64_t)(effective_value - unique)) -
3200 	    MAX(0, (int64_t)(ds->ds_reserved - unique));
3201 	ds->ds_reserved = effective_value;
3202 	mutex_exit(&ds->ds_lock);
3203 
3204 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3205 	mutex_exit(&ds->ds_dir->dd_lock);
3206 
3207 	spa_history_log_internal(LOG_DS_REFRESERV,
3208 	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3209 	    (longlong_t)effective_value, ds->ds_object);
3210 }
3211 
3212 int
3213 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3214     uint64_t reservation)
3215 {
3216 	dsl_dataset_t *ds;
3217 	dsl_prop_setarg_t psa;
3218 	int err;
3219 
3220 	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3221 	    &reservation);
3222 
3223 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3224 	if (err)
3225 		return (err);
3226 
3227 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3228 	    dsl_dataset_set_reservation_check,
3229 	    dsl_dataset_set_reservation_sync, ds, &psa, 0);
3230 
3231 	dsl_dataset_rele(ds, FTAG);
3232 	return (err);
3233 }
3234 
3235 struct dsl_ds_holdarg {
3236 	dsl_sync_task_group_t *dstg;
3237 	char *htag;
3238 	char *snapname;
3239 	boolean_t recursive;
3240 	boolean_t gotone;
3241 	boolean_t temphold;
3242 	char failed[MAXPATHLEN];
3243 };
3244 
3245 /*
3246  * The max length of a temporary tag prefix is the number of hex digits
3247  * required to express UINT64_MAX (16 digits) plus one for the hyphen.
3248  */
3249 #define	MAX_TAG_PREFIX_LEN	17
3250 
3251 static int
3252 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3253 {
3254 	dsl_dataset_t *ds = arg1;
3255 	struct dsl_ds_holdarg *ha = arg2;
3256 	char *htag = ha->htag;
3257 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3258 	int error = 0;
3259 
3260 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3261 		return (ENOTSUP);
3262 
3263 	if (!dsl_dataset_is_snapshot(ds))
3264 		return (EINVAL);
3265 
3266 	/* tags must be unique */
3267 	mutex_enter(&ds->ds_lock);
3268 	if (ds->ds_phys->ds_userrefs_obj) {
		uint64_t tmp;

		/* only check for existence; don't overwrite the tx with the value */
3269 		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3270 		    8, 1, &tmp);
3271 		if (error == 0)
3272 			error = EEXIST;
3273 		else if (error == ENOENT)
3274 			error = 0;
3275 	}
3276 	mutex_exit(&ds->ds_lock);
3277 
3278 	if (error == 0 && ha->temphold &&
3279 	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3280 		error = E2BIG;
3281 
3282 	return (error);
3283 }
3284 
3285 static void
3286 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3287 {
3288 	dsl_dataset_t *ds = arg1;
3289 	struct dsl_ds_holdarg *ha = arg2;
3290 	char *htag = ha->htag;
3291 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3292 	objset_t *mos = dp->dp_meta_objset;
3293 	uint64_t now = gethrestime_sec();
3294 	uint64_t zapobj;
3295 
3296 	mutex_enter(&ds->ds_lock);
3297 	if (ds->ds_phys->ds_userrefs_obj == 0) {
3298 		/*
3299 		 * This is the first user hold for this dataset.  Create
3300 		 * the userrefs zap object.
3301 		 */
3302 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3303 		zapobj = ds->ds_phys->ds_userrefs_obj =
3304 		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3305 	} else {
3306 		zapobj = ds->ds_phys->ds_userrefs_obj;
3307 	}
3308 	ds->ds_userrefs++;
3309 	mutex_exit(&ds->ds_lock);
3310 
3311 	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3312 
3313 	if (ha->temphold) {
3314 		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3315 		    htag, &now, tx));
3316 	}
3317 
3318 	spa_history_log_internal(LOG_DS_USER_HOLD,
3319 	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3320 	    (int)ha->temphold, ds->ds_object);
3321 }
3322 
3323 static int
3324 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3325 {
3326 	struct dsl_ds_holdarg *ha = arg;
3327 	dsl_dataset_t *ds;
3328 	int error;
3329 	char *name;
3330 
3331 	/* alloc a buffer to hold dsname@snapname plus terminating NULL */
3332 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3333 	error = dsl_dataset_hold(name, ha->dstg, &ds);
3334 	strfree(name);
3335 	if (error == 0) {
3336 		ha->gotone = B_TRUE;
3337 		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3338 		    dsl_dataset_user_hold_sync, ds, ha, 0);
3339 	} else if (error == ENOENT && ha->recursive) {
3340 		error = 0;
3341 	} else {
3342 		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3343 	}
3344 	return (error);
3345 }
3346 
3347 int
3348 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3349     boolean_t recursive, boolean_t temphold)
3350 {
3351 	struct dsl_ds_holdarg *ha;
3352 	dsl_sync_task_t *dst;
3353 	spa_t *spa;
3354 	int error;
3355 
3356 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3357 
3358 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3359 
3360 	error = spa_open(dsname, &spa, FTAG);
3361 	if (error) {
3362 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3363 		return (error);
3364 	}
3365 
3366 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3367 	ha->htag = htag;
3368 	ha->snapname = snapname;
3369 	ha->recursive = recursive;
3370 	ha->temphold = temphold;
3371 	if (recursive) {
3372 		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3373 		    ha, DS_FIND_CHILDREN);
3374 	} else {
3375 		error = dsl_dataset_user_hold_one(dsname, ha);
3376 	}
3377 	if (error == 0)
3378 		error = dsl_sync_task_group_wait(ha->dstg);
3379 
3380 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3381 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3382 		dsl_dataset_t *ds = dst->dst_arg1;
3383 
3384 		if (dst->dst_err) {
3385 			dsl_dataset_name(ds, ha->failed);
3386 			*strchr(ha->failed, '@') = '\0';
3387 		}
3388 		dsl_dataset_rele(ds, ha->dstg);
3389 	}
3390 
3391 	if (error == 0 && recursive && !ha->gotone)
3392 		error = ENOENT;
3393 
3394 	if (error)
3395 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3396 
3397 	dsl_sync_task_group_destroy(ha->dstg);
3398 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3399 	spa_close(spa, FTAG);
3400 	return (error);
3401 }
3402 
3403 struct dsl_ds_releasearg {
3404 	dsl_dataset_t *ds;
3405 	const char *htag;
3406 	boolean_t own;		/* do we own or just hold ds? */
3407 };
3408 
3409 static int
3410 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3411     boolean_t *might_destroy)
3412 {
3413 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3414 	uint64_t zapobj;
3415 	uint64_t tmp;
3416 	int error;
3417 
3418 	*might_destroy = B_FALSE;
3419 
3420 	mutex_enter(&ds->ds_lock);
3421 	zapobj = ds->ds_phys->ds_userrefs_obj;
3422 	if (zapobj == 0) {
3423 		/* The tag can't possibly exist */
3424 		mutex_exit(&ds->ds_lock);
3425 		return (ESRCH);
3426 	}
3427 
3428 	/* Make sure the tag exists */
3429 	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3430 	if (error) {
3431 		mutex_exit(&ds->ds_lock);
3432 		if (error == ENOENT)
3433 			error = ESRCH;
3434 		return (error);
3435 	}
3436 
3437 	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3438 	    DS_IS_DEFER_DESTROY(ds))
3439 		*might_destroy = B_TRUE;
3440 
3441 	mutex_exit(&ds->ds_lock);
3442 	return (0);
3443 }
3444 
3445 static int
3446 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3447 {
3448 	struct dsl_ds_releasearg *ra = arg1;
3449 	dsl_dataset_t *ds = ra->ds;
3450 	boolean_t might_destroy;
3451 	int error;
3452 
3453 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3454 		return (ENOTSUP);
3455 
3456 	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3457 	if (error)
3458 		return (error);
3459 
3460 	if (might_destroy) {
3461 		struct dsl_ds_destroyarg dsda = {0};
3462 
3463 		if (dmu_tx_is_syncing(tx)) {
3464 			/*
3465 			 * If we're not prepared to remove the snapshot,
3466 			 * we can't allow the release to happen right now.
3467 			 */
3468 			if (!ra->own)
3469 				return (EBUSY);
3470 		}
3471 		dsda.ds = ds;
3472 		dsda.releasing = B_TRUE;
3473 		return (dsl_dataset_destroy_check(&dsda, tag, tx));
3474 	}
3475 
3476 	return (0);
3477 }
3478 
3479 static void
3480 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3481 {
3482 	struct dsl_ds_releasearg *ra = arg1;
3483 	dsl_dataset_t *ds = ra->ds;
3484 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3485 	objset_t *mos = dp->dp_meta_objset;
3486 	uint64_t zapobj;
3487 	uint64_t dsobj = ds->ds_object;
3488 	uint64_t refs;
3489 	int error;
3490 
3491 	if (ds->ds_objset) {
3492 		dmu_objset_evict(ds->ds_objset);
3493 		ds->ds_objset = NULL;
3494 	}
3495 
3496 	mutex_enter(&ds->ds_lock);
3497 	ds->ds_userrefs--;
3498 	refs = ds->ds_userrefs;
3499 	mutex_exit(&ds->ds_lock);
3500 	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3501 	VERIFY(error == 0 || error == ENOENT);
3502 	zapobj = ds->ds_phys->ds_userrefs_obj;
3503 	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3504 	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3505 	    DS_IS_DEFER_DESTROY(ds)) {
3506 		struct dsl_ds_destroyarg dsda = {0};
3507 
3508 		ASSERT(ra->own);
3509 		dsda.ds = ds;
3510 		dsda.releasing = B_TRUE;
3511 		/* We already did the destroy_check */
3512 		dsl_dataset_destroy_sync(&dsda, tag, tx);
3513 	}
3514 
3515 	spa_history_log_internal(LOG_DS_USER_RELEASE,
3516 	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3517 	    ra->htag, (longlong_t)refs, dsobj);
3518 }
3519 
3520 static int
3521 dsl_dataset_user_release_one(const char *dsname, void *arg)
3522 {
3523 	struct dsl_ds_holdarg *ha = arg;
3524 	struct dsl_ds_releasearg *ra;
3525 	dsl_dataset_t *ds;
3526 	int error;
3527 	void *dtag = ha->dstg;
3528 	char *name;
3529 	boolean_t own = B_FALSE;
3530 	boolean_t might_destroy;
3531 
3532 	/* alloc a buffer to hold dsname@snapname, plus the terminating NULL */
3533 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3534 	error = dsl_dataset_hold(name, dtag, &ds);
3535 	strfree(name);
3536 	if (error == ENOENT && ha->recursive)
3537 		return (0);
3538 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3539 	if (error)
3540 		return (error);
3541 
3542 	ha->gotone = B_TRUE;
3543 
3544 	ASSERT(dsl_dataset_is_snapshot(ds));
3545 
3546 	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3547 	if (error) {
3548 		dsl_dataset_rele(ds, dtag);
3549 		return (error);
3550 	}
3551 
3552 	if (might_destroy) {
3553 #ifdef _KERNEL
3554 		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3555 		error = zfs_unmount_snap(name, NULL);
3556 		strfree(name);
3557 		if (error) {
3558 			dsl_dataset_rele(ds, dtag);
3559 			return (error);
3560 		}
3561 #endif
3562 		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3563 			dsl_dataset_rele(ds, dtag);
3564 			return (EBUSY);
3565 		} else {
3566 			own = B_TRUE;
3567 			dsl_dataset_make_exclusive(ds, dtag);
3568 		}
3569 	}
3570 
3571 	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3572 	ra->ds = ds;
3573 	ra->htag = ha->htag;
3574 	ra->own = own;
3575 	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3576 	    dsl_dataset_user_release_sync, ra, dtag, 0);
3577 
3578 	return (0);
3579 }
3580 
3581 int
3582 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3583     boolean_t recursive)
3584 {
3585 	struct dsl_ds_holdarg *ha;
3586 	dsl_sync_task_t *dst;
3587 	spa_t *spa;
3588 	int error;
3589 
3590 top:
3591 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3592 
3593 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3594 
3595 	error = spa_open(dsname, &spa, FTAG);
3596 	if (error) {
3597 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3598 		return (error);
3599 	}
3600 
3601 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3602 	ha->htag = htag;
3603 	ha->snapname = snapname;
3604 	ha->recursive = recursive;
3605 	if (recursive) {
3606 		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3607 		    ha, DS_FIND_CHILDREN);
3608 	} else {
3609 		error = dsl_dataset_user_release_one(dsname, ha);
3610 	}
3611 	if (error == 0)
3612 		error = dsl_sync_task_group_wait(ha->dstg);
3613 
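	/*
	 * Walk the completed tasks: record the name of any dataset whose
	 * task failed (for error reporting), drop our hold or ownership of
	 * each snapshot, and free the per-task release args.
	 */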
3614 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3615 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3616 		struct dsl_ds_releasearg *ra = dst->dst_arg1;
3617 		dsl_dataset_t *ds = ra->ds;
3618 
3619 		if (dst->dst_err)
3620 			dsl_dataset_name(ds, ha->failed);
3621 
3622 		if (ra->own)
3623 			dsl_dataset_disown(ds, ha->dstg);
3624 		else
3625 			dsl_dataset_rele(ds, ha->dstg);
3626 
3627 		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3628 	}
3629 
3630 	if (error == 0 && recursive && !ha->gotone)
3631 		error = ENOENT;
3632 
3633 	if (error && error != EBUSY)
3634 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3635 
3636 	dsl_sync_task_group_destroy(ha->dstg);
3637 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3638 	spa_close(spa, FTAG);
3639 
3640 	/*
3641 	 * We can get EBUSY if we were racing with deferred destroy and
3642 	 * dsl_dataset_user_release_check() hadn't done the necessary
3643 	 * open context setup.  We can also get EBUSY if we're racing
3644 	 * with destroy and that thread is the ds_owner.  Either way
3645 	 * the busy condition should be transient, and we should retry
3646 	 * the release operation.
3647 	 */
3648 	if (error == EBUSY)
3649 		goto top;
3650 
3651 	return (error);
3652 }
3653 
3654 /*
3655  * Called at spa_load time to release a stale temporary user hold.
3656  */
3657 int
3658 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag)
3659 {
3660 	dsl_dataset_t *ds;
3661 	char *snap;
3662 	char *name;
3663 	int namelen;
3664 	int error;
3665 
3666 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3667 	error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3668 	rw_exit(&dp->dp_config_rwlock);
3669 	if (error)
3670 		return (error);
3671 	namelen = dsl_dataset_namelen(ds)+1;
3672 	name = kmem_alloc(namelen, KM_SLEEP);
3673 	dsl_dataset_name(ds, name);
3674 	dsl_dataset_rele(ds, FTAG);
3675 
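	/* split "pool/fs@snap" into filesystem and snapshot components */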
3676 	snap = strchr(name, '@');
3677 	*snap = '\0';
3678 	++snap;
3679 	error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
	kmem_free(name, namelen);
	return (error);
3680 }
3681 
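/*
 * Populate *nvp with the user holds on the named snapshot: one entry per
 * hold tag, taken from the snapshot's ds_userrefs_obj ZAP together with
 * the 64-bit value stored for it (the time the hold was taken).
 */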
3682 int
3683 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3684 {
3685 	dsl_dataset_t *ds;
3686 	int err;
3687 
3688 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3689 	if (err)
3690 		return (err);
3691 
3692 	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
3693 	if (ds->ds_phys->ds_userrefs_obj != 0) {
3694 		zap_attribute_t *za;
3695 		zap_cursor_t zc;
3696 
3697 		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
3698 		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
3699 		    ds->ds_phys->ds_userrefs_obj);
3700 		    zap_cursor_retrieve(&zc, za) == 0;
3701 		    zap_cursor_advance(&zc)) {
3702 			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
3703 			    za->za_first_integer));
3704 		}
3705 		zap_cursor_fini(&zc);
3706 		kmem_free(za, sizeof (zap_attribute_t));
3707 	}
3708 	dsl_dataset_rele(ds, FTAG);
3709 	return (0);
3710 }
3711 
3712 /*
3713  * Note: this function is used as the callback for dmu_objset_find().  We
3714  * always return 0 so that we will continue to find and process
3715  * inconsistent datasets, even if we encounter an error trying to
3716  * process one of them.
3717  */
3718 /* ARGSUSED */
3719 int
3720 dsl_destroy_inconsistent(const char *dsname, void *arg)
3721 {
3722 	dsl_dataset_t *ds;
3723 
3724 	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
3725 		if (DS_IS_INCONSISTENT(ds))
3726 			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
3727 		else
3728 			dsl_dataset_disown(ds, FTAG);
3729 	}
3730 	return (0);
3731 }
3732
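/*
 * Illustrative sketch (not part of this file): dsl_destroy_inconsistent()
 * is meant to be driven through dmu_objset_find() during pool load/import,
 * along the lines of
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 *
 * so that any dataset left DS_INCONSISTENT by an interrupted receive or
 * destroy is cleaned up, with per-dataset errors deliberately ignored.
 */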