xref: /titanic_41/usr/src/uts/common/fs/zfs/dsl_dataset.c (revision 3fb517f786391b507780c78aabb8d98bfea9efe9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/dmu_objset.h>
26 #include <sys/dsl_dataset.h>
27 #include <sys/dsl_dir.h>
28 #include <sys/dsl_prop.h>
29 #include <sys/dsl_synctask.h>
30 #include <sys/dmu_traverse.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/arc.h>
33 #include <sys/zio.h>
34 #include <sys/zap.h>
35 #include <sys/unique.h>
36 #include <sys/zfs_context.h>
37 #include <sys/zfs_ioctl.h>
38 #include <sys/spa.h>
39 #include <sys/zfs_znode.h>
40 #include <sys/zvol.h>
41 #include <sys/dsl_scan.h>
42 
43 static char *dsl_reaper = "the grim reaper";
44 
45 static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
46 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
47 static dsl_syncfunc_t dsl_dataset_set_reservation_sync;
48 
49 #define	DS_REF_MAX	(1ULL << 62)
50 
51 #define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE
52 
53 #define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)
54 
55 
56 /*
57  * Figure out how much of this delta should be propagated to the dsl_dir
58  * layer.  If there's a refreservation, that space has already been
59  * partially accounted for in our ancestors.
60  */
61 static int64_t
62 parent_delta(dsl_dataset_t *ds, int64_t delta)
63 {
64 	uint64_t old_bytes, new_bytes;
65 
66 	if (ds->ds_reserved == 0)
67 		return (delta);
68 
69 	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
70 	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
71 
72 	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
73 	return (new_bytes - old_bytes);
74 }
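
/*
 * Worked example (hypothetical numbers): with ds_reserved == 10 and
 * ds_unique_bytes == 7, a delta of +5 gives old_bytes = MAX(7, 10) = 10
 * and new_bytes = MAX(12, 10) = 12, so only 2 of the 5 new bytes are
 * charged to the dsl_dir; the other 3 were already covered by the
 * refreservation.  Callers such as dsl_dataset_block_born() then move
 * that remainder from DD_USED_REFRSRV to DD_USED_HEAD with
 * dsl_dir_transfer_space().
 */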
75 
76 void
77 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
78 {
79 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
80 	int compressed = BP_GET_PSIZE(bp);
81 	int uncompressed = BP_GET_UCSIZE(bp);
82 	int64_t delta;
83 
84 	dprintf_bp(bp, "ds=%p", ds);
85 
86 	ASSERT(dmu_tx_is_syncing(tx));
87 	/* It could have been compressed away to nothing */
88 	if (BP_IS_HOLE(bp))
89 		return;
90 	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
91 	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
92 	if (ds == NULL) {
93 		/*
94 		 * Account for the meta-objset space in its placeholder
95 		 * dsl_dir.
96 		 */
97 		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
98 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
99 		    used, compressed, uncompressed, tx);
100 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
101 		return;
102 	}
103 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
104 
105 	mutex_enter(&ds->ds_dir->dd_lock);
106 	mutex_enter(&ds->ds_lock);
107 	delta = parent_delta(ds, used);
108 	ds->ds_phys->ds_used_bytes += used;
109 	ds->ds_phys->ds_compressed_bytes += compressed;
110 	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
111 	ds->ds_phys->ds_unique_bytes += used;
112 	mutex_exit(&ds->ds_lock);
113 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
114 	    compressed, uncompressed, tx);
115 	dsl_dir_transfer_space(ds->ds_dir, used - delta,
116 	    DD_USED_REFRSRV, DD_USED_HEAD, tx);
117 	mutex_exit(&ds->ds_dir->dd_lock);
118 }
119 
120 int
121 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
122     boolean_t async)
123 {
124 	if (BP_IS_HOLE(bp))
125 		return (0);
126 
127 	ASSERT(dmu_tx_is_syncing(tx));
128 	ASSERT(bp->blk_birth <= tx->tx_txg);
129 
130 	int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
131 	int compressed = BP_GET_PSIZE(bp);
132 	int uncompressed = BP_GET_UCSIZE(bp);
133 
134 	ASSERT(used > 0);
135 	if (ds == NULL) {
136 		/*
137 		 * Account for the meta-objset space in its placeholder
138 		 * dsl_dir.
139 		 */
140 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
141 
142 		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD,
143 		    -used, -compressed, -uncompressed, tx);
144 		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
145 		return (used);
146 	}
147 	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
148 
149 	ASSERT(!dsl_dataset_is_snapshot(ds));
150 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
151 
152 	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
153 		int64_t delta;
154 
155 		dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
156 		dsl_free(tx->tx_pool, tx->tx_txg, bp);
157 
158 		mutex_enter(&ds->ds_dir->dd_lock);
159 		mutex_enter(&ds->ds_lock);
160 		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
161 		    !DS_UNIQUE_IS_ACCURATE(ds));
162 		delta = parent_delta(ds, -used);
163 		ds->ds_phys->ds_unique_bytes -= used;
164 		mutex_exit(&ds->ds_lock);
165 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
166 		    delta, -compressed, -uncompressed, tx);
167 		dsl_dir_transfer_space(ds->ds_dir, -used - delta,
168 		    DD_USED_REFRSRV, DD_USED_HEAD, tx);
169 		mutex_exit(&ds->ds_dir->dd_lock);
170 	} else {
171 		dprintf_bp(bp, "putting on dead list: %s", "");
172 		if (async) {
173 			/*
174 			 * We are here as part of zio's write done callback,
175 			 * which means we're a zio interrupt thread.  We can't
176 			 * call bplist_enqueue() now because it may block
177 			 * waiting for I/O.  Instead, put bp on the deferred
178 			 * queue and let dsl_pool_sync() finish the job.
179 			 */
180 			bplist_enqueue_deferred(&ds->ds_deadlist, bp);
181 		} else {
182 			VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
183 		}
184 		ASSERT3U(ds->ds_prev->ds_object, ==,
185 		    ds->ds_phys->ds_prev_snap_obj);
186 		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
187 		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
188 		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
189 		    ds->ds_object && bp->blk_birth >
190 		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
191 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
192 			mutex_enter(&ds->ds_prev->ds_lock);
193 			ds->ds_prev->ds_phys->ds_unique_bytes += used;
194 			mutex_exit(&ds->ds_prev->ds_lock);
195 		}
196 		if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
197 			dsl_dir_transfer_space(ds->ds_dir, used,
198 			    DD_USED_HEAD, DD_USED_SNAP, tx);
199 		}
200 	}
201 	mutex_enter(&ds->ds_lock);
202 	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
203 	ds->ds_phys->ds_used_bytes -= used;
204 	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
205 	ds->ds_phys->ds_compressed_bytes -= compressed;
206 	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
207 	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
208 	mutex_exit(&ds->ds_lock);
209 
210 	return (used);
211 }
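
/*
 * Illustrative timeline for dsl_dataset_block_kill() (hypothetical
 * txgs): with the most recent snapshot taken at txg 200 and its
 * predecessor at txg 100, a block born at txg 250 is referenced only by
 * the head, so it is freed immediately and ds_unique_bytes shrinks.  A
 * block born at txg 150 is still referenced by the txg-200 snapshot and
 * goes on the deadlist instead; since 150 > 100, that snapshot is now
 * the only snapshot referencing it, so the block also becomes unique to
 * the snapshot.
 */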
212 
213 uint64_t
214 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
215 {
216 	uint64_t trysnap = 0;
217 
218 	if (ds == NULL)
219 		return (0);
220 	/*
221 	 * The snapshot creation could fail, but that would cause an
222 	 * incorrect FALSE return, which would only result in an
223 	 * overestimation of the amount of space that an operation would
224 	 * consume, which is OK.
225 	 *
226 	 * There's also a small window where we could miss a pending
227 	 * snapshot, because we could set the sync task in the quiescing
228 	 * phase.  So this should only be used as a guess.
229 	 */
230 	if (ds->ds_trysnap_txg >
231 	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
232 		trysnap = ds->ds_trysnap_txg;
233 	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
234 }
235 
236 boolean_t
237 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
238 {
239 	return (blk_birth > dsl_dataset_prev_snap_txg(ds));
240 }
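
/*
 * Example (hypothetical txgs): if the most recent snapshot was taken at
 * txg 100, a block born at txg 150 is freeable (no snapshot can be
 * referencing it), while a block born at txg 80 is not; killing the
 * latter only moves it to the deadlist.  Because of the trysnap guess
 * above, the answer may err toward "not freeable", which merely
 * overestimates the space an operation would consume.
 */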
241 
242 /* ARGSUSED */
243 static void
244 dsl_dataset_evict(dmu_buf_t *db, void *dsv)
245 {
246 	dsl_dataset_t *ds = dsv;
247 
248 	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));
249 
250 	unique_remove(ds->ds_fsid_guid);
251 
252 	if (ds->ds_objset != NULL)
253 		dmu_objset_evict(ds->ds_objset);
254 
255 	if (ds->ds_prev) {
256 		dsl_dataset_drop_ref(ds->ds_prev, ds);
257 		ds->ds_prev = NULL;
258 	}
259 
260 	bplist_close(&ds->ds_deadlist);
261 	if (ds->ds_dir)
262 		dsl_dir_close(ds->ds_dir, ds);
263 
264 	ASSERT(!list_link_active(&ds->ds_synced_link));
265 
266 	mutex_destroy(&ds->ds_lock);
267 	mutex_destroy(&ds->ds_recvlock);
268 	mutex_destroy(&ds->ds_opening_lock);
269 	rw_destroy(&ds->ds_rwlock);
270 	cv_destroy(&ds->ds_exclusive_cv);
271 	bplist_fini(&ds->ds_deadlist);
272 
273 	kmem_free(ds, sizeof (dsl_dataset_t));
274 }
275 
276 static int
277 dsl_dataset_get_snapname(dsl_dataset_t *ds)
278 {
279 	dsl_dataset_phys_t *headphys;
280 	int err;
281 	dmu_buf_t *headdbuf;
282 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
283 	objset_t *mos = dp->dp_meta_objset;
284 
285 	if (ds->ds_snapname[0])
286 		return (0);
287 	if (ds->ds_phys->ds_next_snap_obj == 0)
288 		return (0);
289 
290 	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
291 	    FTAG, &headdbuf);
292 	if (err)
293 		return (err);
294 	headphys = headdbuf->db_data;
295 	err = zap_value_search(dp->dp_meta_objset,
296 	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
297 	dmu_buf_rele(headdbuf, FTAG);
298 	return (err);
299 }
300 
301 static int
302 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
303 {
304 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
305 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
306 	matchtype_t mt;
307 	int err;
308 
309 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
310 		mt = MT_FIRST;
311 	else
312 		mt = MT_EXACT;
313 
314 	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
315 	    value, mt, NULL, 0, NULL);
316 	if (err == ENOTSUP && mt == MT_FIRST)
317 		err = zap_lookup(mos, snapobj, name, 8, 1, value);
318 	return (err);
319 }
320 
321 static int
322 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
323 {
324 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
325 	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
326 	matchtype_t mt;
327 	int err;
328 
329 	dsl_dir_snap_cmtime_update(ds->ds_dir);
330 
331 	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
332 		mt = MT_FIRST;
333 	else
334 		mt = MT_EXACT;
335 
336 	err = zap_remove_norm(mos, snapobj, name, mt, tx);
337 	if (err == ENOTSUP && mt == MT_FIRST)
338 		err = zap_remove(mos, snapobj, name, tx);
339 	return (err);
340 }
341 
342 static int
343 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
344     dsl_dataset_t **dsp)
345 {
346 	objset_t *mos = dp->dp_meta_objset;
347 	dmu_buf_t *dbuf;
348 	dsl_dataset_t *ds;
349 	int err;
350 
351 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
352 	    dsl_pool_sync_context(dp));
353 
354 	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
355 	if (err)
356 		return (err);
357 	ds = dmu_buf_get_user(dbuf);
358 	if (ds == NULL) {
359 		dsl_dataset_t *winner;
360 
361 		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
362 		ds->ds_dbuf = dbuf;
363 		ds->ds_object = dsobj;
364 		ds->ds_phys = dbuf->db_data;
365 
366 		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
367 		mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL);
368 		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
369 		rw_init(&ds->ds_rwlock, 0, 0, 0);
370 		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);
371 		bplist_init(&ds->ds_deadlist);
372 
373 		err = bplist_open(&ds->ds_deadlist,
374 		    mos, ds->ds_phys->ds_deadlist_obj);
375 		if (err == 0) {
376 			err = dsl_dir_open_obj(dp,
377 			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
378 		}
379 		if (err) {
380 			/*
381 			 * we don't really need to close the bplist if we
382 			 * just opened it.
383 			 */
384 			mutex_destroy(&ds->ds_lock);
385 			mutex_destroy(&ds->ds_recvlock);
386 			mutex_destroy(&ds->ds_opening_lock);
387 			rw_destroy(&ds->ds_rwlock);
388 			cv_destroy(&ds->ds_exclusive_cv);
389 			bplist_fini(&ds->ds_deadlist);
390 			kmem_free(ds, sizeof (dsl_dataset_t));
391 			dmu_buf_rele(dbuf, tag);
392 			return (err);
393 		}
394 
395 		if (!dsl_dataset_is_snapshot(ds)) {
396 			ds->ds_snapname[0] = '\0';
397 			if (ds->ds_phys->ds_prev_snap_obj) {
398 				err = dsl_dataset_get_ref(dp,
399 				    ds->ds_phys->ds_prev_snap_obj,
400 				    ds, &ds->ds_prev);
401 			}
402 		} else {
403 			if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
404 				err = dsl_dataset_get_snapname(ds);
405 			if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) {
406 				err = zap_count(
407 				    ds->ds_dir->dd_pool->dp_meta_objset,
408 				    ds->ds_phys->ds_userrefs_obj,
409 				    &ds->ds_userrefs);
410 			}
411 		}
412 
413 		if (err == 0 && !dsl_dataset_is_snapshot(ds)) {
414 			/*
415 			 * In sync context, we're called with either no lock
416 			 * or with the write lock.  If we're not syncing,
417 			 * we're always called with the read lock held.
418 			 */
419 			boolean_t need_lock =
420 			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
421 			    dsl_pool_sync_context(dp);
422 
423 			if (need_lock)
424 				rw_enter(&dp->dp_config_rwlock, RW_READER);
425 
426 			err = dsl_prop_get_ds(ds,
427 			    "refreservation", sizeof (uint64_t), 1,
428 			    &ds->ds_reserved, NULL);
429 			if (err == 0) {
430 				err = dsl_prop_get_ds(ds,
431 				    "refquota", sizeof (uint64_t), 1,
432 				    &ds->ds_quota, NULL);
433 			}
434 
435 			if (need_lock)
436 				rw_exit(&dp->dp_config_rwlock);
437 		} else {
438 			ds->ds_reserved = ds->ds_quota = 0;
439 		}
440 
441 		if (err == 0) {
442 			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
443 			    dsl_dataset_evict);
444 		}
445 		if (err || winner) {
446 			bplist_close(&ds->ds_deadlist);
447 			if (ds->ds_prev)
448 				dsl_dataset_drop_ref(ds->ds_prev, ds);
449 			dsl_dir_close(ds->ds_dir, ds);
450 			mutex_destroy(&ds->ds_lock);
451 			mutex_destroy(&ds->ds_recvlock);
452 			mutex_destroy(&ds->ds_opening_lock);
453 			rw_destroy(&ds->ds_rwlock);
454 			cv_destroy(&ds->ds_exclusive_cv);
455 			bplist_fini(&ds->ds_deadlist);
456 			kmem_free(ds, sizeof (dsl_dataset_t));
457 			if (err) {
458 				dmu_buf_rele(dbuf, tag);
459 				return (err);
460 			}
461 			ds = winner;
462 		} else {
463 			ds->ds_fsid_guid =
464 			    unique_insert(ds->ds_phys->ds_fsid_guid);
465 		}
466 	}
467 	ASSERT3P(ds->ds_dbuf, ==, dbuf);
468 	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
469 	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
470 	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
471 	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
472 	mutex_enter(&ds->ds_lock);
473 	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
474 		mutex_exit(&ds->ds_lock);
475 		dmu_buf_rele(ds->ds_dbuf, tag);
476 		return (ENOENT);
477 	}
478 	mutex_exit(&ds->ds_lock);
479 	*dsp = ds;
480 	return (0);
481 }
482 
483 static int
484 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
485 {
486 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
487 
488 	/*
489  * In syncing context we don't want the rwlock: there
490 	 * may be an existing writer waiting for sync phase to
491 	 * finish.  We don't need to worry about such writers, since
492 	 * sync phase is single-threaded, so the writer can't be
493 	 * doing anything while we are active.
494 	 */
495 	if (dsl_pool_sync_context(dp)) {
496 		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
497 		return (0);
498 	}
499 
500 	/*
501 	 * Normal users will hold the ds_rwlock as a READER until they
502 	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
503 	 * drop their READER lock after they set the ds_owner field.
504 	 *
505 	 * If the dataset is being destroyed, the destroy thread will
506 	 * obtain a WRITER lock for exclusive access after it's done its
507 	 * open-context work and then change the ds_owner to
508 	 * dsl_reaper once destruction is assured.  So threads
509 	 * may block here temporarily, until the "destructability" of
510 	 * the dataset is determined.
511 	 */
512 	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
513 	mutex_enter(&ds->ds_lock);
514 	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
515 		rw_exit(&dp->dp_config_rwlock);
516 		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
517 		if (DSL_DATASET_IS_DESTROYED(ds)) {
518 			mutex_exit(&ds->ds_lock);
519 			dsl_dataset_drop_ref(ds, tag);
520 			rw_enter(&dp->dp_config_rwlock, RW_READER);
521 			return (ENOENT);
522 		}
523 		/*
524 		 * The dp_config_rwlock lives above the ds_lock in the lock
525 		 * order, and we need to check DSL_DATASET_IS_DESTROYED() while
526 		 * holding the ds_lock, so we have to drop and reacquire
527 		 * the ds_lock here.
528 		 */
529 		mutex_exit(&ds->ds_lock);
530 		rw_enter(&dp->dp_config_rwlock, RW_READER);
531 		mutex_enter(&ds->ds_lock);
532 	}
533 	mutex_exit(&ds->ds_lock);
534 	return (0);
535 }
536 
537 int
538 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
539     dsl_dataset_t **dsp)
540 {
541 	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);
542 
543 	if (err)
544 		return (err);
545 	return (dsl_dataset_hold_ref(*dsp, tag));
546 }
547 
548 int
549 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok,
550     void *tag, dsl_dataset_t **dsp)
551 {
552 	int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp);
553 	if (err)
554 		return (err);
555 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
556 		dsl_dataset_rele(*dsp, tag);
557 		*dsp = NULL;
558 		return (EBUSY);
559 	}
560 	return (0);
561 }
562 
563 int
564 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
565 {
566 	dsl_dir_t *dd;
567 	dsl_pool_t *dp;
568 	const char *snapname;
569 	uint64_t obj;
570 	int err = 0;
571 
572 	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
573 	if (err)
574 		return (err);
575 
576 	dp = dd->dd_pool;
577 	obj = dd->dd_phys->dd_head_dataset_obj;
578 	rw_enter(&dp->dp_config_rwlock, RW_READER);
579 	if (obj)
580 		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
581 	else
582 		err = ENOENT;
583 	if (err)
584 		goto out;
585 
586 	err = dsl_dataset_hold_ref(*dsp, tag);
587 
588 	/* we may be looking for a snapshot */
589 	if (err == 0 && snapname != NULL) {
590 		dsl_dataset_t *ds = NULL;
591 
592 		if (*snapname++ != '@') {
593 			dsl_dataset_rele(*dsp, tag);
594 			err = ENOENT;
595 			goto out;
596 		}
597 
598 		dprintf("looking for snapshot '%s'\n", snapname);
599 		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
600 		if (err == 0)
601 			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
602 		dsl_dataset_rele(*dsp, tag);
603 
604 		ASSERT3U((err == 0), ==, (ds != NULL));
605 
606 		if (ds) {
607 			mutex_enter(&ds->ds_lock);
608 			if (ds->ds_snapname[0] == 0)
609 				(void) strlcpy(ds->ds_snapname, snapname,
610 				    sizeof (ds->ds_snapname));
611 			mutex_exit(&ds->ds_lock);
612 			err = dsl_dataset_hold_ref(ds, tag);
613 			*dsp = err ? NULL : ds;
614 		}
615 	}
616 out:
617 	rw_exit(&dp->dp_config_rwlock);
618 	dsl_dir_close(dd, FTAG);
619 	return (err);
620 }
621 
622 int
623 dsl_dataset_own(const char *name, boolean_t inconsistentok,
624     void *tag, dsl_dataset_t **dsp)
625 {
626 	int err = dsl_dataset_hold(name, tag, dsp);
627 	if (err)
628 		return (err);
629 	if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) {
630 		dsl_dataset_rele(*dsp, tag);
631 		return (EBUSY);
632 	}
633 	return (0);
634 }
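
/*
 * Illustrative open-context usage of the hold/own interfaces (a sketch
 * only; error handling elided and the dataset names are hypothetical):
 *
 *	dsl_dataset_t *ds;
 *
 *	if (dsl_dataset_hold("tank/fs@snap", FTAG, &ds) == 0) {
 *		... read-only use; ds_rwlock is held as READER ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 *	if (dsl_dataset_own("tank/fs", B_FALSE, FTAG, &ds) == 0) {
 *		... ds_owner == FTAG; others may hold but not own ...
 *		dsl_dataset_disown(ds, FTAG);
 *	}
 */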
635 
636 void
637 dsl_dataset_name(dsl_dataset_t *ds, char *name)
638 {
639 	if (ds == NULL) {
640 		(void) strcpy(name, "mos");
641 	} else {
642 		dsl_dir_name(ds->ds_dir, name);
643 		VERIFY(0 == dsl_dataset_get_snapname(ds));
644 		if (ds->ds_snapname[0]) {
645 			(void) strcat(name, "@");
646 			/*
647 			 * We use a "recursive" mutex so that we
648 			 * can call dprintf_ds() with ds_lock held.
649 			 */
650 			if (!MUTEX_HELD(&ds->ds_lock)) {
651 				mutex_enter(&ds->ds_lock);
652 				(void) strcat(name, ds->ds_snapname);
653 				mutex_exit(&ds->ds_lock);
654 			} else {
655 				(void) strcat(name, ds->ds_snapname);
656 			}
657 		}
658 	}
659 }
660 
661 static int
662 dsl_dataset_namelen(dsl_dataset_t *ds)
663 {
664 	int result;
665 
666 	if (ds == NULL) {
667 		result = 3;	/* "mos" */
668 	} else {
669 		result = dsl_dir_namelen(ds->ds_dir);
670 		VERIFY(0 == dsl_dataset_get_snapname(ds));
671 		if (ds->ds_snapname[0]) {
672 			++result;	/* adding one for the @-sign */
673 			if (!MUTEX_HELD(&ds->ds_lock)) {
674 				mutex_enter(&ds->ds_lock);
675 				result += strlen(ds->ds_snapname);
676 				mutex_exit(&ds->ds_lock);
677 			} else {
678 				result += strlen(ds->ds_snapname);
679 			}
680 		}
681 	}
682 
683 	return (result);
684 }
685 
686 void
687 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
688 {
689 	dmu_buf_rele(ds->ds_dbuf, tag);
690 }
691 
692 void
693 dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
694 {
695 	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
696 		rw_exit(&ds->ds_rwlock);
697 	}
698 	dsl_dataset_drop_ref(ds, tag);
699 }
700 
701 void
702 dsl_dataset_disown(dsl_dataset_t *ds, void *tag)
703 {
704 	ASSERT((ds->ds_owner == tag && ds->ds_dbuf) ||
705 	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));
706 
707 	mutex_enter(&ds->ds_lock);
708 	ds->ds_owner = NULL;
709 	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
710 		rw_exit(&ds->ds_rwlock);
711 		cv_broadcast(&ds->ds_exclusive_cv);
712 	}
713 	mutex_exit(&ds->ds_lock);
714 	if (ds->ds_dbuf)
715 		dsl_dataset_drop_ref(ds, tag);
716 	else
717 		dsl_dataset_evict(ds->ds_dbuf, ds);
718 }
719 
720 boolean_t
721 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag)
722 {
723 	boolean_t gotit = FALSE;
724 
725 	mutex_enter(&ds->ds_lock);
726 	if (ds->ds_owner == NULL &&
727 	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
728 		ds->ds_owner = tag;
729 		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
730 			rw_exit(&ds->ds_rwlock);
731 		gotit = TRUE;
732 	}
733 	mutex_exit(&ds->ds_lock);
734 	return (gotit);
735 }
736 
737 void
738 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
739 {
740 	ASSERT3P(owner, ==, ds->ds_owner);
741 	if (!RW_WRITE_HELD(&ds->ds_rwlock))
742 		rw_enter(&ds->ds_rwlock, RW_WRITER);
743 }
744 
745 uint64_t
746 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
747     uint64_t flags, dmu_tx_t *tx)
748 {
749 	dsl_pool_t *dp = dd->dd_pool;
750 	dmu_buf_t *dbuf;
751 	dsl_dataset_phys_t *dsphys;
752 	uint64_t dsobj;
753 	objset_t *mos = dp->dp_meta_objset;
754 
755 	if (origin == NULL)
756 		origin = dp->dp_origin_snap;
757 
758 	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
759 	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
760 	ASSERT(dmu_tx_is_syncing(tx));
761 	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);
762 
763 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
764 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
765 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
766 	dmu_buf_will_dirty(dbuf, tx);
767 	dsphys = dbuf->db_data;
768 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
769 	dsphys->ds_dir_obj = dd->dd_object;
770 	dsphys->ds_flags = flags;
771 	dsphys->ds_fsid_guid = unique_create();
772 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
773 	    sizeof (dsphys->ds_guid));
774 	dsphys->ds_snapnames_zapobj =
775 	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
776 	    DMU_OT_NONE, 0, tx);
777 	dsphys->ds_creation_time = gethrestime_sec();
778 	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
779 	dsphys->ds_deadlist_obj =
780 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
781 
782 	if (origin) {
783 		dsphys->ds_prev_snap_obj = origin->ds_object;
784 		dsphys->ds_prev_snap_txg =
785 		    origin->ds_phys->ds_creation_txg;
786 		dsphys->ds_used_bytes =
787 		    origin->ds_phys->ds_used_bytes;
788 		dsphys->ds_compressed_bytes =
789 		    origin->ds_phys->ds_compressed_bytes;
790 		dsphys->ds_uncompressed_bytes =
791 		    origin->ds_phys->ds_uncompressed_bytes;
792 		dsphys->ds_bp = origin->ds_phys->ds_bp;
793 		dsphys->ds_flags |= origin->ds_phys->ds_flags;
794 
795 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
796 		origin->ds_phys->ds_num_children++;
797 
798 		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
799 			if (origin->ds_phys->ds_next_clones_obj == 0) {
800 				origin->ds_phys->ds_next_clones_obj =
801 				    zap_create(mos,
802 				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
803 			}
804 			VERIFY(0 == zap_add_int(mos,
805 			    origin->ds_phys->ds_next_clones_obj,
806 			    dsobj, tx));
807 		}
808 
809 		dmu_buf_will_dirty(dd->dd_dbuf, tx);
810 		dd->dd_phys->dd_origin_obj = origin->ds_object;
811 	}
812 
813 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
814 		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
815 
816 	dmu_buf_rele(dbuf, FTAG);
817 
818 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
819 	dd->dd_phys->dd_head_dataset_obj = dsobj;
820 
821 	return (dsobj);
822 }
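
/*
 * Note on the clone case above: a new clone starts with ds_used_bytes,
 * ds_compressed_bytes, ds_uncompressed_bytes and the root block pointer
 * copied from its origin, while ds_unique_bytes stays zero (the dsphys
 * was bzero'd), because the clone initially shares every block with the
 * origin snapshot.  The origin's ds_num_children is bumped and, on
 * pools at SPA_VERSION_NEXT_CLONES or later, the new head is recorded
 * in the origin's ds_next_clones_obj ZAP.
 */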
823 
824 uint64_t
825 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
826     dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
827 {
828 	dsl_pool_t *dp = pdd->dd_pool;
829 	uint64_t dsobj, ddobj;
830 	dsl_dir_t *dd;
831 
832 	ASSERT(lastname[0] != '@');
833 
834 	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
835 	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));
836 
837 	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);
838 
839 	dsl_deleg_set_create_perms(dd, tx, cr);
840 
841 	dsl_dir_close(dd, FTAG);
842 
843 	return (dsobj);
844 }
845 
846 struct destroyarg {
847 	dsl_sync_task_group_t *dstg;
848 	char *snapname;
849 	char *failed;
850 	boolean_t defer;
851 };
852 
853 static int
854 dsl_snapshot_destroy_one(const char *name, void *arg)
855 {
856 	struct destroyarg *da = arg;
857 	dsl_dataset_t *ds;
858 	int err;
859 	char *dsname;
860 
861 	dsname = kmem_asprintf("%s@%s", name, da->snapname);
862 	err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds);
863 	strfree(dsname);
864 	if (err == 0) {
865 		struct dsl_ds_destroyarg *dsda;
866 
867 		dsl_dataset_make_exclusive(ds, da->dstg);
868 		dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP);
869 		dsda->ds = ds;
870 		dsda->defer = da->defer;
871 		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
872 		    dsl_dataset_destroy_sync, dsda, da->dstg, 0);
873 	} else if (err == ENOENT) {
874 		err = 0;
875 	} else {
876 		(void) strcpy(da->failed, name);
877 	}
878 	return (err);
879 }
880 
881 /*
882  * Destroy 'snapname' in all descendants of 'fsname'.
883  */
884 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
885 int
886 dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer)
887 {
888 	int err;
889 	struct destroyarg da;
890 	dsl_sync_task_t *dst;
891 	spa_t *spa;
892 
893 	err = spa_open(fsname, &spa, FTAG);
894 	if (err)
895 		return (err);
896 	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
897 	da.snapname = snapname;
898 	da.failed = fsname;
899 	da.defer = defer;
900 
901 	err = dmu_objset_find(fsname,
902 	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);
903 
904 	if (err == 0)
905 		err = dsl_sync_task_group_wait(da.dstg);
906 
907 	for (dst = list_head(&da.dstg->dstg_tasks); dst;
908 	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
909 		struct dsl_ds_destroyarg *dsda = dst->dst_arg1;
910 		dsl_dataset_t *ds = dsda->ds;
911 
912 		/*
913 		 * Return the file system name that triggered the error
914 		 */
915 		if (dst->dst_err) {
916 			dsl_dataset_name(ds, fsname);
917 			*strchr(fsname, '@') = '\0';
918 		}
919 		ASSERT3P(dsda->rm_origin, ==, NULL);
920 		dsl_dataset_disown(ds, da.dstg);
921 		kmem_free(dsda, sizeof (struct dsl_ds_destroyarg));
922 	}
923 
924 	dsl_sync_task_group_destroy(da.dstg);
925 	spa_close(spa, FTAG);
926 	return (err);
927 }
928 
929 static boolean_t
930 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds)
931 {
932 	boolean_t might_destroy = B_FALSE;
933 
934 	mutex_enter(&ds->ds_lock);
935 	if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 &&
936 	    DS_IS_DEFER_DESTROY(ds))
937 		might_destroy = B_TRUE;
938 	mutex_exit(&ds->ds_lock);
939 
940 	return (might_destroy);
941 }
942 
943 /*
944  * If we're removing a clone, and these three conditions are true:
945  *	1) the clone's origin has no other children
946  *	2) the clone's origin has no user references
947  *	3) the clone's origin has been marked for deferred destruction
948  * Then, prepare to remove the origin as part of this sync task group.
949  */
950 static int
951 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag)
952 {
953 	dsl_dataset_t *ds = dsda->ds;
954 	dsl_dataset_t *origin = ds->ds_prev;
955 
956 	if (dsl_dataset_might_destroy_origin(origin)) {
957 		char *name;
958 		int namelen;
959 		int error;
960 
961 		namelen = dsl_dataset_namelen(origin) + 1;
962 		name = kmem_alloc(namelen, KM_SLEEP);
963 		dsl_dataset_name(origin, name);
964 #ifdef _KERNEL
965 		error = zfs_unmount_snap(name, NULL);
966 		if (error) {
967 			kmem_free(name, namelen);
968 			return (error);
969 		}
970 #endif
971 		error = dsl_dataset_own(name, B_TRUE, tag, &origin);
972 		kmem_free(name, namelen);
973 		if (error)
974 			return (error);
975 		dsda->rm_origin = origin;
976 		dsl_dataset_make_exclusive(origin, tag);
977 	}
978 
979 	return (0);
980 }
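
/*
 * Example scenario (hypothetical names): after "zfs destroy -d
 * tank/fs@origin" marks that snapshot for deferred destroy, destroying
 * its last clone finds ds_num_children == 2 (the origin's own next
 * dataset plus this one clone) and no user holds, so the origin is
 * owned here and torn down in the same sync task group as the clone.
 */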
981 
982 /*
983  * ds must be opened as OWNER.  On return (whether successful or not),
984  * ds will be closed and caller can no longer dereference it.
985  */
986 int
987 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer)
988 {
989 	int err;
990 	dsl_sync_task_group_t *dstg;
991 	objset_t *os;
992 	dsl_dir_t *dd;
993 	uint64_t obj;
994 	struct dsl_ds_destroyarg dsda = { 0 };
995 	dsl_dataset_t dummy_ds = { 0 };
996 
997 	dsda.ds = ds;
998 
999 	if (dsl_dataset_is_snapshot(ds)) {
1000 		/* Destroying a snapshot is simpler */
1001 		dsl_dataset_make_exclusive(ds, tag);
1002 
1003 		dsda.defer = defer;
1004 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
1005 		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
1006 		    &dsda, tag, 0);
1007 		ASSERT3P(dsda.rm_origin, ==, NULL);
1008 		goto out;
1009 	} else if (defer) {
1010 		err = EINVAL;
1011 		goto out;
1012 	}
1013 
1014 	dd = ds->ds_dir;
1015 	dummy_ds.ds_dir = dd;
1016 	dummy_ds.ds_object = ds->ds_object;
1017 
1018 	/*
1019 	 * Check for errors and mark this ds as inconsistent, in
1020 	 * case we crash while freeing the objects.
1021 	 */
1022 	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
1023 	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
1024 	if (err)
1025 		goto out;
1026 
1027 	err = dmu_objset_from_ds(ds, &os);
1028 	if (err)
1029 		goto out;
1030 
1031 	/*
1032 	 * remove the objects in open context, so that we won't
1033 	 * have too much to do in syncing context.
1034 	 */
1035 	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
1036 	    ds->ds_phys->ds_prev_snap_txg)) {
1037 		/*
1038 		 * Ignore errors; if there is not enough disk space
1039 		 * we will deal with it in dsl_dataset_destroy_sync().
1040 		 */
1041 		(void) dmu_free_object(os, obj);
1042 	}
1043 
1044 	/*
1045 	 * We need to sync out all in-flight IO before we try to evict
1046 	 * (the dataset evict func is trying to clear the cached entries
1047 	 * for this dataset in the ARC).
1048 	 */
1049 	txg_wait_synced(dd->dd_pool, 0);
1050 
1051 	/*
1052 	 * If we managed to free all the objects in open
1053 	 * context, the user space accounting should be zero.
1054 	 */
1055 	if (ds->ds_phys->ds_bp.blk_fill == 0 &&
1056 	    dmu_objset_userused_enabled(os)) {
1057 		uint64_t count;
1058 
1059 		ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 ||
1060 		    count == 0);
1061 		ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 ||
1062 		    count == 0);
1063 	}
1064 
1065 	if (err != ESRCH)
1066 		goto out;
1067 
1068 	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
1069 	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
1070 	rw_exit(&dd->dd_pool->dp_config_rwlock);
1071 
1072 	if (err)
1073 		goto out;
1074 
1075 	/*
1076 	 * Blow away the dsl_dir + head dataset.
1077 	 */
1078 	dsl_dataset_make_exclusive(ds, tag);
1079 	/*
1080 	 * If we're removing a clone, we might also need to remove its
1081 	 * origin.
1082 	 */
1083 	do {
1084 		dsda.need_prep = B_FALSE;
1085 		if (dsl_dir_is_clone(dd)) {
1086 			err = dsl_dataset_origin_rm_prep(&dsda, tag);
1087 			if (err) {
1088 				dsl_dir_close(dd, FTAG);
1089 				goto out;
1090 			}
1091 		}
1092 
1093 		dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
1094 		dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
1095 		    dsl_dataset_destroy_sync, &dsda, tag, 0);
1096 		dsl_sync_task_create(dstg, dsl_dir_destroy_check,
1097 		    dsl_dir_destroy_sync, &dummy_ds, FTAG, 0);
1098 		err = dsl_sync_task_group_wait(dstg);
1099 		dsl_sync_task_group_destroy(dstg);
1100 
1101 		/*
1102 		 * We could be racing against 'zfs release' or 'zfs destroy -d'
1103 		 * on the origin snap, in which case we can get EBUSY if we
1104 		 * needed to destroy the origin snap but were not ready to
1105 		 * do so.
1106 		 */
1107 		if (dsda.need_prep) {
1108 			ASSERT(err == EBUSY);
1109 			ASSERT(dsl_dir_is_clone(dd));
1110 			ASSERT(dsda.rm_origin == NULL);
1111 		}
1112 	} while (dsda.need_prep);
1113 
1114 	if (dsda.rm_origin != NULL)
1115 		dsl_dataset_disown(dsda.rm_origin, tag);
1116 
1117 	/* if it is successful, dsl_dir_destroy_sync will close the dd */
1118 	if (err)
1119 		dsl_dir_close(dd, FTAG);
1120 out:
1121 	dsl_dataset_disown(ds, tag);
1122 	return (err);
1123 }
1124 
1125 blkptr_t *
1126 dsl_dataset_get_blkptr(dsl_dataset_t *ds)
1127 {
1128 	return (&ds->ds_phys->ds_bp);
1129 }
1130 
1131 void
1132 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
1133 {
1134 	ASSERT(dmu_tx_is_syncing(tx));
1135 	/* If it's the meta-objset, set dp_meta_rootbp */
1136 	if (ds == NULL) {
1137 		tx->tx_pool->dp_meta_rootbp = *bp;
1138 	} else {
1139 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
1140 		ds->ds_phys->ds_bp = *bp;
1141 	}
1142 }
1143 
1144 spa_t *
1145 dsl_dataset_get_spa(dsl_dataset_t *ds)
1146 {
1147 	return (ds->ds_dir->dd_pool->dp_spa);
1148 }
1149 
1150 void
1151 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
1152 {
1153 	dsl_pool_t *dp;
1154 
1155 	if (ds == NULL) /* this is the meta-objset */
1156 		return;
1157 
1158 	ASSERT(ds->ds_objset != NULL);
1159 
1160 	if (ds->ds_phys->ds_next_snap_obj != 0)
1161 		panic("dirtying snapshot!");
1162 
1163 	dp = ds->ds_dir->dd_pool;
1164 
1165 	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
1166 		/* up the hold count until we can be written out */
1167 		dmu_buf_add_ref(ds->ds_dbuf, ds);
1168 	}
1169 }
1170 
1171 /*
1172  * The unique space in the head dataset can be calculated by subtracting
1173  * the space used in the most recent snapshot, that is still being used
1174  * in this file system, from the space currently in use.  To figure out
1175  * the space in the most recent snapshot still in use, we need to take
1176  * the total space used in the snapshot and subtract out the space that
1177  * has been freed up since the snapshot was taken.
1178  */
1179 static void
1180 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
1181 {
1182 	uint64_t mrs_used;
1183 	uint64_t dlused, dlcomp, dluncomp;
1184 
1185 	ASSERT(!dsl_dataset_is_snapshot(ds));
1186 
1187 	if (ds->ds_phys->ds_prev_snap_obj != 0)
1188 		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
1189 	else
1190 		mrs_used = 0;
1191 
1192 	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
1193 	    &dluncomp));
1194 
1195 	ASSERT3U(dlused, <=, mrs_used);
1196 	ds->ds_phys->ds_unique_bytes =
1197 	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);
1198 
1199 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
1200 	    SPA_VERSION_UNIQUE_ACCURATE)
1201 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1202 }
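
/*
 * Worked example (hypothetical numbers): if the head uses 120 units,
 * the most recent snapshot uses 100, and 30 of the snapshot's units
 * have since been freed from the head (they sit on the deadlist), then
 * the snapshot and the head still share 100 - 30 = 70 units, leaving
 * 120 - 70 = 50 units unique to the head.
 */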
1203 
1204 struct killarg {
1205 	dsl_dataset_t *ds;
1206 	dmu_tx_t *tx;
1207 };
1208 
1209 /* ARGSUSED */
1210 static int
1211 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
1212     const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
1213 {
1214 	struct killarg *ka = arg;
1215 	dmu_tx_t *tx = ka->tx;
1216 
1217 	if (bp == NULL)
1218 		return (0);
1219 
1220 	if (zb->zb_level == ZB_ZIL_LEVEL) {
1221 		ASSERT(zilog != NULL);
1222 		/*
1223 		 * It's a block in the intent log.  It has no
1224 		 * accounting, so just free it.
1225 		 */
1226 		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
1227 	} else {
1228 		ASSERT(zilog == NULL);
1229 		ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg);
1230 		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
1231 	}
1232 
1233 	return (0);
1234 }
1235 
1236 /* ARGSUSED */
1237 static int
1238 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
1239 {
1240 	dsl_dataset_t *ds = arg1;
1241 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1242 	uint64_t count;
1243 	int err;
1244 
1245 	/*
1246 	 * Can't delete a head dataset if there are snapshots of it.
1247 	 * (Except if the only snapshots are from the branch we cloned
1248 	 * from.)
1249 	 */
1250 	if (ds->ds_prev != NULL &&
1251 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1252 		return (EBUSY);
1253 
1254 	/*
1255 	 * This is really a dsl_dir thing, but check it here so that
1256 	 * we'll be less likely to leave this dataset inconsistent &
1257 	 * nearly destroyed.
1258 	 */
1259 	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
1260 	if (err)
1261 		return (err);
1262 	if (count != 0)
1263 		return (EEXIST);
1264 
1265 	return (0);
1266 }
1267 
1268 /* ARGSUSED */
1269 static void
1270 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1271 {
1272 	dsl_dataset_t *ds = arg1;
1273 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1274 
1275 	/* Mark it as inconsistent on-disk, in case we crash */
1276 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1277 	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
1278 
1279 	spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
1280 	    "dataset = %llu", ds->ds_object);
1281 }
1282 
1283 static int
1284 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag,
1285     dmu_tx_t *tx)
1286 {
1287 	dsl_dataset_t *ds = dsda->ds;
1288 	dsl_dataset_t *ds_prev = ds->ds_prev;
1289 
1290 	if (dsl_dataset_might_destroy_origin(ds_prev)) {
1291 		struct dsl_ds_destroyarg ndsda = {0};
1292 
1293 		/*
1294 		 * If we're not prepared to remove the origin, don't remove
1295 		 * the clone either.
1296 		 */
1297 		if (dsda->rm_origin == NULL) {
1298 			dsda->need_prep = B_TRUE;
1299 			return (EBUSY);
1300 		}
1301 
1302 		ndsda.ds = ds_prev;
1303 		ndsda.is_origin_rm = B_TRUE;
1304 		return (dsl_dataset_destroy_check(&ndsda, tag, tx));
1305 	}
1306 
1307 	/*
1308 	 * If we're not going to remove the origin after all,
1309 	 * undo the open context setup.
1310 	 */
1311 	if (dsda->rm_origin != NULL) {
1312 		dsl_dataset_disown(dsda->rm_origin, tag);
1313 		dsda->rm_origin = NULL;
1314 	}
1315 
1316 	return (0);
1317 }
1318 
1319 /* ARGSUSED */
1320 int
1321 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
1322 {
1323 	struct dsl_ds_destroyarg *dsda = arg1;
1324 	dsl_dataset_t *ds = dsda->ds;
1325 
1326 	/* we have an owner hold, so no one else can destroy us */
1327 	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
1328 
1329 	/*
1330 	 * Only allow deferred destroy on pools that support it.
1331 	 * NOTE: deferred destroy is only supported on snapshots.
1332 	 */
1333 	if (dsda->defer) {
1334 		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
1335 		    SPA_VERSION_USERREFS)
1336 			return (ENOTSUP);
1337 		ASSERT(dsl_dataset_is_snapshot(ds));
1338 		return (0);
1339 	}
1340 
1341 	/*
1342 	 * Can't delete a head dataset if there are snapshots of it.
1343 	 * (Except if the only snapshots are from the branch we cloned
1344 	 * from.)
1345 	 */
1346 	if (ds->ds_prev != NULL &&
1347 	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
1348 		return (EBUSY);
1349 
1350 	/*
1351 	 * If we made changes this txg, traverse_dsl_dataset won't find
1352 	 * them.  Try again.
1353 	 */
1354 	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
1355 		return (EAGAIN);
1356 
1357 	if (dsl_dataset_is_snapshot(ds)) {
1358 		/*
1359 		 * If this snapshot has an elevated user reference count,
1360 		 * we can't destroy it yet.
1361 		 */
1362 		if (ds->ds_userrefs > 0 && !dsda->releasing)
1363 			return (EBUSY);
1364 
1365 		mutex_enter(&ds->ds_lock);
1366 		/*
1367 		 * Can't delete a branch point. However, if we're destroying
1368 		 * a clone and removing its origin due to it having a user
1369 		 * hold count of 0 and having been marked for deferred destroy,
1370 		 * it's OK for the origin to have a single clone.
1371 		 */
1372 		if (ds->ds_phys->ds_num_children >
1373 		    (dsda->is_origin_rm ? 2 : 1)) {
1374 			mutex_exit(&ds->ds_lock);
1375 			return (EEXIST);
1376 		}
1377 		mutex_exit(&ds->ds_lock);
1378 	} else if (dsl_dir_is_clone(ds->ds_dir)) {
1379 		return (dsl_dataset_origin_check(dsda, arg2, tx));
1380 	}
1381 
1382 	/* XXX we should do some i/o error checking... */
1383 	return (0);
1384 }
1385 
1386 struct refsarg {
1387 	kmutex_t lock;
1388 	boolean_t gone;
1389 	kcondvar_t cv;
1390 };
1391 
1392 /* ARGSUSED */
1393 static void
1394 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
1395 {
1396 	struct refsarg *arg = argv;
1397 
1398 	mutex_enter(&arg->lock);
1399 	arg->gone = TRUE;
1400 	cv_signal(&arg->cv);
1401 	mutex_exit(&arg->lock);
1402 }
1403 
1404 static void
1405 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
1406 {
1407 	struct refsarg arg;
1408 
1409 	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
1410 	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
1411 	arg.gone = FALSE;
1412 	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
1413 	    dsl_dataset_refs_gone);
1414 	dmu_buf_rele(ds->ds_dbuf, tag);
1415 	mutex_enter(&arg.lock);
1416 	while (!arg.gone)
1417 		cv_wait(&arg.cv, &arg.lock);
1418 	ASSERT(arg.gone);
1419 	mutex_exit(&arg.lock);
1420 	ds->ds_dbuf = NULL;
1421 	ds->ds_phys = NULL;
1422 	mutex_destroy(&arg.lock);
1423 	cv_destroy(&arg.cv);
1424 }
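
/*
 * The drain works by swapping the dbuf's user-eviction callback to
 * dsl_dataset_refs_gone() and then dropping our own hold; whichever
 * thread drops the last remaining hold fires the callback, which
 * signals the CV this thread sleeps on.  The destroying thread is thus
 * blocked until every outstanding dsl_dataset_t reference is released.
 */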
1425 
1426 static void
1427 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx)
1428 {
1429 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
1430 	uint64_t count;
1431 	int err;
1432 
1433 	ASSERT(ds->ds_phys->ds_num_children >= 2);
1434 	err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx);
1435 	/*
1436 	 * The err should not be ENOENT, but a bug in a previous version
1437 	 * of the code could cause upgrade_clones_cb() to not set
1438 	 * ds_next_snap_obj when it should, leading to a missing entry.
1439 	 * If we knew that the pool was created after
1440 	 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't
1441 	 * ENOENT.  However, at least we can check that we don't have
1442 	 * too many entries in the next_clones_obj even after failing to
1443 	 * remove this one.
1444 	 */
1445 	if (err != ENOENT) {
1446 		VERIFY3U(err, ==, 0);
1447 	}
1448 	ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj,
1449 	    &count));
1450 	ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2);
1451 }
1452 
1453 void
1454 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
1455 {
1456 	struct dsl_ds_destroyarg *dsda = arg1;
1457 	dsl_dataset_t *ds = dsda->ds;
1458 	int err;
1459 	int after_branch_point = FALSE;
1460 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1461 	objset_t *mos = dp->dp_meta_objset;
1462 	dsl_dataset_t *ds_prev = NULL;
1463 	uint64_t obj;
1464 
1465 	ASSERT(ds->ds_owner);
1466 	ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1);
1467 	ASSERT(ds->ds_prev == NULL ||
1468 	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
1469 	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);
1470 
1471 	if (dsda->defer) {
1472 		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
1473 		if (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1) {
1474 			dmu_buf_will_dirty(ds->ds_dbuf, tx);
1475 			ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY;
1476 			return;
1477 		}
1478 	}
1479 
1480 	/* signal any waiters that this dataset is going away */
1481 	mutex_enter(&ds->ds_lock);
1482 	ds->ds_owner = dsl_reaper;
1483 	cv_broadcast(&ds->ds_exclusive_cv);
1484 	mutex_exit(&ds->ds_lock);
1485 
1486 	if (ds->ds_objset) {
1487 		dmu_objset_evict(ds->ds_objset);
1488 		ds->ds_objset = NULL;
1489 	}
1490 
1491 	/* Remove our reservation */
1492 	if (ds->ds_reserved != 0) {
1493 		dsl_prop_setarg_t psa;
1494 		uint64_t value = 0;
1495 
1496 		dsl_prop_setarg_init_uint64(&psa, "refreservation",
1497 		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1498 		    &value);
1499 		psa.psa_effective_value = 0;	/* predict default value */
1500 
1501 		dsl_dataset_set_reservation_sync(ds, &psa, tx);
1502 		ASSERT3U(ds->ds_reserved, ==, 0);
1503 	}
1504 
1505 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1506 
1507 	dsl_scan_ds_destroyed(ds, tx);
1508 
1509 	obj = ds->ds_object;
1510 
1511 	if (ds->ds_phys->ds_prev_snap_obj != 0) {
1512 		if (ds->ds_prev) {
1513 			ds_prev = ds->ds_prev;
1514 		} else {
1515 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1516 			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
1517 		}
1518 		after_branch_point =
1519 		    (ds_prev->ds_phys->ds_next_snap_obj != obj);
1520 
1521 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
1522 		if (after_branch_point &&
1523 		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
1524 			remove_from_next_clones(ds_prev, obj, tx);
1525 			if (ds->ds_phys->ds_next_snap_obj != 0) {
1526 				VERIFY(0 == zap_add_int(mos,
1527 				    ds_prev->ds_phys->ds_next_clones_obj,
1528 				    ds->ds_phys->ds_next_snap_obj, tx));
1529 			}
1530 		}
1531 		if (after_branch_point &&
1532 		    ds->ds_phys->ds_next_snap_obj == 0) {
1533 			/* This clone is toast. */
1534 			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
1535 			ds_prev->ds_phys->ds_num_children--;
1536 
1537 			/*
1538 			 * If the clone's origin has no other clones, no
1539 			 * user holds, and has been marked for deferred
1540 			 * deletion, then we should have done the necessary
1541 			 * destroy setup for it.
1542 			 */
1543 			if (ds_prev->ds_phys->ds_num_children == 1 &&
1544 			    ds_prev->ds_userrefs == 0 &&
1545 			    DS_IS_DEFER_DESTROY(ds_prev)) {
1546 				ASSERT3P(dsda->rm_origin, !=, NULL);
1547 			} else {
1548 				ASSERT3P(dsda->rm_origin, ==, NULL);
1549 			}
1550 		} else if (!after_branch_point) {
1551 			ds_prev->ds_phys->ds_next_snap_obj =
1552 			    ds->ds_phys->ds_next_snap_obj;
1553 		}
1554 	}
1555 
1556 	if (dsl_dataset_is_snapshot(ds)) {
1557 		blkptr_t bp;
1558 		zio_t *pio;
1559 		dsl_dataset_t *ds_next;
1560 		uint64_t itor = 0;
1561 		uint64_t old_unique;
1562 		int64_t used = 0, compressed = 0, uncompressed = 0;
1563 
1564 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1565 		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
1566 		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);
1567 
1568 		old_unique = ds_next->ds_phys->ds_unique_bytes;
1569 
1570 		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
1571 		ds_next->ds_phys->ds_prev_snap_obj =
1572 		    ds->ds_phys->ds_prev_snap_obj;
1573 		ds_next->ds_phys->ds_prev_snap_txg =
1574 		    ds->ds_phys->ds_prev_snap_txg;
1575 		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1576 		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);
1577 
1578 		pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
1579 
1580 		/*
1581 		 * Transfer to our deadlist (which will become next's
1582 		 * new deadlist) any entries from next's current
1583 		 * deadlist which were born before prev, and free the
1584 		 * other entries.
1585 		 *
1586 		 * XXX we're doing this long task with the config lock held
1587 		 */
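		/*
		 * Concrete illustration (hypothetical txgs): destroying a
		 * snapshot taken at txg 200, with prev at txg 100 and next
		 * at txg 300, an entry on next's deadlist born at txg 50
		 * is still referenced by prev and moves to the merged
		 * deadlist, while an entry born at txg 150 was referenced
		 * only by the doomed snapshot and is freed here.
		 */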
1588 		while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
1589 			if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
1590 				VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
1591 				    &bp, tx));
1592 				if (ds_prev && !after_branch_point &&
1593 				    bp.blk_birth >
1594 				    ds_prev->ds_phys->ds_prev_snap_txg) {
1595 					ds_prev->ds_phys->ds_unique_bytes +=
1596 					    bp_get_dsize_sync(dp->dp_spa, &bp);
1597 				}
1598 			} else {
1599 				used += bp_get_dsize_sync(dp->dp_spa, &bp);
1600 				compressed += BP_GET_PSIZE(&bp);
1601 				uncompressed += BP_GET_UCSIZE(&bp);
1602 				dsl_free_sync(pio, dp, tx->tx_txg, &bp);
1603 			}
1604 		}
1605 		VERIFY3U(zio_wait(pio), ==, 0);
1606 		ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
1607 
1608 		/* change snapused */
1609 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
1610 		    -used, -compressed, -uncompressed, tx);
1611 
1612 		/* free next's deadlist */
1613 		bplist_close(&ds_next->ds_deadlist);
1614 		bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);
1615 
1616 		/* set next's deadlist to our deadlist */
1617 		bplist_close(&ds->ds_deadlist);
1618 		ds_next->ds_phys->ds_deadlist_obj =
1619 		    ds->ds_phys->ds_deadlist_obj;
1620 		VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
1621 		    ds_next->ds_phys->ds_deadlist_obj));
1622 		ds->ds_phys->ds_deadlist_obj = 0;
1623 
1624 		if (dsl_dataset_is_snapshot(ds_next)) {
1625 			/*
1626 			 * Update next's unique to include blocks which
1627 			 * were previously shared by only this snapshot
1628 			 * and it.  Those blocks will be born after the
1629 			 * prev snap and before this snap, and will have
1630 			 * died after the next snap and before the one
1631 		 * after that (i.e., be on the snap-after-next's
1632 			 * deadlist).
1633 			 *
1634 			 * XXX we're doing this long task with the
1635 			 * config lock held
1636 			 */
1637 			dsl_dataset_t *ds_after_next;
1638 			uint64_t space;
1639 
1640 			VERIFY(0 == dsl_dataset_hold_obj(dp,
1641 			    ds_next->ds_phys->ds_next_snap_obj,
1642 			    FTAG, &ds_after_next));
1643 
1644 			VERIFY(0 ==
1645 			    bplist_space_birthrange(&ds_after_next->ds_deadlist,
1646 			    ds->ds_phys->ds_prev_snap_txg,
1647 			    ds->ds_phys->ds_creation_txg, &space));
1648 			ds_next->ds_phys->ds_unique_bytes += space;
1649 
1650 			dsl_dataset_rele(ds_after_next, FTAG);
1651 			ASSERT3P(ds_next->ds_prev, ==, NULL);
1652 		} else {
1653 			ASSERT3P(ds_next->ds_prev, ==, ds);
1654 			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
1655 			ds_next->ds_prev = NULL;
1656 			if (ds_prev) {
1657 				VERIFY(0 == dsl_dataset_get_ref(dp,
1658 				    ds->ds_phys->ds_prev_snap_obj,
1659 				    ds_next, &ds_next->ds_prev));
1660 			}
1661 
1662 			dsl_dataset_recalc_head_uniq(ds_next);
1663 
1664 			/*
1665 			 * Reduce the amount of our unconsumed refreservation
1666 			 * being charged to our parent by the amount of
1667 			 * new unique data we have gained.
1668 			 */
1669 			if (old_unique < ds_next->ds_reserved) {
1670 				int64_t mrsdelta;
1671 				uint64_t new_unique =
1672 				    ds_next->ds_phys->ds_unique_bytes;
1673 
1674 				ASSERT(old_unique <= new_unique);
1675 				mrsdelta = MIN(new_unique - old_unique,
1676 				    ds_next->ds_reserved - old_unique);
1677 				dsl_dir_diduse_space(ds->ds_dir,
1678 				    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
1679 			}
1680 		}
1681 		dsl_dataset_rele(ds_next, FTAG);
1682 	} else {
1683 		/*
1684 		 * There's no next snapshot, so this is a head dataset.
1685 		 * Destroy the deadlist.  Unless it's a clone, the
1686 		 * deadlist should be empty.  (If it's a clone, it's
1687 		 * safe to ignore the deadlist contents.)
1688 		 */
1689 		struct killarg ka;
1690 
1691 		ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
1692 		bplist_close(&ds->ds_deadlist);
1693 		bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
1694 		ds->ds_phys->ds_deadlist_obj = 0;
1695 
1696 		/*
1697 		 * Free everything that we point to (that's born after
1698 		 * the previous snapshot, if we are a clone)
1699 		 *
1700 		 * NB: this should be very quick, because we already
1701 		 * freed all the objects in open context.
1702 		 */
1703 		ka.ds = ds;
1704 		ka.tx = tx;
1705 		err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
1706 		    TRAVERSE_POST, kill_blkptr, &ka);
1707 		ASSERT3U(err, ==, 0);
1708 		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
1709 		    ds->ds_phys->ds_unique_bytes == 0);
1710 
1711 		if (ds->ds_prev != NULL) {
1712 			dsl_dataset_rele(ds->ds_prev, ds);
1713 			ds->ds_prev = ds_prev = NULL;
1714 		}
1715 	}
1716 
1717 	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
1718 		/* Erase the link in the dir */
1719 		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1720 		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
1721 		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
1722 		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
1723 		ASSERT(err == 0);
1724 	} else {
1725 		/* remove from snapshot namespace */
1726 		dsl_dataset_t *ds_head;
1727 		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
1728 		VERIFY(0 == dsl_dataset_hold_obj(dp,
1729 		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
1730 		VERIFY(0 == dsl_dataset_get_snapname(ds));
1731 #ifdef ZFS_DEBUG
1732 		{
1733 			uint64_t val;
1734 
1735 			err = dsl_dataset_snap_lookup(ds_head,
1736 			    ds->ds_snapname, &val);
1737 			ASSERT3U(err, ==, 0);
1738 			ASSERT3U(val, ==, obj);
1739 		}
1740 #endif
1741 		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
1742 		ASSERT(err == 0);
1743 		dsl_dataset_rele(ds_head, FTAG);
1744 	}
1745 
1746 	if (ds_prev && ds->ds_prev != ds_prev)
1747 		dsl_dataset_rele(ds_prev, FTAG);
1748 
1749 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1750 	spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx,
1751 	    "dataset = %llu", ds->ds_object);
1752 
1753 	if (ds->ds_phys->ds_next_clones_obj != 0) {
1754 		uint64_t count;
1755 		ASSERT(0 == zap_count(mos,
1756 		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
1757 		VERIFY(0 == dmu_object_free(mos,
1758 		    ds->ds_phys->ds_next_clones_obj, tx));
1759 	}
1760 	if (ds->ds_phys->ds_props_obj != 0)
1761 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx));
1762 	if (ds->ds_phys->ds_userrefs_obj != 0)
1763 		VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx));
1764 	dsl_dir_close(ds->ds_dir, ds);
1765 	ds->ds_dir = NULL;
1766 	dsl_dataset_drain_refs(ds, tag);
1767 	VERIFY(0 == dmu_object_free(mos, obj, tx));
1768 
1769 	if (dsda->rm_origin) {
1770 		/*
1771 		 * Remove the origin of the clone we just destroyed.
1772 		 */
1773 		struct dsl_ds_destroyarg ndsda = {0};
1774 
1775 		ndsda.ds = dsda->rm_origin;
1776 		dsl_dataset_destroy_sync(&ndsda, tag, tx);
1777 	}
1778 }
1779 
1780 static int
1781 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
1782 {
1783 	uint64_t asize;
1784 
1785 	if (!dmu_tx_is_syncing(tx))
1786 		return (0);
1787 
1788 	/*
1789 	 * If there's an fs-only reservation, any blocks that might become
1790 	 * owned by the snapshot dataset must be accommodated by space
1791 	 * outside of the reservation.
1792 	 */
1793 	ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
1794 	asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1795 	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
1796 		return (ENOSPC);
1797 
1798 	/*
1799 	 * Propagate any reserved space for this snapshot to other
1800 	 * snapshot checks in this sync group.
1801 	 */
1802 	if (asize > 0)
1803 		dsl_dir_willuse_space(ds->ds_dir, asize, tx);
1804 
1805 	return (0);
1806 }
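
/*
 * Worked example (hypothetical numbers): with refreservation == 10G and
 * 4G of unique data, taking a snapshot would hand those 4G to the
 * snapshot while the head's reservation stays fully charged, so
 * asize = MIN(4G, 10G) = 4G of extra space must be available outside
 * the reservation; otherwise ENOSPC is returned.
 */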
1807 
1808 int
1809 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
1810 {
1811 	dsl_dataset_t *ds = arg1;
1812 	const char *snapname = arg2;
1813 	int err;
1814 	uint64_t value;
1815 
1816 	/*
1817 	 * We don't allow multiple snapshots in the same txg.  If there
1818 	 * is already one, try again.
1819 	 */
1820 	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
1821 		return (EAGAIN);
1822 
1823 	/*
1824 	 * Check for a conflicting snapshot name.
1825 	 */
1826 	err = dsl_dataset_snap_lookup(ds, snapname, &value);
1827 	if (err == 0)
1828 		return (EEXIST);
1829 	if (err != ENOENT)
1830 		return (err);
1831 
1832 	/*
1833 	 * Check that the snapshot's full name is not too long: the dataset
1834 	 * name's length + 1 for the @-sign + the snapshot name's length.
1835 	 */
1836 	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
1837 		return (ENAMETOOLONG);
1838 
1839 	err = dsl_dataset_snapshot_reserve_space(ds, tx);
1840 	if (err)
1841 		return (err);
1842 
1843 	ds->ds_trysnap_txg = tx->tx_txg;
1844 	return (0);
1845 }
1846 
1847 void
1848 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx)
1849 {
1850 	dsl_dataset_t *ds = arg1;
1851 	const char *snapname = arg2;
1852 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1853 	dmu_buf_t *dbuf;
1854 	dsl_dataset_phys_t *dsphys;
1855 	uint64_t dsobj, crtxg;
1856 	objset_t *mos = dp->dp_meta_objset;
1857 	int err;
1858 
1859 	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));
1860 
1861 	/*
1862 	 * The origin's ds_creation_txg has to be < TXG_INITIAL, so use crtxg = 1.
1863 	 */
1864 	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
1865 		crtxg = 1;
1866 	else
1867 		crtxg = tx->tx_txg;
1868 
1869 	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
1870 	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
1871 	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
1872 	dmu_buf_will_dirty(dbuf, tx);
1873 	dsphys = dbuf->db_data;
1874 	bzero(dsphys, sizeof (dsl_dataset_phys_t));
1875 	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
1876 	dsphys->ds_fsid_guid = unique_create();
1877 	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
1878 	    sizeof (dsphys->ds_guid));
1879 	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
1880 	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
1881 	dsphys->ds_next_snap_obj = ds->ds_object;
1882 	dsphys->ds_num_children = 1;
1883 	dsphys->ds_creation_time = gethrestime_sec();
1884 	dsphys->ds_creation_txg = crtxg;
1885 	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
1886 	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
1887 	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
1888 	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
1889 	dsphys->ds_flags = ds->ds_phys->ds_flags;
1890 	dsphys->ds_bp = ds->ds_phys->ds_bp;
1891 	dmu_buf_rele(dbuf, FTAG);
1892 
1893 	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
1894 	if (ds->ds_prev) {
1895 		uint64_t next_clones_obj =
1896 		    ds->ds_prev->ds_phys->ds_next_clones_obj;
1897 		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
1898 		    ds->ds_object ||
1899 		    ds->ds_prev->ds_phys->ds_num_children > 1);
1900 		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
1901 			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1902 			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
1903 			    ds->ds_prev->ds_phys->ds_creation_txg);
1904 			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
1905 		} else if (next_clones_obj != 0) {
1906 			remove_from_next_clones(ds->ds_prev,
1907 			    dsphys->ds_next_snap_obj, tx);
1908 			VERIFY3U(0, ==, zap_add_int(mos,
1909 			    next_clones_obj, dsobj, tx));
1910 		}
1911 	}
1912 
1913 	/*
1914 	 * If we have a reference-reservation on this dataset, we will
1915 	 * need to increase the amount of refreservation being charged
1916 	 * since our unique space is going to zero.
1917 	 */
1918 	if (ds->ds_reserved) {
1919 		int64_t delta;
1920 		ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
1921 		delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
1922 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
1923 		    delta, 0, 0, tx);
1924 	}
1925 
1926 	bplist_close(&ds->ds_deadlist);
1927 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1928 	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
1929 	ds->ds_phys->ds_prev_snap_obj = dsobj;
1930 	ds->ds_phys->ds_prev_snap_txg = crtxg;
1931 	ds->ds_phys->ds_unique_bytes = 0;
1932 	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
1933 		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
1934 	ds->ds_phys->ds_deadlist_obj =
1935 	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
1936 	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
1937 	    ds->ds_phys->ds_deadlist_obj));
1938 
1939 	dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
1940 	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
1941 	    snapname, 8, 1, &dsobj, tx);
1942 	ASSERT(err == 0);
1943 
1944 	if (ds->ds_prev)
1945 		dsl_dataset_drop_ref(ds->ds_prev, ds);
1946 	VERIFY(0 == dsl_dataset_get_ref(dp,
1947 	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
1948 
1949 	dsl_scan_ds_snapshotted(ds, tx);
1950 
1951 	dsl_dir_snap_cmtime_update(ds->ds_dir);
1952 
1953 	spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx,
1954 	    "dataset = %llu", dsobj);
1955 }
1956 
1957 void
1958 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
1959 {
1960 	ASSERT(dmu_tx_is_syncing(tx));
1961 	ASSERT(ds->ds_objset != NULL);
1962 	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);
1963 
1964 	/*
1965 	 * in case we had to change ds_fsid_guid when we opened it,
1966 	 * sync it out now.
1967 	 */
1968 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
1969 	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;
1970 
1971 	dsl_dir_dirty(ds->ds_dir, tx);
1972 	dmu_objset_sync(ds->ds_objset, zio, tx);
1973 }
1974 
1975 void
1976 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
1977 {
1978 	uint64_t refd, avail, uobjs, aobjs;
1979 
1980 	dsl_dir_stats(ds->ds_dir, nv);
1981 
1982 	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
1983 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
1984 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);
1985 
1986 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
1987 	    ds->ds_phys->ds_creation_time);
1988 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
1989 	    ds->ds_phys->ds_creation_txg);
1990 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
1991 	    ds->ds_quota);
1992 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
1993 	    ds->ds_reserved);
1994 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
1995 	    ds->ds_phys->ds_guid);
1996 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
1997 	    ds->ds_phys->ds_unique_bytes);
1998 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
1999 	    ds->ds_object);
2000 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
2001 	    ds->ds_userrefs);
2002 	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
2003 	    DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
2004 
2005 	if (ds->ds_phys->ds_next_snap_obj) {
2006 		/*
2007 		 * This is a snapshot; override the dd's space used with
2008 		 * our unique space and compression ratio.
2009 		 */
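		/*
		 * The ratio is scaled by 100: e.g. (hypothetical
		 * numbers) 500M uncompressed / 200M compressed yields
		 * 250, i.e. a 2.50x compression ratio.
		 */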
2010 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
2011 		    ds->ds_phys->ds_unique_bytes);
2012 		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
2013 		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
2014 		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
2015 		    ds->ds_phys->ds_compressed_bytes));
2016 	}
2017 }
2018 
2019 void
2020 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
2021 {
2022 	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
2023 	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
2024 	stat->dds_guid = ds->ds_phys->ds_guid;
2025 	if (ds->ds_phys->ds_next_snap_obj) {
2026 		stat->dds_is_snapshot = B_TRUE;
2027 		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
2028 	} else {
2029 		stat->dds_is_snapshot = B_FALSE;
2030 		stat->dds_num_clones = 0;
2031 	}
2032 
2033 	/* clone origin is really a dsl_dir thing... */
2034 	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
2035 	if (dsl_dir_is_clone(ds->ds_dir)) {
2036 		dsl_dataset_t *ods;
2037 
2038 		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
2039 		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
2040 		dsl_dataset_name(ods, stat->dds_origin);
2041 		dsl_dataset_drop_ref(ods, FTAG);
2042 	} else {
2043 		stat->dds_origin[0] = '\0';
2044 	}
2045 	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
2046 }
2047 
2048 uint64_t
2049 dsl_dataset_fsid_guid(dsl_dataset_t *ds)
2050 {
2051 	return (ds->ds_fsid_guid);
2052 }
2053 
2054 void
2055 dsl_dataset_space(dsl_dataset_t *ds,
2056     uint64_t *refdbytesp, uint64_t *availbytesp,
2057     uint64_t *usedobjsp, uint64_t *availobjsp)
2058 {
2059 	*refdbytesp = ds->ds_phys->ds_used_bytes;
2060 	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
2061 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
2062 		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
2063 	if (ds->ds_quota != 0) {
2064 		/*
2065 		 * Adjust available bytes according to refquota
2066 		 */
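		/*
		 * E.g. (hypothetical numbers): refquota = 10G,
		 * referenced = 8G, dir space available = 50G; we
		 * report MIN(50G, 10G - 8G) = 2G available.
		 */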
2067 		if (*refdbytesp < ds->ds_quota)
2068 			*availbytesp = MIN(*availbytesp,
2069 			    ds->ds_quota - *refdbytesp);
2070 		else
2071 			*availbytesp = 0;
2072 	}
2073 	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
2074 	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
2075 }
2076 
2077 boolean_t
2078 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
2079 {
2080 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
2081 
2082 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
2083 	    dsl_pool_sync_context(dp));
2084 	if (ds->ds_prev == NULL)
2085 		return (B_FALSE);
2086 	if (ds->ds_phys->ds_bp.blk_birth >
2087 	    ds->ds_prev->ds_phys->ds_creation_txg)
2088 		return (B_TRUE);
2089 	return (B_FALSE);
2090 }
2091 
2092 /* ARGSUSED */
2093 static int
2094 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
2095 {
2096 	dsl_dataset_t *ds = arg1;
2097 	char *newsnapname = arg2;
2098 	dsl_dir_t *dd = ds->ds_dir;
2099 	dsl_dataset_t *hds;
2100 	uint64_t val;
2101 	int err;
2102 
2103 	err = dsl_dataset_hold_obj(dd->dd_pool,
2104 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
2105 	if (err)
2106 		return (err);
2107 
2108 	/* new name better not be in use */
2109 	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
2110 	dsl_dataset_rele(hds, FTAG);
2111 
2112 	if (err == 0)
2113 		err = EEXIST;
2114 	else if (err == ENOENT)
2115 		err = 0;
2116 
2117 	/* dataset name + 1 for the "@" + the new snapshot name must fit */
2118 	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
2119 		err = ENAMETOOLONG;
2120 
2121 	return (err);
2122 }
2123 
2124 static void
2125 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2126 {
2127 	dsl_dataset_t *ds = arg1;
2128 	const char *newsnapname = arg2;
2129 	dsl_dir_t *dd = ds->ds_dir;
2130 	objset_t *mos = dd->dd_pool->dp_meta_objset;
2131 	dsl_dataset_t *hds;
2132 	int err;
2133 
2134 	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);
2135 
2136 	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
2137 	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));
2138 
2139 	VERIFY(0 == dsl_dataset_get_snapname(ds));
2140 	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
2141 	ASSERT3U(err, ==, 0);
2142 	mutex_enter(&ds->ds_lock);
2143 	(void) strcpy(ds->ds_snapname, newsnapname);
2144 	mutex_exit(&ds->ds_lock);
2145 	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
2146 	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
2147 	ASSERT3U(err, ==, 0);
2148 
2149 	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
2150 	    "dataset = %llu", ds->ds_object);
2151 	dsl_dataset_rele(hds, FTAG);
2152 }
2153 
2154 struct renamesnaparg {
2155 	dsl_sync_task_group_t *dstg;
2156 	char failed[MAXPATHLEN];
2157 	char *oldsnap;
2158 	char *newsnap;
2159 };
2160 
2161 static int
2162 dsl_snapshot_rename_one(const char *name, void *arg)
2163 {
2164 	struct renamesnaparg *ra = arg;
2165 	dsl_dataset_t *ds = NULL;
2166 	char *snapname;
2167 	int err;
2168 
2169 	snapname = kmem_asprintf("%s@%s", name, ra->oldsnap);
2170 	(void) strlcpy(ra->failed, snapname, sizeof (ra->failed));
2171 
2172 	/*
2173 	 * For recursive snapshot renames the parent won't be changing,
2174 	 * so we just pass the same name for both the to and from arguments.
2175 	 */
2176 	err = zfs_secpolicy_rename_perms(snapname, snapname, CRED());
2177 	if (err != 0) {
2178 		strfree(snapname);
2179 		return (err == ENOENT ? 0 : err);
2180 	}
2181 
2182 #ifdef _KERNEL
2183 	/*
2184 	 * For each filesystem undergoing rename, we need to unmount its snapshot.
2185 	 */
2186 	(void) zfs_unmount_snap(snapname, NULL);
2187 #endif
2188 	err = dsl_dataset_hold(snapname, ra->dstg, &ds);
2189 	strfree(snapname);
2190 	if (err != 0)
2191 		return (err == ENOENT ? 0 : err);
2192 
2193 	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
2194 	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
2195 
2196 	return (0);
2197 }
2198 
2199 static int
2200 dsl_recursive_rename(char *oldname, const char *newname)
2201 {
2202 	int err;
2203 	struct renamesnaparg *ra;
2204 	dsl_sync_task_t *dst;
2205 	spa_t *spa;
2206 	char *cp, *fsname = spa_strdup(oldname);
2207 	int len = strlen(oldname) + 1;
2208 
2209 	/* truncate the snapshot name to get the fsname */
2210 	cp = strchr(fsname, '@');
2211 	*cp = '\0';
2212 
2213 	err = spa_open(fsname, &spa, FTAG);
2214 	if (err) {
2215 		kmem_free(fsname, len);
2216 		return (err);
2217 	}
2218 	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
2219 	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
2220 
2221 	ra->oldsnap = strchr(oldname, '@') + 1;
2222 	ra->newsnap = strchr(newname, '@') + 1;
2223 	*ra->failed = '\0';
2224 
2225 	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
2226 	    DS_FIND_CHILDREN);
2227 	kmem_free(fsname, len);
2228 
2229 	if (err == 0) {
2230 		err = dsl_sync_task_group_wait(ra->dstg);
2231 	}
2232 
2233 	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
2234 	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
2235 		dsl_dataset_t *ds = dst->dst_arg1;
2236 		if (dst->dst_err) {
2237 			dsl_dir_name(ds->ds_dir, ra->failed);
2238 			(void) strlcat(ra->failed, "@", sizeof (ra->failed));
2239 			(void) strlcat(ra->failed, ra->newsnap,
2240 			    sizeof (ra->failed));
2241 		}
2242 		dsl_dataset_rele(ds, ra->dstg);
2243 	}
2244 
2245 	if (err)
2246 		(void) strlcpy(oldname, ra->failed, sizeof (ra->failed));
2247 
2248 	dsl_sync_task_group_destroy(ra->dstg);
2249 	kmem_free(ra, sizeof (struct renamesnaparg));
2250 	spa_close(spa, FTAG);
2251 	return (err);
2252 }
2253 
2254 static int
2255 dsl_valid_rename(const char *oldname, void *arg)
2256 {
2257 	int delta = *(int *)arg;
2258 
2259 	if (strlen(oldname) + delta >= MAXNAMELEN)
2260 		return (ENAMETOOLONG);
2261 
2262 	return (0);
2263 }
2264 
2265 #pragma weak dmu_objset_rename = dsl_dataset_rename
2266 int
2267 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
2268 {
2269 	dsl_dir_t *dd;
2270 	dsl_dataset_t *ds;
2271 	const char *tail;
2272 	int err;
2273 
2274 	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
2275 	if (err)
2276 		return (err);
2277 
2278 	if (tail == NULL) {
2279 		int delta = strlen(newname) - strlen(oldname);
2280 
2281 		/* if we're growing, validate child name lengths */
2282 		if (delta > 0)
2283 			err = dmu_objset_find(oldname, dsl_valid_rename,
2284 			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
2285 
2286 		if (err == 0)
2287 			err = dsl_dir_rename(dd, newname);
2288 		dsl_dir_close(dd, FTAG);
2289 		return (err);
2290 	}
2291 
2292 	if (tail[0] != '@') {
2293 		/* the name ended in a nonexistent component */
2294 		dsl_dir_close(dd, FTAG);
2295 		return (ENOENT);
2296 	}
2297 
2298 	dsl_dir_close(dd, FTAG);
2299 
2300 	/* new name must be snapshot in same filesystem */
2301 	tail = strchr(newname, '@');
2302 	if (tail == NULL)
2303 		return (EINVAL);
2304 	tail++;
2305 	if (strncmp(oldname, newname, tail - newname) != 0)
2306 		return (EXDEV);
2307 
2308 	if (recursive) {
2309 		err = dsl_recursive_rename(oldname, newname);
2310 	} else {
2311 		err = dsl_dataset_hold(oldname, FTAG, &ds);
2312 		if (err)
2313 			return (err);
2314 
2315 		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
2316 		    dsl_dataset_snapshot_rename_check,
2317 		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
2318 
2319 		dsl_dataset_rele(ds, FTAG);
2320 	}
2321 
2322 	return (err);
2323 }
2324 
2325 struct promotenode {
2326 	list_node_t link;
2327 	dsl_dataset_t *ds;
2328 };
2329 
2330 struct promotearg {
2331 	list_t shared_snaps, origin_snaps, clone_snaps;
2332 	dsl_dataset_t *origin_origin;
2333 	uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
2334 	char *err_ds;
2335 };
2336 
2337 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
2338 static boolean_t snaplist_unstable(list_t *l);
2339 
2340 static int
2341 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
2342 {
2343 	dsl_dataset_t *hds = arg1;
2344 	struct promotearg *pa = arg2;
2345 	struct promotenode *snap = list_head(&pa->shared_snaps);
2346 	dsl_dataset_t *origin_ds = snap->ds;
2347 	int err;
2348 
2349 	/* Check that it is a real clone */
2350 	if (!dsl_dir_is_clone(hds->ds_dir))
2351 		return (EINVAL);
2352 
2353 	/* Since this is so expensive, don't do the preliminary check */
2354 	if (!dmu_tx_is_syncing(tx))
2355 		return (0);
2356 
2357 	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
2358 		return (EXDEV);
2359 
2360 	/* compute origin's new unique space */
2361 	snap = list_tail(&pa->clone_snaps);
2362 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2363 	err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2364 	    origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, &pa->unique);
2365 	if (err)
2366 		return (err);
2367 
2368 	/*
2369 	 * Walk the snapshots that we are moving
2370 	 *
2371 	 * Compute space to transfer.  Consider the incremental changes
2372 	 * to used for each snapshot:
2373 	 * (my used) = (prev's used) + (blocks born) - (blocks killed)
2374 	 * So each snapshot gave birth to:
2375 	 * (blocks born) = (my used) - (prev's used) + (blocks killed)
2376 	 * So a sequence would look like:
2377 	 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
2378 	 * Which simplifies to:
2379 	 * uN + kN + kN-1 + ... + k1 + k0
2380 	 * Note however, if we stop before we reach the ORIGIN we get:
2381 	 * uN + kN + kN-1 + ... + kM - uM-1
2382 	 */
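	/*
	 * Worked example (hypothetical numbers): for snapshots s0, s1, s2
	 * with u2 = 5G and deadlist (killed) sizes k0 = 1G, k1 = 2G,
	 * k2 = 1G, the space to transfer is u2 + k2 + k1 + k0 = 9G.
	 */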
2383 	pa->used = origin_ds->ds_phys->ds_used_bytes;
2384 	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
2385 	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
2386 	for (snap = list_head(&pa->shared_snaps); snap;
2387 	    snap = list_next(&pa->shared_snaps, snap)) {
2388 		uint64_t val, dlused, dlcomp, dluncomp;
2389 		dsl_dataset_t *ds = snap->ds;
2390 
2391 		/* Check that the snapshot name does not conflict */
2392 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2393 		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
2394 		if (err == 0) {
2395 			err = EEXIST;
2396 			goto out;
2397 		}
2398 		if (err != ENOENT)
2399 			goto out;
2400 
2401 		/* The very first snapshot does not have a deadlist */
2402 		if (ds->ds_phys->ds_prev_snap_obj == 0)
2403 			continue;
2404 
2405 		if ((err = bplist_space(&ds->ds_deadlist,
2406 		    &dlused, &dlcomp, &dluncomp)) != 0)
2407 			goto out;
2408 		pa->used += dlused;
2409 		pa->comp += dlcomp;
2410 		pa->uncomp += dluncomp;
2411 	}
2412 
2413 	/*
2414 	 * If we are a clone of a clone then we never reached ORIGIN,
2415 	 * so we need to subtract out the clone origin's used space.
2416 	 */
2417 	if (pa->origin_origin) {
2418 		pa->used -= pa->origin_origin->ds_phys->ds_used_bytes;
2419 		pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes;
2420 		pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes;
2421 	}
2422 
2423 	/* Check that there is enough space here */
2424 	err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
2425 	    pa->used);
2426 	if (err)
2427 		return (err);
2428 
2429 	/*
2430 	 * Compute the amounts of space that will be used by snapshots
2431 	 * after the promotion (for both origin and clone).  For each,
2432 	 * it is the amount of space that will be on all of their
2433 	 * deadlists (that was not born before their new origin).
2434 	 */
2435 	if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2436 		uint64_t space;
2437 
2438 		/*
2439 		 * Note, typically this will not be a clone of a clone,
2440 		 * so dd_origin_txg will be < TXG_INITIAL, and
2441 		 * these snaplist_space() -> bplist_space_birthrange()
2442 		 * calls will be fast because they do not have to
2443 		 * iterate over all bps.
2444 		 */
2445 		snap = list_head(&pa->origin_snaps);
2446 		err = snaplist_space(&pa->shared_snaps,
2447 		    snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap);
2448 		if (err)
2449 			return (err);
2450 
2451 		err = snaplist_space(&pa->clone_snaps,
2452 		    snap->ds->ds_dir->dd_origin_txg, &space);
2453 		if (err)
2454 			return (err);
2455 		pa->cloneusedsnap += space;
2456 	}
2457 	if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
2458 		err = snaplist_space(&pa->origin_snaps,
2459 		    origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap);
2460 		if (err)
2461 			return (err);
2462 	}
2463 
2464 	return (0);
2465 out:
2466 	pa->err_ds = snap->ds->ds_snapname;
2467 	return (err);
2468 }
2469 
2470 static void
2471 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2472 {
2473 	dsl_dataset_t *hds = arg1;
2474 	struct promotearg *pa = arg2;
2475 	struct promotenode *snap = list_head(&pa->shared_snaps);
2476 	dsl_dataset_t *origin_ds = snap->ds;
2477 	dsl_dataset_t *origin_head;
2478 	dsl_dir_t *dd = hds->ds_dir;
2479 	dsl_pool_t *dp = hds->ds_dir->dd_pool;
2480 	dsl_dir_t *odd = NULL;
2481 	uint64_t oldnext_obj;
2482 	int64_t delta;
2483 
2484 	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));
2485 
2486 	snap = list_head(&pa->origin_snaps);
2487 	origin_head = snap->ds;
2488 
2489 	/*
2490 	 * We need to explicitly open odd, since origin_ds's dd will be
2491 	 * changing.
2492 	 */
2493 	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
2494 	    NULL, FTAG, &odd));
2495 
2496 	/* change origin's next snap */
2497 	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
2498 	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
2499 	snap = list_tail(&pa->clone_snaps);
2500 	ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object);
2501 	origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object;
2502 
2503 	/* change the origin's next clone */
2504 	if (origin_ds->ds_phys->ds_next_clones_obj) {
2505 		remove_from_next_clones(origin_ds, snap->ds->ds_object, tx);
2506 		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
2507 		    origin_ds->ds_phys->ds_next_clones_obj,
2508 		    oldnext_obj, tx));
2509 	}
2510 
2511 	/* change origin */
2512 	dmu_buf_will_dirty(dd->dd_dbuf, tx);
2513 	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
2514 	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
2515 	dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
2516 	dmu_buf_will_dirty(odd->dd_dbuf, tx);
2517 	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;
2518 	origin_head->ds_dir->dd_origin_txg =
2519 	    origin_ds->ds_phys->ds_creation_txg;
2520 
2521 	/* move snapshots to this dir */
2522 	for (snap = list_head(&pa->shared_snaps); snap;
2523 	    snap = list_next(&pa->shared_snaps, snap)) {
2524 		dsl_dataset_t *ds = snap->ds;
2525 
2526 		/* unregister props as dsl_dir is changing */
2527 		if (ds->ds_objset) {
2528 			dmu_objset_evict(ds->ds_objset);
2529 			ds->ds_objset = NULL;
2530 		}
2531 		/* move snap name entry */
2532 		VERIFY(0 == dsl_dataset_get_snapname(ds));
2533 		VERIFY(0 == dsl_dataset_snap_remove(origin_head,
2534 		    ds->ds_snapname, tx));
2535 		VERIFY(0 == zap_add(dp->dp_meta_objset,
2536 		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
2537 		    8, 1, &ds->ds_object, tx));
2538 		/* change containing dsl_dir */
2539 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2540 		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
2541 		ds->ds_phys->ds_dir_obj = dd->dd_object;
2542 		ASSERT3P(ds->ds_dir, ==, odd);
2543 		dsl_dir_close(ds->ds_dir, ds);
2544 		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
2545 		    NULL, ds, &ds->ds_dir));
2546 
2547 		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
2548 	}
2549 
2550 	/*
2551 	 * Change space accounting.
2552 	 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
2553 	 * both be valid, or both be 0 (resulting in delta == 0).  This
2554 	 * is true for each of {clone,origin} independently.
2555 	 */
2556 
2557 	delta = pa->cloneusedsnap -
2558 	    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2559 	ASSERT3S(delta, >=, 0);
2560 	ASSERT3U(pa->used, >=, delta);
2561 	dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
2562 	dsl_dir_diduse_space(dd, DD_USED_HEAD,
2563 	    pa->used - delta, pa->comp, pa->uncomp, tx);
2564 
2565 	delta = pa->originusedsnap -
2566 	    odd->dd_phys->dd_used_breakdown[DD_USED_SNAP];
2567 	ASSERT3S(delta, <=, 0);
2568 	ASSERT3U(pa->used, >=, -delta);
2569 	dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
2570 	dsl_dir_diduse_space(odd, DD_USED_HEAD,
2571 	    -pa->used - delta, -pa->comp, -pa->uncomp, tx);
2572 
2573 	origin_ds->ds_phys->ds_unique_bytes = pa->unique;
2574 
2575 	/* log history record */
2576 	spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
2577 	    "dataset = %llu", hds->ds_object);
2578 
2579 	dsl_dir_close(odd, FTAG);
2580 }
2581 
2582 static char *snaplist_tag = "snaplist";
2583 /*
2584  * Make a list of dsl_dataset_t's for the snapshots between first_obj
2585  * (exclusive) and last_obj (inclusive).  The list will be in reverse
2586  * order (last_obj will be the list_head()).  If first_obj == 0, do all
2587  * snapshots back to this dataset's origin.
2588  */
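/*
 * Illustrative example (hypothetical objects): for a snapshot chain
 * A (obj 10) -> B (obj 20) -> C (obj 30), snaplist_make(dp, own, 10, 30, l)
 * yields the list C, B (C at the list_head(); A is excluded).
 */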
2589 static int
2590 snaplist_make(dsl_pool_t *dp, boolean_t own,
2591     uint64_t first_obj, uint64_t last_obj, list_t *l)
2592 {
2593 	uint64_t obj = last_obj;
2594 
2595 	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock));
2596 
2597 	list_create(l, sizeof (struct promotenode),
2598 	    offsetof(struct promotenode, link));
2599 
2600 	while (obj != first_obj) {
2601 		dsl_dataset_t *ds;
2602 		struct promotenode *snap;
2603 		int err;
2604 
2605 		if (own) {
2606 			err = dsl_dataset_own_obj(dp, obj,
2607 			    0, snaplist_tag, &ds);
2608 			if (err == 0)
2609 				dsl_dataset_make_exclusive(ds, snaplist_tag);
2610 		} else {
2611 			err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds);
2612 		}
2613 		if (err == ENOENT) {
2614 			/* lost race with snapshot destroy */
2615 			struct promotenode *last = list_tail(l);
2616 			ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj);
2617 			obj = last->ds->ds_phys->ds_prev_snap_obj;
2618 			continue;
2619 		} else if (err) {
2620 			return (err);
2621 		}
2622 
2623 		if (first_obj == 0)
2624 			first_obj = ds->ds_dir->dd_phys->dd_origin_obj;
2625 
2626 		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
2627 		snap->ds = ds;
2628 		list_insert_tail(l, snap);
2629 		obj = ds->ds_phys->ds_prev_snap_obj;
2630 	}
2631 
2632 	return (0);
2633 }
2634 
2635 static int
2636 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
2637 {
2638 	struct promotenode *snap;
2639 
2640 	*spacep = 0;
2641 	for (snap = list_head(l); snap; snap = list_next(l, snap)) {
2642 		uint64_t used;
2643 		int err = bplist_space_birthrange(&snap->ds->ds_deadlist,
2644 		    mintxg, UINT64_MAX, &used);
2645 		if (err)
2646 			return (err);
2647 		*spacep += used;
2648 	}
2649 	return (0);
2650 }
2651 
2652 static void
2653 snaplist_destroy(list_t *l, boolean_t own)
2654 {
2655 	struct promotenode *snap;
2656 
2657 	if (!l || !list_link_active(&l->list_head))
2658 		return;
2659 
2660 	while ((snap = list_tail(l)) != NULL) {
2661 		list_remove(l, snap);
2662 		if (own)
2663 			dsl_dataset_disown(snap->ds, snaplist_tag);
2664 		else
2665 			dsl_dataset_rele(snap->ds, snaplist_tag);
2666 		kmem_free(snap, sizeof (struct promotenode));
2667 	}
2668 	list_destroy(l);
2669 }
2670 
2671 /*
2672  * Promote a clone.  Nomenclature note:
2673  * "clone" or "cds": the original clone which is being promoted
2674  * "origin" or "ods": the snapshot which is originally clone's origin
2675  * "origin head" or "ohds": the dataset which is the head
2676  * (filesystem/volume) for the origin
2677  * "origin origin": the origin of the origin's filesystem (typically
2678  * NULL, indicating that the clone is not a clone of a clone).
2679  */
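/*
 * Illustrative example (hypothetical names): if "pool/clone" was cloned
 * from "pool/fs@snap", then promoting "pool/clone" gives cds = pool/clone,
 * ods = pool/fs@snap, and ohds = pool/fs.
 */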
2680 int
2681 dsl_dataset_promote(const char *name, char *conflsnap)
2682 {
2683 	dsl_dataset_t *ds;
2684 	dsl_dir_t *dd;
2685 	dsl_pool_t *dp;
2686 	dmu_object_info_t doi;
2687 	struct promotearg pa = { 0 };
2688 	struct promotenode *snap;
2689 	int err;
2690 
2691 	err = dsl_dataset_hold(name, FTAG, &ds);
2692 	if (err)
2693 		return (err);
2694 	dd = ds->ds_dir;
2695 	dp = dd->dd_pool;
2696 
2697 	err = dmu_object_info(dp->dp_meta_objset,
2698 	    ds->ds_phys->ds_snapnames_zapobj, &doi);
2699 	if (err) {
2700 		dsl_dataset_rele(ds, FTAG);
2701 		return (err);
2702 	}
2703 
2704 	if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) {
2705 		dsl_dataset_rele(ds, FTAG);
2706 		return (EINVAL);
2707 	}
2708 
2709 	/*
2710 	 * We are going to inherit all the snapshots taken before our
2711 	 * origin (i.e., our new origin will be our parent's origin).
2712 	 * Take ownership of them so that we can rename them into our
2713 	 * namespace.
2714 	 */
2715 	rw_enter(&dp->dp_config_rwlock, RW_READER);
2716 
2717 	err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj,
2718 	    &pa.shared_snaps);
2719 	if (err != 0)
2720 		goto out;
2721 
2722 	err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps);
2723 	if (err != 0)
2724 		goto out;
2725 
2726 	snap = list_head(&pa.shared_snaps);
2727 	ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj);
2728 	err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj,
2729 	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps);
2730 	if (err != 0)
2731 		goto out;
2732 
2733 	if (dsl_dir_is_clone(snap->ds->ds_dir)) {
2734 		err = dsl_dataset_own_obj(dp,
2735 		    snap->ds->ds_dir->dd_phys->dd_origin_obj,
2736 		    0, FTAG, &pa.origin_origin);
2737 		if (err != 0)
2738 			goto out;
2739 	}
2740 
2741 out:
2742 	rw_exit(&dp->dp_config_rwlock);
2743 
2744 	/*
2745 	 * Add in 128x the snapnames zapobj size, since we will be moving
2746 	 * a bunch of snapnames to the promoted ds, and dirtying their
2747 	 * bonus buffers.
2748 	 */
2749 	if (err == 0) {
2750 		err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
2751 		    dsl_dataset_promote_sync, ds, &pa,
2752 		    2 + 2 * doi.doi_physical_blocks_512);
2753 		if (err && pa.err_ds && conflsnap)
2754 			(void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN);
2755 	}
2756 
2757 	snaplist_destroy(&pa.shared_snaps, B_TRUE);
2758 	snaplist_destroy(&pa.clone_snaps, B_FALSE);
2759 	snaplist_destroy(&pa.origin_snaps, B_FALSE);
2760 	if (pa.origin_origin)
2761 		dsl_dataset_disown(pa.origin_origin, FTAG);
2762 	dsl_dataset_rele(ds, FTAG);
2763 	return (err);
2764 }
2765 
2766 struct cloneswaparg {
2767 	dsl_dataset_t *cds; /* clone dataset */
2768 	dsl_dataset_t *ohds; /* origin's head dataset */
2769 	boolean_t force;
2770 	int64_t unused_refres_delta; /* change in unconsumed refreservation */
2771 };
2772 
2773 /* ARGSUSED */
2774 static int
2775 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
2776 {
2777 	struct cloneswaparg *csa = arg1;
2778 
2779 	/* they should both be heads */
2780 	if (dsl_dataset_is_snapshot(csa->cds) ||
2781 	    dsl_dataset_is_snapshot(csa->ohds))
2782 		return (EINVAL);
2783 
2784 	/* the branch point should be just before them */
2785 	if (csa->cds->ds_prev != csa->ohds->ds_prev)
2786 		return (EINVAL);
2787 
2788 	/* cds should be the clone (unless they are unrelated) */
2789 	if (csa->cds->ds_prev != NULL &&
2790 	    csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap &&
2791 	    csa->ohds->ds_object !=
2792 	    csa->cds->ds_prev->ds_phys->ds_next_snap_obj)
2793 		return (EINVAL);
2794 
2795 	/* the clone should be a child of the origin */
2796 	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
2797 		return (EINVAL);
2798 
2799 	/* ohds shouldn't be modified unless 'force' */
2800 	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
2801 		return (ETXTBSY);
2802 
2803 	/* adjust amount of any unconsumed refreservation */
2804 	csa->unused_refres_delta =
2805 	    (int64_t)MIN(csa->ohds->ds_reserved,
2806 	    csa->ohds->ds_phys->ds_unique_bytes) -
2807 	    (int64_t)MIN(csa->ohds->ds_reserved,
2808 	    csa->cds->ds_phys->ds_unique_bytes);
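	/*
	 * E.g. (hypothetical numbers): refreservation = 10G, head
	 * unique = 3G, clone unique = 7G: delta = 3G - 7G = -4G, i.e.
	 * the swap will consume 4G more of the refreservation.
	 */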
2809 
2810 	if (csa->unused_refres_delta > 0 &&
2811 	    csa->unused_refres_delta >
2812 	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
2813 		return (ENOSPC);
2814 
2815 	if (csa->ohds->ds_quota != 0 &&
2816 	    csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota)
2817 		return (EDQUOT);
2818 
2819 	return (0);
2820 }
2821 
2822 /* ARGSUSED */
2823 static void
2824 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx)
2825 {
2826 	struct cloneswaparg *csa = arg1;
2827 	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
2828 
2829 	ASSERT(csa->cds->ds_reserved == 0);
2830 	ASSERT(csa->ohds->ds_quota == 0 ||
2831 	    csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota);
2832 
2833 	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
2834 	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
2835 
2836 	if (csa->cds->ds_objset != NULL) {
2837 		dmu_objset_evict(csa->cds->ds_objset);
2838 		csa->cds->ds_objset = NULL;
2839 	}
2840 
2841 	if (csa->ohds->ds_objset != NULL) {
2842 		dmu_objset_evict(csa->ohds->ds_objset);
2843 		csa->ohds->ds_objset = NULL;
2844 	}
2845 
2846 	/*
2847 	 * Reset origin's unique bytes, if it exists.
2848 	 */
2849 	if (csa->cds->ds_prev) {
2850 		dsl_dataset_t *origin = csa->cds->ds_prev;
2851 		dmu_buf_will_dirty(origin->ds_dbuf, tx);
2852 		VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2853 		    origin->ds_phys->ds_prev_snap_txg, UINT64_MAX,
2854 		    &origin->ds_phys->ds_unique_bytes));
2855 	}
2856 
2857 	/* swap blkptrs */
2858 	{
2859 		blkptr_t tmp;
2860 		tmp = csa->ohds->ds_phys->ds_bp;
2861 		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
2862 		csa->cds->ds_phys->ds_bp = tmp;
2863 	}
2864 
2865 	/* set dd_*_bytes */
2866 	{
2867 		int64_t dused, dcomp, duncomp;
2868 		uint64_t cdl_used, cdl_comp, cdl_uncomp;
2869 		uint64_t odl_used, odl_comp, odl_uncomp;
2870 
2871 		ASSERT3U(csa->cds->ds_dir->dd_phys->
2872 		    dd_used_breakdown[DD_USED_SNAP], ==, 0);
2873 
2874 		VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
2875 		    &cdl_comp, &cdl_uncomp));
2876 		VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
2877 		    &odl_comp, &odl_uncomp));
2878 
2879 		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
2880 		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
2881 		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
2882 		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
2883 		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
2884 		    cdl_uncomp -
2885 		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);
2886 
2887 		dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD,
2888 		    dused, dcomp, duncomp, tx);
2889 		dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD,
2890 		    -dused, -dcomp, -duncomp, tx);
2891 
2892 		/*
2893 		 * The difference in the space used by snapshots is the
2894 		 * difference in snapshot space due to the head's
2895 		 * deadlist (since that's the only thing that's
2896 		 * changing that affects the snapused).
2897 		 */
2898 		VERIFY(0 == bplist_space_birthrange(&csa->cds->ds_deadlist,
2899 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &cdl_used));
2900 		VERIFY(0 == bplist_space_birthrange(&csa->ohds->ds_deadlist,
2901 		    csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, &odl_used));
2902 		dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used,
2903 		    DD_USED_HEAD, DD_USED_SNAP, tx);
2904 	}
2905 
2906 #define	SWITCH64(x, y) \
2907 	{ \
2908 		uint64_t __tmp = (x); \
2909 		(x) = (y); \
2910 		(y) = __tmp; \
2911 	}
2912 
2913 	/* swap ds_*_bytes */
2914 	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
2915 	    csa->cds->ds_phys->ds_used_bytes);
2916 	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
2917 	    csa->cds->ds_phys->ds_compressed_bytes);
2918 	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
2919 	    csa->cds->ds_phys->ds_uncompressed_bytes);
2920 	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
2921 	    csa->cds->ds_phys->ds_unique_bytes);
2922 
2923 	/* apply any parent delta for change in unconsumed refreservation */
2924 	dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV,
2925 	    csa->unused_refres_delta, 0, 0, tx);
2926 
2927 	/* swap deadlists */
2928 	bplist_close(&csa->cds->ds_deadlist);
2929 	bplist_close(&csa->ohds->ds_deadlist);
2930 	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
2931 	    csa->cds->ds_phys->ds_deadlist_obj);
2932 	VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
2933 	    csa->cds->ds_phys->ds_deadlist_obj));
2934 	VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
2935 	    csa->ohds->ds_phys->ds_deadlist_obj));
2936 
2937 	dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx);
2938 }
2939 
2940 /*
2941  * Swap 'clone' with its origin head dataset.  Used at the end of "zfs
2942  * recv" into an existing fs to swizzle the file system to the new
2943  * version, and by "zfs rollback".  Can also be used to swap two
2944  * independent head datasets if neither has any snapshots.
2945  */
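/*
 * For example (a simplified sketch of the flow, not the exact call
 * path): "zfs rollback fs@snap" clones fs@snap and then calls
 * dsl_dataset_clone_swap() to make that clone the new head of fs.
 */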
2946 int
2947 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
2948     boolean_t force)
2949 {
2950 	struct cloneswaparg csa;
2951 	int error;
2952 
2953 	ASSERT(clone->ds_owner);
2954 	ASSERT(origin_head->ds_owner);
2955 retry:
2956 	/* Need exclusive access for the swap */
2957 	rw_enter(&clone->ds_rwlock, RW_WRITER);
2958 	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
2959 		rw_exit(&clone->ds_rwlock);
2960 		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
2961 		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
2962 			rw_exit(&origin_head->ds_rwlock);
2963 			goto retry;
2964 		}
2965 	}
2966 	csa.cds = clone;
2967 	csa.ohds = origin_head;
2968 	csa.force = force;
2969 	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
2970 	    dsl_dataset_clone_swap_check,
2971 	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
2972 	return (error);
2973 }
2974 
2975 /*
2976  * Given a pool name and a dataset object number in that pool,
2977  * return the name of that dataset.
2978  */
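/*
 * E.g. (illustrative): dsl_dsobj_to_dsname("tank", obj, buf) might fill
 * buf with "tank/home@snap"; buf should hold at least MAXNAMELEN bytes.
 */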
2979 int
2980 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
2981 {
2982 	spa_t *spa;
2983 	dsl_pool_t *dp;
2984 	dsl_dataset_t *ds;
2985 	int error;
2986 
2987 	if ((error = spa_open(pname, &spa, FTAG)) != 0)
2988 		return (error);
2989 	dp = spa_get_dsl(spa);
2990 	rw_enter(&dp->dp_config_rwlock, RW_READER);
2991 	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
2992 		dsl_dataset_name(ds, buf);
2993 		dsl_dataset_rele(ds, FTAG);
2994 	}
2995 	rw_exit(&dp->dp_config_rwlock);
2996 	spa_close(spa, FTAG);
2997 
2998 	return (error);
2999 }
3000 
3001 int
3002 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
3003     uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
3004 {
3005 	int error = 0;
3006 
3007 	ASSERT3S(asize, >, 0);
3008 
3009 	/*
3010 	 * *ref_rsrv is the portion of asize that will come from any
3011 	 * unconsumed refreservation space.
3012 	 */
3013 	*ref_rsrv = 0;
3014 
3015 	mutex_enter(&ds->ds_lock);
3016 	/*
3017 	 * Make a space adjustment for reserved bytes.
3018 	 */
3019 	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
3020 		ASSERT3U(*used, >=,
3021 		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3022 		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
3023 		*ref_rsrv =
3024 		    asize - MIN(asize, parent_delta(ds, asize + inflight));
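		/*
		 * E.g. (hypothetical numbers): reserved = 10G,
		 * unique = 4G, asize = 1G, inflight = 0: parent_delta()
		 * is 0, so *ref_rsrv = 1G - 0 = 1G (the write is fully
		 * absorbed by the unconsumed refreservation).
		 */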
3025 	}
3026 
3027 	if (!check_quota || ds->ds_quota == 0) {
3028 		mutex_exit(&ds->ds_lock);
3029 		return (0);
3030 	}
3031 	/*
3032 	 * If they are requesting more space, and our current estimate
3033 	 * is over quota, they get to try again unless the actual
3034 	 * on-disk is over quota and there are no pending changes (which
3035 	 * may free up space for us).
3036 	 */
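	/*
	 * E.g. (hypothetical numbers): quota = 10G, on-disk used = 9G,
	 * inflight = 2G: the estimate (11G) is over quota, but pending
	 * changes may free space, so return ERESTART rather than EDQUOT.
	 */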
3037 	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
3038 		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
3039 			error = ERESTART;
3040 		else
3041 			error = EDQUOT;
3042 	}
3043 	mutex_exit(&ds->ds_lock);
3044 
3045 	return (error);
3046 }
3047 
3048 /* ARGSUSED */
3049 static int
3050 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
3051 {
3052 	dsl_dataset_t *ds = arg1;
3053 	dsl_prop_setarg_t *psa = arg2;
3054 	int err;
3055 
3056 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
3057 		return (ENOTSUP);
3058 
3059 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3060 		return (err);
3061 
3062 	if (psa->psa_effective_value == 0)
3063 		return (0);
3064 
3065 	if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes ||
3066 	    psa->psa_effective_value < ds->ds_reserved)
3067 		return (ENOSPC);
3068 
3069 	return (0);
3070 }
3071 
3072 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *);
3073 
3074 void
3075 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3076 {
3077 	dsl_dataset_t *ds = arg1;
3078 	dsl_prop_setarg_t *psa = arg2;
3079 	uint64_t effective_value = psa->psa_effective_value;
3080 
3081 	dsl_prop_set_sync(ds, psa, tx);
3082 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3083 
3084 	if (ds->ds_quota != effective_value) {
3085 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3086 		ds->ds_quota = effective_value;
3087 
3088 		spa_history_log_internal(LOG_DS_REFQUOTA,
3089 		    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ",
3090 		    (longlong_t)ds->ds_quota, ds->ds_object);
3091 	}
3092 }
3093 
3094 int
3095 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota)
3096 {
3097 	dsl_dataset_t *ds;
3098 	dsl_prop_setarg_t psa;
3099 	int err;
3100 
3101 	dsl_prop_setarg_init_uint64(&psa, "refquota", source, &quota);
3102 
3103 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3104 	if (err)
3105 		return (err);
3106 
3107 	/*
3108 	 * If someone removes a file, then tries to set the quota, we
3109 	 * want to make sure the file freeing takes effect.
3110 	 */
3111 	txg_wait_open(ds->ds_dir->dd_pool, 0);
3112 
3113 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3114 	    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
3115 	    ds, &psa, 0);
3116 
3117 	dsl_dataset_rele(ds, FTAG);
3118 	return (err);
3119 }
3120 
3121 static int
3122 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
3123 {
3124 	dsl_dataset_t *ds = arg1;
3125 	dsl_prop_setarg_t *psa = arg2;
3126 	uint64_t effective_value;
3127 	uint64_t unique;
3128 	int err;
3129 
3130 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
3131 	    SPA_VERSION_REFRESERVATION)
3132 		return (ENOTSUP);
3133 
3134 	if (dsl_dataset_is_snapshot(ds))
3135 		return (EINVAL);
3136 
3137 	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
3138 		return (err);
3139 
3140 	effective_value = psa->psa_effective_value;
3141 
3142 	/*
3143 	 * If we are doing the preliminary check in open context, the
3144 	 * space estimates may be inaccurate.
3145 	 */
3146 	if (!dmu_tx_is_syncing(tx))
3147 		return (0);
3148 
3149 	mutex_enter(&ds->ds_lock);
3150 	if (!DS_UNIQUE_IS_ACCURATE(ds))
3151 		dsl_dataset_recalc_head_uniq(ds);
3152 	unique = ds->ds_phys->ds_unique_bytes;
3153 	mutex_exit(&ds->ds_lock);
3154 
3155 	if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) {
3156 		uint64_t delta = MAX(unique, effective_value) -
3157 		    MAX(unique, ds->ds_reserved);
3158 
3159 		if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
3160 			return (ENOSPC);
3161 		if (ds->ds_quota > 0 &&
3162 		    effective_value > ds->ds_quota)
3163 			return (ENOSPC);
3164 	}
3165 
3166 	return (0);
3167 }
3168 
3169 static void
3170 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3171 {
3172 	dsl_dataset_t *ds = arg1;
3173 	dsl_prop_setarg_t *psa = arg2;
3174 	uint64_t effective_value = psa->psa_effective_value;
3175 	uint64_t unique;
3176 	int64_t delta;
3177 
3178 	dsl_prop_set_sync(ds, psa, tx);
3179 	DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa);
3180 
3181 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
3182 
3183 	mutex_enter(&ds->ds_dir->dd_lock);
3184 	mutex_enter(&ds->ds_lock);
3185 	ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
3186 	unique = ds->ds_phys->ds_unique_bytes;
3187 	delta = MAX(0, (int64_t)(effective_value - unique)) -
3188 	    MAX(0, (int64_t)(ds->ds_reserved - unique));
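	/*
	 * E.g. (hypothetical numbers): unique = 1G, old reservation = 2G,
	 * new value = 5G: delta = MAX(0, 5G - 1G) - MAX(0, 2G - 1G) = 3G
	 * of additional refreservation charged to the dsl_dir.
	 */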
3189 	ds->ds_reserved = effective_value;
3190 	mutex_exit(&ds->ds_lock);
3191 
3192 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
3193 	mutex_exit(&ds->ds_dir->dd_lock);
3194 
3195 	spa_history_log_internal(LOG_DS_REFRESERV,
3196 	    ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu",
3197 	    (longlong_t)effective_value, ds->ds_object);
3198 }
3199 
3200 int
3201 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source,
3202     uint64_t reservation)
3203 {
3204 	dsl_dataset_t *ds;
3205 	dsl_prop_setarg_t psa;
3206 	int err;
3207 
3208 	dsl_prop_setarg_init_uint64(&psa, "refreservation", source,
3209 	    &reservation);
3210 
3211 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3212 	if (err)
3213 		return (err);
3214 
3215 	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
3216 	    dsl_dataset_set_reservation_check,
3217 	    dsl_dataset_set_reservation_sync, ds, &psa, 0);
3218 
3219 	dsl_dataset_rele(ds, FTAG);
3220 	return (err);
3221 }
3222 
3223 struct dsl_ds_holdarg {
3224 	dsl_sync_task_group_t *dstg;
3225 	char *htag;
3226 	char *snapname;
3227 	boolean_t recursive;
3228 	boolean_t gotone;
3229 	boolean_t temphold;
3230 	char failed[MAXPATHLEN];
3231 };
3232 
3233 /*
3234  * The max length of a temporary tag prefix is the number of hex digits
3235  * required to express UINT64_MAX plus one for the hyphen.
3236  */
3237 #define	MAX_TAG_PREFIX_LEN	17
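/* e.g. "ffffffffffffffff-" (16 hex digits plus the hyphen) is 17 chars */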
3238 
3239 static int
3240 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx)
3241 {
3242 	dsl_dataset_t *ds = arg1;
3243 	struct dsl_ds_holdarg *ha = arg2;
3244 	char *htag = ha->htag;
3245 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3246 	int error = 0;
	uint64_t tmp;			/* scratch for the zap_lookup() below */
3247 
3248 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3249 		return (ENOTSUP);
3250 
3251 	if (!dsl_dataset_is_snapshot(ds))
3252 		return (EINVAL);
3253 
3254 	/* tags must be unique */
3255 	mutex_enter(&ds->ds_lock);
3256 	if (ds->ds_phys->ds_userrefs_obj) {
3257 		error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag,
3258 		    8, 1, &tmp);
3259 		if (error == 0)
3260 			error = EEXIST;
3261 		else if (error == ENOENT)
3262 			error = 0;
3263 	}
3264 	mutex_exit(&ds->ds_lock);
3265 
3266 	if (error == 0 && ha->temphold &&
3267 	    strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
3268 		error = E2BIG;
3269 
3270 	return (error);
3271 }
3272 
3273 static void
3274 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx)
3275 {
3276 	dsl_dataset_t *ds = arg1;
3277 	struct dsl_ds_holdarg *ha = arg2;
3278 	char *htag = ha->htag;
3279 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3280 	objset_t *mos = dp->dp_meta_objset;
3281 	uint64_t now = gethrestime_sec();
3282 	uint64_t zapobj;
3283 
3284 	mutex_enter(&ds->ds_lock);
3285 	if (ds->ds_phys->ds_userrefs_obj == 0) {
3286 		/*
3287 		 * This is the first user hold for this dataset.  Create
3288 		 * the userrefs zap object.
3289 		 */
3290 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3291 		zapobj = ds->ds_phys->ds_userrefs_obj =
3292 		    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
3293 	} else {
3294 		zapobj = ds->ds_phys->ds_userrefs_obj;
3295 	}
3296 	ds->ds_userrefs++;
3297 	mutex_exit(&ds->ds_lock);
3298 
3299 	VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx));
3300 
3301 	if (ha->temphold) {
3302 		VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object,
3303 		    htag, &now, tx));
3304 	}
3305 
3306 	spa_history_log_internal(LOG_DS_USER_HOLD,
3307 	    dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag,
3308 	    (int)ha->temphold, ds->ds_object);
3309 }
3310 
3311 static int
3312 dsl_dataset_user_hold_one(const char *dsname, void *arg)
3313 {
3314 	struct dsl_ds_holdarg *ha = arg;
3315 	dsl_dataset_t *ds;
3316 	int error;
3317 	char *name;
3318 
3319 	/* alloc a buffer to hold dsname@snapname plus the terminating NUL */
3320 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3321 	error = dsl_dataset_hold(name, ha->dstg, &ds);
3322 	strfree(name);
3323 	if (error == 0) {
3324 		ha->gotone = B_TRUE;
3325 		dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check,
3326 		    dsl_dataset_user_hold_sync, ds, ha, 0);
3327 	} else if (error == ENOENT && ha->recursive) {
3328 		error = 0;
3329 	} else {
3330 		(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3331 	}
3332 	return (error);
3333 }
3334 
3335 int
3336 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag,
3337     boolean_t recursive, boolean_t temphold)
3338 {
3339 	struct dsl_ds_holdarg *ha;
3340 	dsl_sync_task_t *dst;
3341 	spa_t *spa;
3342 	int error;
3343 
3344 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3345 
3346 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3347 
3348 	error = spa_open(dsname, &spa, FTAG);
3349 	if (error) {
3350 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3351 		return (error);
3352 	}
3353 
3354 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3355 	ha->htag = htag;
3356 	ha->snapname = snapname;
3357 	ha->recursive = recursive;
3358 	ha->temphold = temphold;
3359 	if (recursive) {
3360 		error = dmu_objset_find(dsname, dsl_dataset_user_hold_one,
3361 		    ha, DS_FIND_CHILDREN);
3362 	} else {
3363 		error = dsl_dataset_user_hold_one(dsname, ha);
3364 	}
3365 	if (error == 0)
3366 		error = dsl_sync_task_group_wait(ha->dstg);
3367 
3368 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3369 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3370 		dsl_dataset_t *ds = dst->dst_arg1;
3371 
3372 		if (dst->dst_err) {
3373 			dsl_dataset_name(ds, ha->failed);
3374 			*strchr(ha->failed, '@') = '\0';
3375 		}
3376 		dsl_dataset_rele(ds, ha->dstg);
3377 	}
3378 
3379 	if (error == 0 && recursive && !ha->gotone)
3380 		error = ENOENT;
3381 
3382 	if (error)
3383 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3384 
3385 	dsl_sync_task_group_destroy(ha->dstg);
3386 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3387 	spa_close(spa, FTAG);
3388 	return (error);
3389 }
3390 
3391 struct dsl_ds_releasearg {
3392 	dsl_dataset_t *ds;
3393 	const char *htag;
3394 	boolean_t own;		/* do we own or just hold ds? */
3395 };
3396 
3397 static int
3398 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag,
3399     boolean_t *might_destroy)
3400 {
3401 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
3402 	uint64_t zapobj;
3403 	uint64_t tmp;
3404 	int error;
3405 
3406 	*might_destroy = B_FALSE;
3407 
3408 	mutex_enter(&ds->ds_lock);
3409 	zapobj = ds->ds_phys->ds_userrefs_obj;
3410 	if (zapobj == 0) {
3411 		/* The tag can't possibly exist */
3412 		mutex_exit(&ds->ds_lock);
3413 		return (ESRCH);
3414 	}
3415 
3416 	/* Make sure the tag exists */
3417 	error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp);
3418 	if (error) {
3419 		mutex_exit(&ds->ds_lock);
3420 		if (error == ENOENT)
3421 			error = ESRCH;
3422 		return (error);
3423 	}
3424 
3425 	if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 &&
3426 	    DS_IS_DEFER_DESTROY(ds))
3427 		*might_destroy = B_TRUE;
3428 
3429 	mutex_exit(&ds->ds_lock);
3430 	return (0);
3431 }
3432 
3433 static int
3434 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx)
3435 {
3436 	struct dsl_ds_releasearg *ra = arg1;
3437 	dsl_dataset_t *ds = ra->ds;
3438 	boolean_t might_destroy;
3439 	int error;
3440 
3441 	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS)
3442 		return (ENOTSUP);
3443 
3444 	error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy);
3445 	if (error)
3446 		return (error);
3447 
3448 	if (might_destroy) {
3449 		struct dsl_ds_destroyarg dsda = {0};
3450 
3451 		if (dmu_tx_is_syncing(tx)) {
3452 			/*
3453 			 * If we're not prepared to remove the snapshot,
3454 			 * we can't allow the release to happen right now.
3455 			 */
3456 			if (!ra->own)
3457 				return (EBUSY);
3458 		}
3459 		dsda.ds = ds;
3460 		dsda.releasing = B_TRUE;
3461 		return (dsl_dataset_destroy_check(&dsda, tag, tx));
3462 	}
3463 
3464 	return (0);
3465 }
3466 
3467 static void
3468 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx)
3469 {
3470 	struct dsl_ds_releasearg *ra = arg1;
3471 	dsl_dataset_t *ds = ra->ds;
3472 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
3473 	objset_t *mos = dp->dp_meta_objset;
3474 	uint64_t zapobj;
3475 	uint64_t dsobj = ds->ds_object;
3476 	uint64_t refs;
3477 	int error;
3478 
3479 	if (ds->ds_objset) {
3480 		dmu_objset_evict(ds->ds_objset);
3481 		ds->ds_objset = NULL;
3482 	}
3483 
3484 	mutex_enter(&ds->ds_lock);
3485 	ds->ds_userrefs--;
3486 	refs = ds->ds_userrefs;
3487 	mutex_exit(&ds->ds_lock);
3488 	error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx);
3489 	VERIFY(error == 0 || error == ENOENT);
3490 	zapobj = ds->ds_phys->ds_userrefs_obj;
3491 	VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx));
3492 	if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 &&
3493 	    DS_IS_DEFER_DESTROY(ds)) {
3494 		struct dsl_ds_destroyarg dsda = {0};
3495 
3496 		ASSERT(ra->own);
3497 		dsda.ds = ds;
3498 		dsda.releasing = B_TRUE;
3499 		/* We already did the destroy_check */
3500 		dsl_dataset_destroy_sync(&dsda, tag, tx);
3501 	}
3502 
3503 	spa_history_log_internal(LOG_DS_USER_RELEASE,
3504 	    dp->dp_spa, tx, "<%s> %lld dataset = %llu",
3505 	    ra->htag, (longlong_t)refs, dsobj);
3506 }
3507 
3508 static int
3509 dsl_dataset_user_release_one(const char *dsname, void *arg)
3510 {
3511 	struct dsl_ds_holdarg *ha = arg;
3512 	struct dsl_ds_releasearg *ra;
3513 	dsl_dataset_t *ds;
3514 	int error;
3515 	void *dtag = ha->dstg;
3516 	char *name;
3517 	boolean_t own = B_FALSE;
3518 	boolean_t might_destroy;
3519 
3520 	/* alloc a buffer to hold dsname@snapname, plus the terminating NUL */
3521 	name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3522 	error = dsl_dataset_hold(name, dtag, &ds);
3523 	strfree(name);
3524 	if (error == ENOENT && ha->recursive)
3525 		return (0);
3526 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3527 	if (error)
3528 		return (error);
3529 
3530 	ha->gotone = B_TRUE;
3531 
3532 	ASSERT(dsl_dataset_is_snapshot(ds));
3533 
3534 	error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy);
3535 	if (error) {
3536 		dsl_dataset_rele(ds, dtag);
3537 		return (error);
3538 	}
3539 
3540 	if (might_destroy) {
3541 #ifdef _KERNEL
3542 		name = kmem_asprintf("%s@%s", dsname, ha->snapname);
3543 		error = zfs_unmount_snap(name, NULL);
3544 		strfree(name);
3545 		if (error) {
3546 			dsl_dataset_rele(ds, dtag);
3547 			return (error);
3548 		}
3549 #endif
3550 		if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) {
3551 			dsl_dataset_rele(ds, dtag);
3552 			return (EBUSY);
3553 		} else {
3554 			own = B_TRUE;
3555 			dsl_dataset_make_exclusive(ds, dtag);
3556 		}
3557 	}
3558 
3559 	ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP);
3560 	ra->ds = ds;
3561 	ra->htag = ha->htag;
3562 	ra->own = own;
3563 	dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check,
3564 	    dsl_dataset_user_release_sync, ra, dtag, 0);
3565 
3566 	return (0);
3567 }
3568 
3569 int
3570 dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
3571     boolean_t recursive)
3572 {
3573 	struct dsl_ds_holdarg *ha;
3574 	dsl_sync_task_t *dst;
3575 	spa_t *spa;
3576 	int error;
3577 
3578 top:
3579 	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);
3580 
3581 	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));
3582 
3583 	error = spa_open(dsname, &spa, FTAG);
3584 	if (error) {
3585 		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3586 		return (error);
3587 	}
3588 
3589 	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
3590 	ha->htag = htag;
3591 	ha->snapname = snapname;
3592 	ha->recursive = recursive;
3593 	if (recursive) {
3594 		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
3595 		    ha, DS_FIND_CHILDREN);
3596 	} else {
3597 		error = dsl_dataset_user_release_one(dsname, ha);
3598 	}
3599 	if (error == 0)
3600 		error = dsl_sync_task_group_wait(ha->dstg);
3601 
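	/*
	 * Walk the sync tasks, recording the name of any dataset that
	 * failed and dropping the hold or ownership taken above.
	 */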
3602 	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
3603 	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
3604 		struct dsl_ds_releasearg *ra = dst->dst_arg1;
3605 		dsl_dataset_t *ds = ra->ds;
3606 
3607 		if (dst->dst_err)
3608 			dsl_dataset_name(ds, ha->failed);
3609 
3610 		if (ra->own)
3611 			dsl_dataset_disown(ds, ha->dstg);
3612 		else
3613 			dsl_dataset_rele(ds, ha->dstg);
3614 
3615 		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
3616 	}
3617 
3618 	if (error == 0 && recursive && !ha->gotone)
3619 		error = ENOENT;
3620 
3621 	if (error && error != EBUSY)
3622 		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));
3623 
3624 	dsl_sync_task_group_destroy(ha->dstg);
3625 	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
3626 	spa_close(spa, FTAG);
3627 
3628 	/*
3629 	 * We can get EBUSY if we were racing with deferred destroy and
3630 	 * dsl_dataset_user_release_check() hadn't done the necessary
3631 	 * open context setup.  We can also get EBUSY if we're racing
3632 	 * with destroy and that thread is the ds_owner.  Either way
3633 	 * the busy condition should be transient, and we should retry
3634 	 * the release operation.
3635 	 */
3636 	if (error == EBUSY)
3637 		goto top;
3638 
3639 	return (error);
3640 }
3641 
3642 /*
3643  * Called at spa_load time to release a stale temporary user hold.
3644  */
3645 int
3646 dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag)
3647 {
3648 	dsl_dataset_t *ds;
3649 	char *snap;
3650 	char *name;
3651 	int namelen;
3652 	int error;
3653 
3654 	rw_enter(&dp->dp_config_rwlock, RW_READER);
3655 	error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
3656 	rw_exit(&dp->dp_config_rwlock);
3657 	if (error)
3658 		return (error);
3659 	namelen = dsl_dataset_namelen(ds) + 1;
3660 	name = kmem_alloc(namelen, KM_SLEEP);
3661 	dsl_dataset_name(ds, name);
3662 	dsl_dataset_rele(ds, FTAG);
3663 
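	/* split dsname@snapname into dataset and snapshot names in place */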
3664 	snap = strchr(name, '@');
3665 	*snap = '\0';
3666 	++snap;
3667 	error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
	kmem_free(name, namelen);	/* free the buffer allocated above */
	return (error);
3668 }
3669 
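/*
 * Fill in *nvp with one uint64 entry for each user hold on the named
 * snapshot, mapping the hold's tag to the value stored in the userrefs
 * ZAP (the time the hold was created).
 */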
3670 int
3671 dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
3672 {
3673 	dsl_dataset_t *ds;
3674 	int err;
3675 
3676 	err = dsl_dataset_hold(dsname, FTAG, &ds);
3677 	if (err)
3678 		return (err);
3679 
3680 	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
3681 	if (ds->ds_phys->ds_userrefs_obj != 0) {
3682 		zap_attribute_t *za;
3683 		zap_cursor_t zc;
3684 
3685 		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
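		/* walk every (tag, value) entry in the userrefs ZAP */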
3686 		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
3687 		    ds->ds_phys->ds_userrefs_obj);
3688 		    zap_cursor_retrieve(&zc, za) == 0;
3689 		    zap_cursor_advance(&zc)) {
3690 			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
3691 			    za->za_first_integer));
3692 		}
3693 		zap_cursor_fini(&zc);
3694 		kmem_free(za, sizeof (zap_attribute_t));
3695 	}
3696 	dsl_dataset_rele(ds, FTAG);
3697 	return (0);
3698 }
3699 
3700 /*
3701  * Note: this function is used as the callback for dmu_objset_find().  We
3702  * always return 0 so that we will continue to find and process
3703  * inconsistent datasets, even if we encounter an error trying to
3704  * process one of them.
3705  */
3706 /* ARGSUSED */
3707 int
3708 dsl_destroy_inconsistent(const char *dsname, void *arg)
3709 {
3710 	dsl_dataset_t *ds;
3711 
3712 	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
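		/*
		 * dsl_dataset_destroy() disposes of our ownership itself,
		 * so we only disown explicitly when skipping the destroy.
		 */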
3713 		if (DS_IS_INCONSISTENT(ds))
3714 			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
3715 		else
3716 			dsl_dataset_disown(ds, FTAG);
3717 	}
3718 	return (0);
3719 }
3720