/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

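/*
 * Routines for destroying ZFS snapshots and head (non-snapshot)
 * datasets.  Each destroy is implemented as a DSL sync task: a
 * "check" function validates the request, and a "sync" function
 * applies it in syncing context.  Batches of snapshots are checked
 * and destroyed atomically through a channel program; see
 * dsl_destroy_snapshots_nvl() below.
 */
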
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zcp.h>

int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (dsl_dataset_long_held(ds))
		return (SET_ERROR(EBUSY));

	/*
	 * Only allow deferred destroy on pools that support it.
	 * NOTE: deferred destroy is only supported on snapshots.
	 */
	if (defer) {
		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
		    SPA_VERSION_USERREFS)
			return (SET_ERROR(ENOTSUP));
		return (0);
	}

	/*
	 * If this snapshot has an elevated user reference count,
	 * we can't destroy it yet.
	 */
	if (ds->ds_userrefs > 0)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete a branch point.
	 */
	if (dsl_dataset_phys(ds)->ds_num_children > 1)
		return (SET_ERROR(EEXIST));

	return (0);
}

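/*
 * Check function for the destroy-snapshot sync task.  Note that a
 * missing snapshot is not an error: we return 0 so that
 * dsl_destroy_snapshot_sync() can treat it as already destroyed.
 */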
int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error = 0;
	dsl_dataset_t *ds;

	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);

	/*
	 * If the snapshot does not exist, silently ignore it, and
	 * dsl_destroy_snapshot_sync() will be a no-op
	 * (it's "already destroyed").
	 */
	if (error == ENOENT)
		return (0);

	if (error == 0) {
		error = dsl_destroy_snapshot_check_impl(ds, defer);
		dsl_dataset_rele(ds, FTAG);
	}

	return (error);
}

struct process_old_arg {
	dsl_dataset_t *ds;
	dsl_dataset_t *ds_prev;
	boolean_t after_branch_point;
	zio_t *pio;
	uint64_t used, comp, uncomp;
};

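/*
 * Callback for bpobj_iterate() over the next snapshot's old-format
 * deadlist.  Blocks born no later than our previous snapshot stay
 * dead and move to our deadlist (possibly becoming unique to the
 * previous snapshot); blocks born after that were referenced only by
 * the snapshot being destroyed, so they are actually freed.
 */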
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct process_old_arg *poa = arg;
	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;

	ASSERT(!BP_IS_HOLE(bp));

	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
		if (poa->ds_prev && !poa->after_branch_point &&
		    bp->blk_birth >
		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
			    bp_get_dsize_sync(dp->dp_spa, bp);
		}
	} else {
		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
		poa->comp += BP_GET_PSIZE(bp);
		poa->uncomp += BP_GET_UCSIZE(bp);
		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
	}
	return (0);
}

static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
    dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
	struct process_old_arg poa = { 0 };
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t deadlist_obj;

	ASSERT(ds->ds_deadlist.dl_oldfmt);
	ASSERT(ds_next->ds_deadlist.dl_oldfmt);

	poa.ds = ds;
	poa.ds_prev = ds_prev;
	poa.after_branch_point = after_branch_point;
	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
	    process_old_cb, &poa, tx));
	VERIFY0(zio_wait(poa.pio));
	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);

	/* change snapused */
	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
	    -poa.used, -poa.comp, -poa.uncomp, tx);

	/* swap next's deadlist to our deadlist */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_close(&ds_next->ds_deadlist);
	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
	dsl_dataset_phys(ds)->ds_deadlist_obj =
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
	dsl_deadlist_open(&ds->ds_deadlist, mos,
	    dsl_dataset_phys(ds)->ds_deadlist_obj);
	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}

static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;

	/*
	 * If it is the old version, dd_clones doesn't exist so we can't
	 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
	 * doesn't matter.
	 */
	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
		return;

	for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_dataset_t *clone;

		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
		    za.za_first_integer, FTAG, &clone));
		if (clone->ds_dir->dd_origin_txg > mintxg) {
			dsl_deadlist_remove_key(&clone->ds_deadlist,
			    mintxg, tx);
			if (dsl_dataset_remap_deadlist_exists(clone)) {
				dsl_deadlist_remove_key(
				    &clone->ds_remap_deadlist, mintxg, tx);
			}
			dsl_dataset_remove_clones_key(clone, mintxg, tx);
		}
		dsl_dataset_rele(clone, FTAG);
	}
	zap_cursor_fini(&zc);
}

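/*
 * Handle remap deadlists when destroying a snapshot: the portion of
 * the next snapshot's remap deadlist that predates this snapshot is
 * moved to the pool-wide obsolete bpobj, and this snapshot's own
 * remap deadlist is merged into the next snapshot's before being
 * destroyed.
 */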
static void
dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
    dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Move blocks to be obsoleted to pool's obsolete list. */
	if (dsl_dataset_remap_deadlist_exists(ds_next)) {
		if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
			dsl_pool_create_obsolete_bpobj(dp, tx);

		dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
		    &dp->dp_obsolete_bpobj,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
	}

	/* Merge our deadlist into next's and free it. */
	if (dsl_dataset_remap_deadlist_exists(ds)) {
		uint64_t remap_deadlist_object =
		    dsl_dataset_get_remap_deadlist_object(ds);
		ASSERT(remap_deadlist_object != 0);

		mutex_enter(&ds_next->ds_remap_deadlist_lock);
		if (!dsl_dataset_remap_deadlist_exists(ds_next))
			dsl_dataset_create_remap_deadlist(ds_next, tx);
		mutex_exit(&ds_next->ds_remap_deadlist_lock);

		dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
		    remap_deadlist_object, tx);
		dsl_dataset_destroy_remap_deadlist(ds, tx);
	}
}

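/*
 * Does the real work of destroying a snapshot in syncing context:
 * unlink it from its neighboring snapshots, transfer deadlist and
 * unique-space accounting to them, collapse the freed key range in
 * any clones, and finally remove the snapshot from the head
 * dataset's snapshot namespace and free its objects.  If the destroy
 * is deferred and the snapshot is still held (user refs or a clone),
 * it is merely marked with DS_FLAG_DEFER_DESTROY instead.
 */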
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));

	if (defer &&
	    (ds->ds_userrefs > 0 ||
	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
		return;
	}

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	dsl_scan_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}
	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		ASSERT3P(ds->ds_prev, ==, NULL);
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
		after_branch_point =
		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
				VERIFY0(zap_add_int(mos,
				    dsl_dataset_phys(ds_prev)->
				    ds_next_clones_obj,
				    dsl_dataset_phys(ds)->ds_next_snap_obj,
				    tx));
			}
		}
		if (!after_branch_point) {
			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
		}
	}

	dsl_dataset_t *ds_next;
	uint64_t old_unique;
	uint64_t used = 0, comp = 0, uncomp = 0;

	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);

	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;

	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);

	if (ds_next->ds_deadlist.dl_oldfmt) {
		process_old_deadlist(ds, ds_prev, ds_next,
		    after_branch_point, tx);
	} else {
		/* Adjust prev's unique space. */
		if (ds_prev && !after_branch_point) {
			dsl_deadlist_space_range(&ds_next->ds_deadlist,
			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
			    &used, &comp, &uncomp);
			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
		}

		/* Adjust snapused. */
		dsl_deadlist_space_range(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
		    &used, &comp, &uncomp);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
		    -used, -comp, -uncomp, tx);

		/* Move blocks to be freed to pool's free list. */
		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    tx);
		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
		    DD_USED_HEAD, used, comp, uncomp, tx);

		/* Merge our deadlist into next's and free it. */
		dsl_deadlist_merge(&ds_next->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	}

	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);

	/* Collapse range in clone heads */
	dsl_dataset_remove_clones_key(ds,
	    dsl_dataset_phys(ds)->ds_creation_txg, tx);

	if (ds_next->ds_is_snapshot) {
		dsl_dataset_t *ds_nextnext;

		/*
		 * Update next's unique to include blocks which
		 * were previously shared by only this snapshot
		 * and it.  Those blocks will be born after the
		 * prev snap and before this snap, and will have
		 * died after the next snap and before the one
		 * after that (i.e. be on the snap after next's
		 * deadlist).
		 */
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
		    FTAG, &ds_nextnext));
		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    dsl_dataset_phys(ds)->ds_creation_txg,
		    &used, &comp, &uncomp);
		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
		dsl_dataset_rele(ds_nextnext, FTAG);
		ASSERT3P(ds_next->ds_prev, ==, NULL);

		/* Collapse range in this head. */
		dsl_dataset_t *hds;
		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
		dsl_deadlist_remove_key(&hds->ds_deadlist,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		if (dsl_dataset_remap_deadlist_exists(hds)) {
			dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx);
		}
		dsl_dataset_rele(hds, FTAG);

	} else {
		ASSERT3P(ds_next->ds_prev, ==, ds);
		dsl_dataset_rele(ds_next->ds_prev, ds_next);
		ds_next->ds_prev = NULL;
		if (ds_prev) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds_next, &ds_next->ds_prev));
		}

		dsl_dataset_recalc_head_uniq(ds_next);

		/*
		 * Reduce the amount of our unconsumed refreservation
		 * being charged to our parent by the amount of
		 * new unique data we have gained.
		 */
		if (old_unique < ds_next->ds_reserved) {
			int64_t mrsdelta;
			uint64_t new_unique =
			    dsl_dataset_phys(ds_next)->ds_unique_bytes;

			ASSERT(old_unique <= new_unique);
			mrsdelta = MIN(new_unique - old_unique,
			    ds_next->ds_reserved - old_unique);
			dsl_dir_diduse_space(ds->ds_dir,
			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
		}
	}
	dsl_dataset_rele(ds_next, FTAG);

	/*
	 * This must be done after the dsl_traverse(), because it will
	 * re-open the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* remove from snapshot namespace */
	dsl_dataset_t *ds_head;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
	VERIFY0(dsl_dataset_hold_obj(dp,
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
	VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
	{
		uint64_t val;

		err = dsl_dataset_snap_lookup(ds_head,
		    ds->ds_snapname, &val);
		ASSERT0(err);
		ASSERT3U(val, ==, obj);
	}
#endif
	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
	dsl_dataset_rele(ds_head, FTAG);

	if (ds_prev != NULL)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT0(zap_count(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
		    count == 0);
		VERIFY0(dmu_object_free(mos,
		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
	}
	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
		    tx));
	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
		    tx));
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);
}

void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_snapshot_arg_t *ddsa = arg;
	const char *dsname = ddsa->ddsa_name;
	boolean_t defer = ddsa->ddsa_defer;

	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error == ENOENT)
		return;
	ASSERT0(error);
	dsl_destroy_snapshot_sync_impl(ds, defer, tx);
	dsl_dataset_rele(ds, FTAG);
}

/*
 * The semantics of this function are described in the comment above
 * lzc_destroy_snaps().  To summarize:
 *
 * The snapshots must all be in the same pool.
 *
 * Snapshots that don't exist will be silently ignored (considered to be
 * "already deleted").
 *
 * On success, all snaps will be destroyed and this will return 0.
 * On failure, no snaps will be destroyed, the errlist will be filled in,
 * and this will return an errno.
 */
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
    nvlist_t *errlist)
{
	if (nvlist_next_nvpair(snaps, NULL) == NULL)
		return (0);

	/*
	 * lzc_destroy_snaps() is documented to take an nvlist whose
	 * values "don't matter".  We need to convert that nvlist to
	 * one that we know can be converted to Lua.  We also don't
	 * care about any duplicate entries, because the nvlist will
	 * be converted to a Lua table, which should take care of this.
	 */
	nvlist_t *snaps_normalized;
	VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
	for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
		fnvlist_add_boolean_value(snaps_normalized,
		    nvpair_name(pair), B_TRUE);
	}

	nvlist_t *arg;
	VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
	fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
	fnvlist_free(snaps_normalized);
	fnvlist_add_boolean_value(arg, "defer", defer);

	nvlist_t *wrapper;
	VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
	fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
	fnvlist_free(arg);

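	/*
	 * The channel program below makes two passes: the first checks
	 * every snapshot (dropping ENOENT entries and collecting any
	 * other errors); only if no errors were found does the second
	 * pass destroy them all, so the batch succeeds or fails as a
	 * unit.
	 */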
	const char *program =
	    "arg = ...\n"
	    "snaps = arg['snaps']\n"
	    "defer = arg['defer']\n"
	    "errors = { }\n"
	    "has_errors = false\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.check.destroy{snap, defer=defer}\n"
	    "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
	    "    if errno == ENOENT then\n"
	    "        snaps[snap] = nil\n"
	    "    elseif errno ~= 0 then\n"
	    "        errors[snap] = errno\n"
	    "        has_errors = true\n"
	    "    end\n"
	    "end\n"
	    "if has_errors then\n"
	    "    return errors\n"
	    "end\n"
	    "for snap, v in pairs(snaps) do\n"
	    "    errno = zfs.sync.destroy{snap, defer=defer}\n"
	    "    assert(errno == 0)\n"
	    "end\n"
	    "return { }\n";

	nvlist_t *result = fnvlist_alloc();
	int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
	    program,
	    B_TRUE,
	    0,
	    zfs_lua_max_memlimit,
	    nvlist_next_nvpair(wrapper, NULL), result);
	fnvlist_free(wrapper);
	if (error != 0) {
		char *errorstr = NULL;
		(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
		if (errorstr != NULL) {
			zfs_dbgmsg("%s", errorstr);
		}
		fnvlist_free(result);
		return (error);
	}

	/*
	 * lzc_destroy_snaps() is documented to fill the errlist with
	 * int32 values, so we need to convert the int64 values that are
	 * returned from Lua.
	 */
	int rv = 0;
	nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
	for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
	    pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
		int32_t val = (int32_t)fnvpair_value_int64(pair);
		if (rv == 0)
			rv = val;
		fnvlist_add_int32(errlist, nvpair_name(pair), val);
	}
	fnvlist_free(result);
	return (rv);
}

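/*
 * Convenience wrapper to destroy a single snapshot by name; any
 * per-snapshot error detail collected in the errlist is discarded.
 * A typical call would be along the lines of:
 *
 *	error = dsl_destroy_snapshot("pool/fs@snap", B_FALSE);
 */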
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
	int error;
	nvlist_t *nvl = fnvlist_alloc();
	nvlist_t *errlist = fnvlist_alloc();

	fnvlist_add_boolean(nvl, name);
	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
	fnvlist_free(errlist);
	fnvlist_free(nvl);
	return (error);
}

struct killarg {
	dsl_dataset_t *ds;
	dmu_tx_t *tx;
};

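/*
 * Callback for traverse_dataset(): free each block that was born
 * after the previous snapshot.  ZIL blocks carry no space accounting
 * and are freed directly; everything else goes through
 * dsl_dataset_block_kill().
 */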
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct killarg *ka = arg;
	dmu_tx_t *tx = ka->tx;

	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);

	if (zb->zb_level == ZB_ZIL_LEVEL) {
		ASSERT(zilog != NULL);
		/*
		 * It's a block in the intent log.  It has no
		 * accounting, so just free it.
		 */
		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
	} else {
		ASSERT(zilog == NULL);
		ASSERT3U(bp->blk_birth, >,
		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
	}

	return (0);
}

static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	struct killarg ka;

	/*
	 * Free everything that we point to (that's born after
	 * the previous snapshot, if we are a clone).
	 *
	 * NB: this should be very quick, because we already
	 * freed all the objects in open context.
	 */
	ka.ds = ds;
	ka.tx = tx;
	VERIFY0(traverse_dataset(ds,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
	    TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}

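/*
 * Validate that a head dataset can be destroyed: it must not be a
 * snapshot, must have exactly the expected number of long holds, and
 * must have no snapshots of its own (snapshots belonging to the
 * origin branch we cloned from are fine) and no child filesystems.
 * If destroying it would also remove a deferred-destroy origin
 * snapshot, that snapshot must be destroyable too.
 */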
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
	int error;
	uint64_t count;
	objset_t *mos;

	ASSERT(!ds->ds_is_snapshot);
	if (ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
		return (SET_ERROR(EBUSY));

	mos = ds->ds_dir->dd_pool->dp_meta_objset;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
		return (SET_ERROR(EBUSY));

	/*
	 * Can't delete if there are children of this fs.
	 */
	error = zap_count(mos,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
	if (error != 0)
		return (error);
	if (count != 0)
		return (SET_ERROR(EEXIST));

	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0) {
		/* We need to remove the origin snapshot as well. */
		if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
			return (SET_ERROR(EBUSY));
	}
	return (0);
}

int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;
	int error;

	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
	if (error != 0)
		return (error);

	error = dsl_destroy_head_check_impl(ds, 0);
	dsl_dataset_rele(ds, FTAG);
	return (error);
}

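/*
 * Destroy the dsl_dir_t once its head dataset is gone: adjust the
 * parent's filesystem count, drop the reservation, and free the
 * child, props, clones, and delegation ZAP objects before removing
 * the entry from the parent's child directory ZAP.
 */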
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dd_used_t t;

	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));

	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));

	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);

	/*
	 * Decrement the filesystem count for all parent filesystems.
	 *
	 * When we receive an incremental stream into a filesystem that already
	 * exists, a temporary clone is created.  We never count this temporary
	 * clone, whose name begins with a '%'.
	 */
	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
		    DD_FIELD_FILESYSTEM_COUNT, tx);

	/*
	 * Remove our reservation. The impl() routine avoids setting the
	 * actual property, which would require the (already destroyed) ds.
	 */
	dsl_dir_set_reservation_sync_impl(dd, 0, tx);

	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);

	if (dd->dd_crypto_obj != 0) {
		dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
		(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
	}

	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
	if (dsl_dir_phys(dd)->dd_clones != 0)
		VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_clones, tx));
	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
	VERIFY0(zap_remove(mos,
	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
	    dd->dd_myname, tx));

	dsl_dir_rele(dd, FTAG);
	dmu_object_free_zapified(mos, ddobj, tx);
}

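/*
 * Does the real work of destroying a head dataset in syncing
 * context.  On pools with SPA_FEATURE_ASYNC_DESTROY, the dataset's
 * block tree is handed to the pool-wide bptree and reclaimed over
 * many txgs; otherwise everything is freed synchronously in this txg
 * via old_synchronous_dataset_destroy().
 */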
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	uint64_t obj, ddobj, prevobj = 0;
	boolean_t rmorigin;

	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

	/* We need to log before removing it from the namespace. */
	spa_history_log_internal_ds(ds, "destroy", tx, "");

	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
	    ds->ds_prev->ds_userrefs == 0);

	/* Remove our reservation. */
	if (ds->ds_reserved != 0) {
		dsl_dataset_set_refreservation_sync_impl(ds,
		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
		    0, tx);
		ASSERT0(ds->ds_reserved);
	}

	obj = ds->ds_object;

	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
		if (ds->ds_feature_inuse[f]) {
			dsl_dataset_deactivate_feature(obj, f, tx);
			ds->ds_feature_inuse[f] = B_FALSE;
		}
	}

	dsl_scan_ds_destroyed(ds, tx);

	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		/* This is a clone */
		ASSERT(ds->ds_prev != NULL);
		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
		    obj);
		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);

		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
			dsl_dataset_remove_from_next_clones(ds->ds_prev,
			    obj, tx);
		}

		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
	}

	/*
	 * Destroy the deadlist.  Unless it's a clone, the
	 * deadlist should be empty since the dataset has no snapshots.
	 * (If it's a clone, it's safe to ignore the deadlist contents
	 * since they are still referenced by the origin snapshot.)
	 */
	dsl_deadlist_close(&ds->ds_deadlist);
	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;

	if (dsl_dataset_remap_deadlist_exists(ds))
		dsl_dataset_destroy_remap_deadlist(ds, tx);

	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		old_synchronous_dataset_destroy(ds, tx);
	} else {
		/*
		 * Move the bptree into the pool's list of trees to
		 * clean up and update space accounting information.
		 */
		uint64_t used, comp, uncomp;

		zil_destroy_sync(dmu_objset_zil(os), tx);

		if (!spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_ASYNC_DESTROY)) {
			dsl_scan_t *scn = dp->dp_scan;
			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
			    tx);
			dp->dp_bptree_obj = bptree_alloc(mos, tx);
			VERIFY0(zap_add(mos,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
			    &dp->dp_bptree_obj, tx));
			ASSERT(!scn->scn_async_destroying);
			scn->scn_async_destroying = B_TRUE;
		}

		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;

		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
		    dsl_dataset_phys(ds)->ds_unique_bytes == used);

		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		bptree_add(mos, dp->dp_bptree_obj,
		    &dsl_dataset_phys(ds)->ds_bp,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
		    used, comp, uncomp, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
		    -used, -comp, -uncomp, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    used, comp, uncomp, tx);
	}

	if (ds->ds_prev != NULL) {
		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
			VERIFY0(zap_remove_int(mos,
			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
			    ds->ds_object, tx));
		}
		prevobj = ds->ds_prev->ds_object;
		dsl_dataset_rele(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	/*
	 * This must be done after the traverse_dataset() call in
	 * old_synchronous_dataset_destroy(), because traversal
	 * re-opens the objset.
	 */
	if (ds->ds_objset) {
		dmu_objset_evict(ds->ds_objset);
		ds->ds_objset = NULL;
	}

	/* Erase the link in the dir */
	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
	ddobj = ds->ds_dir->dd_object;
	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
	VERIFY0(zap_destroy(mos,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));

	if (ds->ds_bookmarks != 0) {
		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
	}

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);

	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
	dsl_dir_rele(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dmu_object_free_zapified(mos, obj, tx);

	dsl_dir_destroy_sync(ddobj, tx);

	if (rmorigin) {
		dsl_dataset_t *prev;
		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
		dsl_dataset_rele(prev, FTAG);
	}
}

void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
	dsl_destroy_head_sync_impl(ds, tx);
	dsl_dataset_rele(ds, FTAG);
}

static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
	dsl_destroy_head_arg_t *ddha = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dataset_t *ds;

	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
	dsl_dataset_rele(ds, FTAG);
}

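/*
 * Destroy a head dataset by name.  On pools without
 * SPA_FEATURE_ASYNC_DESTROY, the dataset is first marked
 * inconsistent and its objects are freed from open context, so that
 * the final sync task stays short.  A typical call would be along
 * the lines of:
 *
 *	error = dsl_destroy_head("pool/fs");
 */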
int
dsl_destroy_head(const char *name)
{
	dsl_destroy_head_arg_t ddha;
	int error;
	spa_t *spa;
	boolean_t isenabled;

#ifdef _KERNEL
	zfs_destroy_unmount_origin(name);
#endif

	error = spa_open(name, &spa, FTAG);
	if (error != 0)
		return (error);
	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
	spa_close(spa, FTAG);

	ddha.ddha_name = name;

	if (!isenabled) {
		objset_t *os;

		error = dsl_sync_task(name, dsl_destroy_head_check,
		    dsl_destroy_head_begin_sync, &ddha,
		    0, ZFS_SPACE_CHECK_DESTROY);
		if (error != 0)
			return (error);

		/*
		 * Head deletion is processed in one txg on old pools;
		 * remove the objects from open context so that the txg sync
		 * is not too long.
		 */
		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
		    FTAG, &os);
		if (error == 0) {
			uint64_t prev_snap_txg =
			    dsl_dataset_phys(dmu_objset_ds(os))->
			    ds_prev_snap_txg;
			for (uint64_t obj = 0; error == 0;
			    error = dmu_object_next(os, &obj, FALSE,
			    prev_snap_txg))
				(void) dmu_free_long_object(os, obj);
			/* sync out all frees */
			txg_wait_synced(dmu_objset_pool(os), 0);
			dmu_objset_disown(os, B_FALSE, FTAG);
		}
	}

	return (dsl_sync_task(name, dsl_destroy_head_check,
	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
}

/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	objset_t *os;

	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));

		/*
		 * If the dataset is inconsistent because a resumable receive
		 * has failed, then do not destroy it.
		 */
		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
			need_destroy = B_FALSE;

		dmu_objset_rele(os, FTAG);
		if (need_destroy)
			(void) dsl_destroy_head(dsname);
	}
	return (0);
}
1089