xref: /illumos-gate/usr/src/uts/common/fs/zfs/dsl_destroy.c (revision a71e11eee4676204c7609c4c9703cc98fbf4669d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
24  * Copyright (c) 2013 Steven Hartland. All rights reserved.
25  * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
26  * Copyright (c) 2014 Integros [integros.com]
27  */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/dsl_userhold.h>
31 #include <sys/dsl_dataset.h>
32 #include <sys/dsl_synctask.h>
33 #include <sys/dsl_destroy.h>
34 #include <sys/dmu_tx.h>
35 #include <sys/dsl_pool.h>
36 #include <sys/dsl_dir.h>
37 #include <sys/dmu_traverse.h>
38 #include <sys/dsl_scan.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/zap.h>
41 #include <sys/zfeature.h>
42 #include <sys/zfs_ioctl.h>
43 #include <sys/dsl_deleg.h>
44 #include <sys/dmu_impl.h>
45 #include <sys/zcp.h>
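
/*
 * This file implements the sync tasks that destroy ZFS snapshots and head
 * datasets (filesystems, volumes, and clones), including the deferred-destroy
 * and asynchronous (background) destroy paths.
 */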
46 
47 int
48 dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
49 {
50 	if (!ds->ds_is_snapshot)
51 		return (SET_ERROR(EINVAL));
52 
53 	if (dsl_dataset_long_held(ds))
54 		return (SET_ERROR(EBUSY));
55 
56 	/*
57 	 * Only allow deferred destroy on pools that support it.
58 	 * NOTE: deferred destroy is only supported on snapshots.
59 	 */
60 	if (defer) {
61 		if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
62 		    SPA_VERSION_USERREFS)
63 			return (SET_ERROR(ENOTSUP));
64 		return (0);
65 	}
66 
67 	/*
68 	 * If this snapshot has an elevated user reference count,
69 	 * we can't destroy it yet.
70 	 */
71 	if (ds->ds_userrefs > 0)
72 		return (SET_ERROR(EBUSY));
73 
74 	/*
75 	 * Can't delete a branch point.
76 	 */
77 	if (dsl_dataset_phys(ds)->ds_num_children > 1)
78 		return (SET_ERROR(EEXIST));
79 
80 	return (0);
81 }
82 
83 int
84 dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
85 {
86 	dsl_destroy_snapshot_arg_t *ddsa = arg;
87 	const char *dsname = ddsa->ddsa_name;
88 	boolean_t defer = ddsa->ddsa_defer;
89 
90 	dsl_pool_t *dp = dmu_tx_pool(tx);
91 	int error = 0;
92 	dsl_dataset_t *ds;
93 
94 	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
95 
96 	/*
97 	 * If the snapshot does not exist, silently ignore it, and
98 	 * dsl_destroy_snapshot_sync() will be a no-op
99 	 * (it's "already destroyed").
100 	 */
101 	if (error == ENOENT)
102 		return (0);
103 
104 	if (error == 0) {
105 		error = dsl_destroy_snapshot_check_impl(ds, defer);
106 		dsl_dataset_rele(ds, FTAG);
107 	}
108 
109 	return (error);
110 }
111 
112 struct process_old_arg {
113 	dsl_dataset_t *ds;
114 	dsl_dataset_t *ds_prev;
115 	boolean_t after_branch_point;
116 	zio_t *pio;
117 	uint64_t used, comp, uncomp;
118 };
119 
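/*
 * Callback for each block pointer on ds_next's old-format deadlist.  Blocks
 * born at or before ds's previous snapshot are still referenced by that
 * snapshot, so they are kept dead by moving them onto ds's deadlist (and,
 * when appropriate, credited to the previous snapshot's unique bytes).
 * Blocks born later were unique to ds; they are freed and their space is
 * accumulated in the process_old_arg totals.
 */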
120 static int
121 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
122 {
123 	struct process_old_arg *poa = arg;
124 	dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
125 
126 	ASSERT(!BP_IS_HOLE(bp));
127 
128 	if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
129 		dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
130 		if (poa->ds_prev && !poa->after_branch_point &&
131 		    bp->blk_birth >
132 		    dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
133 			dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
134 			    bp_get_dsize_sync(dp->dp_spa, bp);
135 		}
136 	} else {
137 		poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
138 		poa->comp += BP_GET_PSIZE(bp);
139 		poa->uncomp += BP_GET_UCSIZE(bp);
140 		dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
141 	}
142 	return (0);
143 }
144 
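/*
 * Handle a destroyed snapshot's dead blocks when the pool still uses
 * old-format (plain bpobj) deadlists: walk ds_next's deadlist with
 * process_old_cb(), adjust the snapused accounting, and swap the two
 * deadlist objects so that ds_next takes over ds's deadlist, which now
 * also holds the entries that must remain dead.
 */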
145 static void
146 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
147     dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
148 {
149 	struct process_old_arg poa = { 0 };
150 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
151 	objset_t *mos = dp->dp_meta_objset;
152 	uint64_t deadlist_obj;
153 
154 	ASSERT(ds->ds_deadlist.dl_oldfmt);
155 	ASSERT(ds_next->ds_deadlist.dl_oldfmt);
156 
157 	poa.ds = ds;
158 	poa.ds_prev = ds_prev;
159 	poa.after_branch_point = after_branch_point;
160 	poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
161 	VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
162 	    process_old_cb, &poa, tx));
163 	VERIFY0(zio_wait(poa.pio));
164 	ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);
165 
166 	/* change snapused */
167 	dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
168 	    -poa.used, -poa.comp, -poa.uncomp, tx);
169 
170 	/* swap next's deadlist to our deadlist */
171 	dsl_deadlist_close(&ds->ds_deadlist);
172 	dsl_deadlist_close(&ds_next->ds_deadlist);
173 	deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
174 	dsl_dataset_phys(ds)->ds_deadlist_obj =
175 	    dsl_dataset_phys(ds_next)->ds_deadlist_obj;
176 	dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
177 	dsl_deadlist_open(&ds->ds_deadlist, mos,
178 	    dsl_dataset_phys(ds)->ds_deadlist_obj);
179 	dsl_deadlist_open(&ds_next->ds_deadlist, mos,
180 	    dsl_dataset_phys(ds_next)->ds_deadlist_obj);
181 }
182 
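/*
 * Remove the deadlist entry for the given txg from every clone of this
 * dataset whose origin is newer than that txg, recursing into clones of
 * clones.  This collapses deadlist ranges in clone heads when a snapshot
 * is destroyed.
 */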
183 static void
184 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
185 {
186 	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
187 	zap_cursor_t zc;
188 	zap_attribute_t za;
189 
190 	/*
191 	 * If this is an old-format pool, dd_clones doesn't exist, so we
192 	 * can't find the clones; but dsl_deadlist_remove_key() is a no-op
193 	 * on old-format deadlists, so it doesn't matter.
194 	 */
195 	if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
196 		return;
197 
198 	for (zap_cursor_init(&zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
199 	    zap_cursor_retrieve(&zc, &za) == 0;
200 	    zap_cursor_advance(&zc)) {
201 		dsl_dataset_t *clone;
202 
203 		VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
204 		    za.za_first_integer, FTAG, &clone));
205 		if (clone->ds_dir->dd_origin_txg > mintxg) {
206 			dsl_deadlist_remove_key(&clone->ds_deadlist,
207 			    mintxg, tx);
208 			dsl_dataset_remove_clones_key(clone, mintxg, tx);
209 		}
210 		dsl_dataset_rele(clone, FTAG);
211 	}
212 	zap_cursor_fini(&zc);
213 }
214 
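/*
 * Sync-context implementation of snapshot destroy.  If the destroy is
 * deferred and the snapshot is still held (user references or clones),
 * just mark it DS_FLAG_DEFER_DESTROY.  Otherwise unlink the snapshot from
 * the prev/next snapshot chain, fix up the deadlists and the space charged
 * to snapshots, remove the name from the head dataset's snapshot namespace,
 * and free the dataset object itself.
 */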
215 void
216 dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
217 {
218 	int err;
219 	int after_branch_point = FALSE;
220 	dsl_pool_t *dp = ds->ds_dir->dd_pool;
221 	objset_t *mos = dp->dp_meta_objset;
222 	dsl_dataset_t *ds_prev = NULL;
223 	uint64_t obj;
224 
225 	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
226 	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
227 	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
228 	rrw_exit(&ds->ds_bp_rwlock, FTAG);
229 	ASSERT(refcount_is_zero(&ds->ds_longholds));
230 
231 	if (defer &&
232 	    (ds->ds_userrefs > 0 ||
233 	    dsl_dataset_phys(ds)->ds_num_children > 1)) {
234 		ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
235 		dmu_buf_will_dirty(ds->ds_dbuf, tx);
236 		dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
237 		spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
238 		return;
239 	}
240 
241 	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
242 
243 	/* We need to log before removing it from the namespace. */
244 	spa_history_log_internal_ds(ds, "destroy", tx, "");
245 
246 	dsl_scan_ds_destroyed(ds, tx);
247 
248 	obj = ds->ds_object;
249 
250 	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
251 		if (ds->ds_feature_inuse[f]) {
252 			dsl_dataset_deactivate_feature(obj, f, tx);
253 			ds->ds_feature_inuse[f] = B_FALSE;
254 		}
255 	}
256 	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
257 		ASSERT3P(ds->ds_prev, ==, NULL);
258 		VERIFY0(dsl_dataset_hold_obj(dp,
259 		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
260 		after_branch_point =
261 		    (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);
262 
263 		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
264 		if (after_branch_point &&
265 		    dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
266 			dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
267 			if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
268 				VERIFY0(zap_add_int(mos,
269 				    dsl_dataset_phys(ds_prev)->
270 				    ds_next_clones_obj,
271 				    dsl_dataset_phys(ds)->ds_next_snap_obj,
272 				    tx));
273 			}
274 		}
275 		if (!after_branch_point) {
276 			dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
277 			    dsl_dataset_phys(ds)->ds_next_snap_obj;
278 		}
279 	}
280 
281 	dsl_dataset_t *ds_next;
282 	uint64_t old_unique;
283 	uint64_t used = 0, comp = 0, uncomp = 0;
284 
285 	VERIFY0(dsl_dataset_hold_obj(dp,
286 	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
287 	ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
288 
289 	old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;
290 
291 	dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
292 	dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
293 	    dsl_dataset_phys(ds)->ds_prev_snap_obj;
294 	dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
295 	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
296 	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
297 	    ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);
298 
299 	if (ds_next->ds_deadlist.dl_oldfmt) {
300 		process_old_deadlist(ds, ds_prev, ds_next,
301 		    after_branch_point, tx);
302 	} else {
303 		/* Adjust prev's unique space. */
304 		if (ds_prev && !after_branch_point) {
305 			dsl_deadlist_space_range(&ds_next->ds_deadlist,
306 			    dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
307 			    dsl_dataset_phys(ds)->ds_prev_snap_txg,
308 			    &used, &comp, &uncomp);
309 			dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
310 		}
311 
312 		/* Adjust snapused. */
313 		dsl_deadlist_space_range(&ds_next->ds_deadlist,
314 		    dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
315 		    &used, &comp, &uncomp);
316 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
317 		    -used, -comp, -uncomp, tx);
318 
319 		/* Move blocks to be freed to pool's free list. */
320 		dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
321 		    &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
322 		    tx);
323 		dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
324 		    DD_USED_HEAD, used, comp, uncomp, tx);
325 
326 		/* Merge our deadlist into next's and free it. */
327 		dsl_deadlist_merge(&ds_next->ds_deadlist,
328 		    dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
329 	}
330 	dsl_deadlist_close(&ds->ds_deadlist);
331 	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
332 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
333 	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
334 
335 	/* Collapse range in clone heads */
336 	dsl_dataset_remove_clones_key(ds,
337 	    dsl_dataset_phys(ds)->ds_creation_txg, tx);
338 
339 	if (ds_next->ds_is_snapshot) {
340 		dsl_dataset_t *ds_nextnext;
341 
342 		/*
343 		 * Update next's unique to include blocks which
344 		 * were previously shared only by this snapshot
345 		 * and ds_next.  Those blocks were born after the
346 		 * prev snap and before this snap, and will have
347 		 * died after the next snap and before the one
348 		 * after that (i.e., they are on the deadlist of
349 		 * the snap after next).
350 		 */
351 		VERIFY0(dsl_dataset_hold_obj(dp,
352 		    dsl_dataset_phys(ds_next)->ds_next_snap_obj,
353 		    FTAG, &ds_nextnext));
354 		dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
355 		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
356 		    dsl_dataset_phys(ds)->ds_creation_txg,
357 		    &used, &comp, &uncomp);
358 		dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
359 		dsl_dataset_rele(ds_nextnext, FTAG);
360 		ASSERT3P(ds_next->ds_prev, ==, NULL);
361 
362 		/* Collapse range in this head. */
363 		dsl_dataset_t *hds;
364 		VERIFY0(dsl_dataset_hold_obj(dp,
365 		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
366 		dsl_deadlist_remove_key(&hds->ds_deadlist,
367 		    dsl_dataset_phys(ds)->ds_creation_txg, tx);
368 		dsl_dataset_rele(hds, FTAG);
369 
370 	} else {
371 		ASSERT3P(ds_next->ds_prev, ==, ds);
372 		dsl_dataset_rele(ds_next->ds_prev, ds_next);
373 		ds_next->ds_prev = NULL;
374 		if (ds_prev) {
375 			VERIFY0(dsl_dataset_hold_obj(dp,
376 			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
377 			    ds_next, &ds_next->ds_prev));
378 		}
379 
380 		dsl_dataset_recalc_head_uniq(ds_next);
381 
382 		/*
383 		 * Reduce the amount of our unconsumed refreservation
384 		 * being charged to our parent by the amount of
385 		 * new unique data we have gained.
386 		 */
387 		if (old_unique < ds_next->ds_reserved) {
388 			int64_t mrsdelta;
389 			uint64_t new_unique =
390 			    dsl_dataset_phys(ds_next)->ds_unique_bytes;
391 
392 			ASSERT(old_unique <= new_unique);
393 			mrsdelta = MIN(new_unique - old_unique,
394 			    ds_next->ds_reserved - old_unique);
395 			dsl_dir_diduse_space(ds->ds_dir,
396 			    DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
397 		}
398 	}
399 	dsl_dataset_rele(ds_next, FTAG);
400 
401 	/*
402 	 * Evict any cached objset for this snapshot before we
403 	 * finish destroying the dataset below.
404 	 */
405 	if (ds->ds_objset) {
406 		dmu_objset_evict(ds->ds_objset);
407 		ds->ds_objset = NULL;
408 	}
409 
410 	/* remove from snapshot namespace */
411 	dsl_dataset_t *ds_head;
412 	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
413 	VERIFY0(dsl_dataset_hold_obj(dp,
414 	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
415 	VERIFY0(dsl_dataset_get_snapname(ds));
416 #ifdef ZFS_DEBUG
417 	{
418 		uint64_t val;
419 
420 		err = dsl_dataset_snap_lookup(ds_head,
421 		    ds->ds_snapname, &val);
422 		ASSERT0(err);
423 		ASSERT3U(val, ==, obj);
424 	}
425 #endif
426 	VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
427 	dsl_dataset_rele(ds_head, FTAG);
428 
429 	if (ds_prev != NULL)
430 		dsl_dataset_rele(ds_prev, FTAG);
431 
432 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
433 
434 	if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
435 		uint64_t count;
436 		VERIFY0(zap_count(mos,
437 		    dsl_dataset_phys(ds)->ds_next_clones_obj, &count));
438 		ASSERT0(count);
439 		VERIFY0(dmu_object_free(mos,
440 		    dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
441 	}
442 	if (dsl_dataset_phys(ds)->ds_props_obj != 0)
443 		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
444 		    tx));
445 	if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
446 		VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
447 		    tx));
448 	dsl_dir_rele(ds->ds_dir, ds);
449 	ds->ds_dir = NULL;
450 	dmu_object_free_zapified(mos, obj, tx);
451 }
452 
453 void
454 dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
455 {
456 	dsl_destroy_snapshot_arg_t *ddsa = arg;
457 	const char *dsname = ddsa->ddsa_name;
458 	boolean_t defer = ddsa->ddsa_defer;
459 
460 	dsl_pool_t *dp = dmu_tx_pool(tx);
461 	dsl_dataset_t *ds;
462 
463 	int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
464 	if (error == ENOENT)
465 		return;
466 	ASSERT0(error);
467 	dsl_destroy_snapshot_sync_impl(ds, defer, tx);
468 	dsl_dataset_rele(ds, FTAG);
469 }
470 
471 /*
472  * The semantics of this function are described in the comment above
473  * lzc_destroy_snaps().  To summarize:
474  *
475  * The snapshots must all be in the same pool.
476  *
477  * Snapshots that don't exist will be silently ignored (considered to be
478  * "already deleted").
479  *
480  * On success, all snaps will be destroyed and this will return 0.
481  * On failure, no snaps will be destroyed, the errlist will be filled in,
482  * and this will return an errno.
483  */
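/*
 * An illustrative caller (compare dsl_destroy_snapshot() below) might look
 * roughly like this; the snapshot names here are hypothetical:
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "tank/fs@snap1");
 *	fnvlist_add_boolean(snaps, "tank/fs@snap2");
 *	int error = dsl_destroy_snapshots_nvl(snaps, B_FALSE, errlist);
 *	fnvlist_free(errlist);
 *	fnvlist_free(snaps);
 */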
484 int
485 dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
486     nvlist_t *errlist)
487 {
488 	if (nvlist_next_nvpair(snaps, NULL) == NULL)
489 		return (0);
490 
491 	/*
492 	 * lzc_destroy_snaps() is documented to take an nvlist whose
493 	 * values "don't matter".  We need to convert that nvlist to
494 	 * one that we know can be converted to Lua.  We also don't
495 	 * care about any duplicate entries, because the nvlist will
496 	 * be converted to a Lua table, which should take care of this.
497 	 */
498 	nvlist_t *snaps_normalized;
499 	VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
500 	for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
501 	    pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
502 		fnvlist_add_boolean_value(snaps_normalized,
503 		    nvpair_name(pair), B_TRUE);
504 	}
505 
506 	nvlist_t *arg;
507 	VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
508 	fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
509 	fnvlist_free(snaps_normalized);
510 	fnvlist_add_boolean_value(arg, "defer", defer);
511 
512 	nvlist_t *wrapper;
513 	VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
514 	fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
515 	fnvlist_free(arg);
516 
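	/*
	 * The channel program below checks every snapshot first and only
	 * proceeds to destroy them if all of the checks pass, which is
	 * what gives this function its all-or-nothing behavior.
	 */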
517 	const char *program =
518 	    "arg = ...\n"
519 	    "snaps = arg['snaps']\n"
520 	    "defer = arg['defer']\n"
521 	    "errors = { }\n"
522 	    "has_errors = false\n"
523 	    "for snap, v in pairs(snaps) do\n"
524 	    "    errno = zfs.check.destroy{snap, defer=defer}\n"
525 	    "    zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
526 	    "    if errno == ENOENT then\n"
527 	    "        snaps[snap] = nil\n"
528 	    "    elseif errno ~= 0 then\n"
529 	    "        errors[snap] = errno\n"
530 	    "        has_errors = true\n"
531 	    "    end\n"
532 	    "end\n"
533 	    "if has_errors then\n"
534 	    "    return errors\n"
535 	    "end\n"
536 	    "for snap, v in pairs(snaps) do\n"
537 	    "    errno = zfs.sync.destroy{snap, defer=defer}\n"
538 	    "    assert(errno == 0)\n"
539 	    "end\n"
540 	    "return { }\n";
541 
542 	nvlist_t *result = fnvlist_alloc();
543 	int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
544 	    program,
545 	    B_TRUE,
546 	    0,
547 	    zfs_lua_max_memlimit,
548 	    nvlist_next_nvpair(wrapper, NULL), result);
549 	fnvlist_free(wrapper);
550 	if (error != 0) {
551 		char *errorstr = NULL;
552 		(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
553 		if (errorstr != NULL)
554 			zfs_dbgmsg("%s", errorstr);
555 		fnvlist_free(result);
556 		return (error);
557 	}
558 
559 	/*
560 	 * lzc_destroy_snaps() is documented to fill the errlist with
561 	 * int32 values, so we need to convert the int64 values that are
562 	 * returned from Lua.
563 	 */
564 	int rv = 0;
565 	nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
566 	for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
567 	    pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
568 		int32_t val = (int32_t)fnvpair_value_int64(pair);
569 		if (rv == 0)
570 			rv = val;
571 		fnvlist_add_int32(errlist, nvpair_name(pair), val);
572 	}
573 	fnvlist_free(result);
574 	return (rv);
575 }
576 
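/*
 * Convenience wrapper that destroys a single snapshot by name.  It builds
 * a one-entry nvlist for dsl_destroy_snapshots_nvl() and discards the
 * per-snapshot error list.
 */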
577 int
578 dsl_destroy_snapshot(const char *name, boolean_t defer)
579 {
580 	int error;
581 	nvlist_t *nvl = fnvlist_alloc();
582 	nvlist_t *errlist = fnvlist_alloc();
583 
584 	fnvlist_add_boolean(nvl, name);
585 	error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
586 	fnvlist_free(errlist);
587 	fnvlist_free(nvl);
588 	return (error);
589 }
590 
591 struct killarg {
592 	dsl_dataset_t *ds;
593 	dmu_tx_t *tx;
594 };
595 
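/*
 * traverse_dataset() callback used by old_synchronous_dataset_destroy().
 * Intent-log blocks carry no accounting and are freed directly; all other
 * blocks, which must have been born after the previous snapshot, are killed
 * through dsl_dataset_block_kill().
 */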
596 /* ARGSUSED */
597 static int
598 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
599     const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
600 {
601 	struct killarg *ka = arg;
602 	dmu_tx_t *tx = ka->tx;
603 
604 	if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
605 		return (0);
606 
607 	if (zb->zb_level == ZB_ZIL_LEVEL) {
608 		ASSERT(zilog != NULL);
609 		/*
610 		 * It's a block in the intent log.  It has no
611 		 * accounting, so just free it.
612 		 */
613 		dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
614 	} else {
615 		ASSERT(zilog == NULL);
616 		ASSERT3U(bp->blk_birth, >,
617 		    dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
618 		(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
619 	}
620 
621 	return (0);
622 }
623 
624 static void
625 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
626 {
627 	struct killarg ka;
628 
629 	/*
630 	 * Free everything that we point to (that was born after
631 	 * the previous snapshot, if we are a clone).
632 	 *
633 	 * NB: this should be very quick, because we already
634 	 * freed all the objects in open context.
635 	 */
636 	ka.ds = ds;
637 	ka.tx = tx;
638 	VERIFY0(traverse_dataset(ds,
639 	    dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST,
640 	    kill_blkptr, &ka));
641 	ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
642 	    dsl_dataset_phys(ds)->ds_unique_bytes == 0);
643 }
644 
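/*
 * Check whether a head dataset (filesystem, volume, or clone) can be
 * destroyed: it must not be a snapshot, must not have more long holds than
 * expected, must have no snapshots of its own and no child datasets, and if
 * its deferred-destroy origin snapshot would be destroyed along with it,
 * that origin must not be long-held either.
 */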
645 int
646 dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
647 {
648 	int error;
649 	uint64_t count;
650 	objset_t *mos;
651 
652 	ASSERT(!ds->ds_is_snapshot);
653 	if (ds->ds_is_snapshot)
654 		return (SET_ERROR(EINVAL));
655 
656 	if (refcount_count(&ds->ds_longholds) != expected_holds)
657 		return (SET_ERROR(EBUSY));
658 
659 	mos = ds->ds_dir->dd_pool->dp_meta_objset;
660 
661 	/*
662 	 * Can't delete a head dataset if there are snapshots of it.
663 	 * (Except if the only snapshots are from the branch we cloned
664 	 * from.)
665 	 */
666 	if (ds->ds_prev != NULL &&
667 	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
668 		return (SET_ERROR(EBUSY));
669 
670 	/*
671 	 * Can't delete if there are children of this fs.
672 	 */
673 	error = zap_count(mos,
674 	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
675 	if (error != 0)
676 		return (error);
677 	if (count != 0)
678 		return (SET_ERROR(EEXIST));
679 
680 	if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
681 	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
682 	    ds->ds_prev->ds_userrefs == 0) {
683 		/* We need to remove the origin snapshot as well. */
684 		if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
685 			return (SET_ERROR(EBUSY));
686 	}
687 	return (0);
688 }
689 
690 int
691 dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
692 {
693 	dsl_destroy_head_arg_t *ddha = arg;
694 	dsl_pool_t *dp = dmu_tx_pool(tx);
695 	dsl_dataset_t *ds;
696 	int error;
697 
698 	error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
699 	if (error != 0)
700 		return (error);
701 
702 	error = dsl_destroy_head_check_impl(ds, 0);
703 	dsl_dataset_rele(ds, FTAG);
704 	return (error);
705 }
706 
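/*
 * Free the dsl_dir once its head dataset has been destroyed: drop the
 * filesystem count, clear the reservation, verify that all of its space
 * accounting has reached zero, and delete the directory's ZAP objects and
 * its entry in the parent's child-directory ZAP.
 */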
707 static void
708 dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
709 {
710 	dsl_dir_t *dd;
711 	dsl_pool_t *dp = dmu_tx_pool(tx);
712 	objset_t *mos = dp->dp_meta_objset;
713 	dd_used_t t;
714 
715 	ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
716 
717 	VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
718 
719 	ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
720 
721 	/*
722 	 * Decrement the filesystem count for all parent filesystems.
723 	 *
724 	 * When we receive an incremental stream into a filesystem that already
725 	 * exists, a temporary clone is created.  We never count this temporary
726 	 * clone, whose name begins with a '%'.
727 	 */
728 	if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
729 		dsl_fs_ss_count_adjust(dd->dd_parent, -1,
730 		    DD_FIELD_FILESYSTEM_COUNT, tx);
731 
732 	/*
733 	 * Remove our reservation. The impl() routine avoids setting the
734 	 * actual property, which would require the (already destroyed) ds.
735 	 */
736 	dsl_dir_set_reservation_sync_impl(dd, 0, tx);
737 
738 	ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
739 	ASSERT0(dsl_dir_phys(dd)->dd_reserved);
740 	for (t = 0; t < DD_USED_NUM; t++)
741 		ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);
742 
743 	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
744 	VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
745 	VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
746 	VERIFY0(zap_remove(mos,
747 	    dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
748 	    dd->dd_myname, tx));
749 
750 	dsl_dir_rele(dd, FTAG);
751 	dmu_object_free_zapified(mos, ddobj, tx);
752 }
753 
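/*
 * Sync-context implementation of head (filesystem/volume/clone) destroy:
 * release the refreservation, deactivate per-dataset features, detach from
 * the origin snapshot if this is a clone, free or hand off the dead blocks
 * (synchronously on old pools, via the async-destroy bptree otherwise),
 * remove the dataset from its dsl_dir, destroy the dsl_dir, and finally
 * destroy a deferred-destroy origin snapshot if this was its last clone.
 */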
754 void
755 dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
756 {
757 	dsl_pool_t *dp = dmu_tx_pool(tx);
758 	objset_t *mos = dp->dp_meta_objset;
759 	uint64_t obj, ddobj, prevobj = 0;
760 	boolean_t rmorigin;
761 
762 	ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
763 	ASSERT(ds->ds_prev == NULL ||
764 	    dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
765 	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
766 	ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
767 	rrw_exit(&ds->ds_bp_rwlock, FTAG);
768 	ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
769 
770 	/* We need to log before removing it from the namespace. */
771 	spa_history_log_internal_ds(ds, "destroy", tx, "");
772 
773 	rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
774 	    DS_IS_DEFER_DESTROY(ds->ds_prev) &&
775 	    dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
776 	    ds->ds_prev->ds_userrefs == 0);
777 
778 	/* Remove our reservation. */
779 	if (ds->ds_reserved != 0) {
780 		dsl_dataset_set_refreservation_sync_impl(ds,
781 		    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
782 		    0, tx);
783 		ASSERT0(ds->ds_reserved);
784 	}
785 
786 	obj = ds->ds_object;
787 
788 	for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
789 		if (ds->ds_feature_inuse[f]) {
790 			dsl_dataset_deactivate_feature(obj, f, tx);
791 			ds->ds_feature_inuse[f] = B_FALSE;
792 		}
793 	}
794 
795 	dsl_scan_ds_destroyed(ds, tx);
796 
797 	if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
798 		/* This is a clone */
799 		ASSERT(ds->ds_prev != NULL);
800 		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
801 		    obj);
802 		ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
803 
804 		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
805 		if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
806 			dsl_dataset_remove_from_next_clones(ds->ds_prev,
807 			    obj, tx);
808 		}
809 
810 		ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
811 		dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
812 	}
813 
814 	/*
815 	 * Destroy the deadlist.  Unless it's a clone, the
816 	 * deadlist should be empty.  (If it's a clone, it's
817 	 * safe to ignore the deadlist contents.)
818 	 */
819 	dsl_deadlist_close(&ds->ds_deadlist);
820 	dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
821 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
822 	dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
823 
824 	objset_t *os;
825 	VERIFY0(dmu_objset_from_ds(ds, &os));
826 
827 	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
828 		old_synchronous_dataset_destroy(ds, tx);
829 	} else {
830 		/*
831 		 * Move the bptree into the pool's list of trees to
832 		 * clean up and update space accounting information.
833 		 */
834 		uint64_t used, comp, uncomp;
835 
836 		zil_destroy_sync(dmu_objset_zil(os), tx);
837 
838 		if (!spa_feature_is_active(dp->dp_spa,
839 		    SPA_FEATURE_ASYNC_DESTROY)) {
840 			dsl_scan_t *scn = dp->dp_scan;
841 			spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
842 			    tx);
843 			dp->dp_bptree_obj = bptree_alloc(mos, tx);
844 			VERIFY0(zap_add(mos,
845 			    DMU_POOL_DIRECTORY_OBJECT,
846 			    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
847 			    &dp->dp_bptree_obj, tx));
848 			ASSERT(!scn->scn_async_destroying);
849 			scn->scn_async_destroying = B_TRUE;
850 		}
851 
852 		used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
853 		comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
854 		uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;
855 
856 		ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
857 		    dsl_dataset_phys(ds)->ds_unique_bytes == used);
858 
859 		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
860 		bptree_add(mos, dp->dp_bptree_obj,
861 		    &dsl_dataset_phys(ds)->ds_bp,
862 		    dsl_dataset_phys(ds)->ds_prev_snap_txg,
863 		    used, comp, uncomp, tx);
864 		rrw_exit(&ds->ds_bp_rwlock, FTAG);
865 		dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
866 		    -used, -comp, -uncomp, tx);
867 		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
868 		    used, comp, uncomp, tx);
869 	}
870 
871 	if (ds->ds_prev != NULL) {
872 		if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
873 			VERIFY0(zap_remove_int(mos,
874 			    dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
875 			    ds->ds_object, tx));
876 		}
877 		prevobj = ds->ds_prev->ds_object;
878 		dsl_dataset_rele(ds->ds_prev, ds);
879 		ds->ds_prev = NULL;
880 	}
881 
882 	/*
883 	 * This must be done after the traverse_dataset() call in
884 	 * old_synchronous_dataset_destroy(), because it re-opens the objset.
885 	 */
886 	if (ds->ds_objset) {
887 		dmu_objset_evict(ds->ds_objset);
888 		ds->ds_objset = NULL;
889 	}
890 
891 	/* Erase the link in the dir */
892 	dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
893 	dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
894 	ddobj = ds->ds_dir->dd_object;
895 	ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
896 	VERIFY0(zap_destroy(mos,
897 	    dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));
898 
899 	if (ds->ds_bookmarks != 0) {
900 		VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
901 		spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
902 	}
903 
904 	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
905 
906 	ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
907 	ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
908 	ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
909 	dsl_dir_rele(ds->ds_dir, ds);
910 	ds->ds_dir = NULL;
911 	dmu_object_free_zapified(mos, obj, tx);
912 
913 	dsl_dir_destroy_sync(ddobj, tx);
914 
915 	if (rmorigin) {
916 		dsl_dataset_t *prev;
917 		VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
918 		dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
919 		dsl_dataset_rele(prev, FTAG);
920 	}
921 }
922 
923 void
924 dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
925 {
926 	dsl_destroy_head_arg_t *ddha = arg;
927 	dsl_pool_t *dp = dmu_tx_pool(tx);
928 	dsl_dataset_t *ds;
929 
930 	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
931 	dsl_destroy_head_sync_impl(ds, tx);
932 	dsl_dataset_rele(ds, FTAG);
933 }
934 
935 static void
936 dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
937 {
938 	dsl_destroy_head_arg_t *ddha = arg;
939 	dsl_pool_t *dp = dmu_tx_pool(tx);
940 	dsl_dataset_t *ds;
941 
942 	VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
943 
944 	/* Mark it as inconsistent on-disk, in case we crash */
945 	dmu_buf_will_dirty(ds->ds_dbuf, tx);
946 	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
947 
948 	spa_history_log_internal_ds(ds, "destroy begin", tx, "");
949 	dsl_dataset_rele(ds, FTAG);
950 }
951 
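/*
 * Open-context entry point for destroying a head dataset.  On pools without
 * the async_destroy feature, first mark the dataset inconsistent and free
 * its objects from open context so the final sync task stays short; the
 * destroy itself is then performed by dsl_destroy_head_sync() in syncing
 * context.
 */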
952 int
953 dsl_destroy_head(const char *name)
954 {
955 	dsl_destroy_head_arg_t ddha;
956 	int error;
957 	spa_t *spa;
958 	boolean_t isenabled;
959 
960 #ifdef _KERNEL
961 	zfs_destroy_unmount_origin(name);
962 #endif
963 
964 	error = spa_open(name, &spa, FTAG);
965 	if (error != 0)
966 		return (error);
967 	isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
968 	spa_close(spa, FTAG);
969 
970 	ddha.ddha_name = name;
971 
972 	if (!isenabled) {
973 		objset_t *os;
974 
975 		error = dsl_sync_task(name, dsl_destroy_head_check,
976 		    dsl_destroy_head_begin_sync, &ddha,
977 		    0, ZFS_SPACE_CHECK_NONE);
978 		if (error != 0)
979 			return (error);
980 
981 		/*
982 		 * Head deletion is processed in one txg on old pools;
983 		 * remove the objects from open context so that the txg sync
984 		 * is not too long.
985 		 */
986 		error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, FTAG, &os);
987 		if (error == 0) {
988 			uint64_t prev_snap_txg =
989 			    dsl_dataset_phys(dmu_objset_ds(os))->
990 			    ds_prev_snap_txg;
991 			for (uint64_t obj = 0; error == 0;
992 			    error = dmu_object_next(os, &obj, FALSE,
993 			    prev_snap_txg))
994 				(void) dmu_free_long_object(os, obj);
995 			/* sync out all frees */
996 			txg_wait_synced(dmu_objset_pool(os), 0);
997 			dmu_objset_disown(os, FTAG);
998 		}
999 	}
1000 
1001 	return (dsl_sync_task(name, dsl_destroy_head_check,
1002 	    dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
1003 }
1004 
1005 /*
1006  * Note, this function is used as the callback for dmu_objset_find().  We
1007  * always return 0 so that we will continue to find and process
1008  * inconsistent datasets, even if we encounter an error trying to
1009  * process one of them.
1010  */
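/*
 * An illustrative invocation (the actual call site is in the pool
 * import/load path) to clean up after an interrupted receive or destroy
 * would look roughly like:
 *
 *	(void) dmu_objset_find(spa_name(spa), dsl_destroy_inconsistent,
 *	    NULL, DS_FIND_CHILDREN);
 */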
1011 /* ARGSUSED */
1012 int
1013 dsl_destroy_inconsistent(const char *dsname, void *arg)
1014 {
1015 	objset_t *os;
1016 
1017 	if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
1018 		boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
1019 
1020 		/*
1021 		 * If the dataset is inconsistent because a resumable receive
1022 		 * has failed, then do not destroy it.
1023 		 */
1024 		if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
1025 			need_destroy = B_FALSE;
1026 
1027 		dmu_objset_rele(os, FTAG);
1028 		if (need_destroy)
1029 			(void) dsl_destroy_head(dsname);
1030 	}
1031 	return (0);
1032 }
1033