1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * The contents of this file are subject to the terms of the
6 * Common Development and Distribution License (the "License").
7 * You may not use this file except in compliance with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or https://opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
26 * Copyright (c) 2013 by Joyent, Inc. All rights reserved.
27 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
28 */
29
30 #include <sys/zfs_context.h>
31 #include <sys/dsl_userhold.h>
32 #include <sys/dsl_dataset.h>
33 #include <sys/dsl_synctask.h>
34 #include <sys/dsl_destroy.h>
35 #include <sys/dsl_bookmark.h>
36 #include <sys/dmu_tx.h>
37 #include <sys/dsl_pool.h>
38 #include <sys/dsl_dir.h>
39 #include <sys/dmu_traverse.h>
40 #include <sys/dsl_scan.h>
41 #include <sys/dmu_objset.h>
42 #include <sys/zap.h>
43 #include <sys/zfeature.h>
44 #include <sys/zfs_ioctl.h>
45 #include <sys/dsl_deleg.h>
46 #include <sys/dmu_impl.h>
47 #include <sys/zvol.h>
48 #include <sys/zcp.h>
49 #include <sys/dsl_deadlist.h>
50 #include <sys/zthr.h>
51 #include <sys/spa_impl.h>
52
53 extern int zfs_snapshot_history_enabled;
54
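/*
 * Check whether the given snapshot can be destroyed.  It must actually be a
 * snapshot and must not be long-held.  A deferred destroy only requires pool
 * support for user refs; an immediate destroy additionally requires that the
 * snapshot have no user holds and not be a branch point (more than one child).
 */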
55 int
56 dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
57 {
58 if (!ds->ds_is_snapshot)
59 return (SET_ERROR(EINVAL));
60
61 if (dsl_dataset_long_held(ds))
62 return (SET_ERROR(EBUSY));
63
64 /*
65 * Only allow deferred destroy on pools that support it.
66 * NOTE: deferred destroy is only supported on snapshots.
67 */
68 if (defer) {
69 if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
70 SPA_VERSION_USERREFS)
71 return (SET_ERROR(ENOTSUP));
72 return (0);
73 }
74
75 /*
76 * If this snapshot has an elevated user reference count,
77 * we can't destroy it yet.
78 */
79 if (ds->ds_userrefs > 0)
80 return (SET_ERROR(EBUSY));
81
82 /*
83 * Can't delete a branch point.
84 */
85 if (dsl_dataset_phys(ds)->ds_num_children > 1)
86 return (SET_ERROR(EEXIST));
87
88 return (0);
89 }
90
91 int
92 dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
93 {
94 dsl_destroy_snapshot_arg_t *ddsa = arg;
95 const char *dsname = ddsa->ddsa_name;
96 boolean_t defer = ddsa->ddsa_defer;
97
98 dsl_pool_t *dp = dmu_tx_pool(tx);
99 int error = 0;
100 dsl_dataset_t *ds;
101
102 error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
103
104 /*
105 * If the snapshot does not exist, silently ignore it, and
106 * dsl_destroy_snapshot_sync() will be a no-op
107 * (it's "already destroyed").
108 */
109 if (error == ENOENT)
110 return (0);
111
112 if (error == 0) {
113 error = dsl_destroy_snapshot_check_impl(ds, defer);
114 dsl_dataset_rele(ds, FTAG);
115 }
116
117 return (error);
118 }
119
120 struct process_old_arg {
121 dsl_dataset_t *ds;
122 dsl_dataset_t *ds_prev;
123 boolean_t after_branch_point;
124 zio_t *pio;
125 uint64_t used, comp, uncomp;
126 };
127
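/*
 * Callback for bpobj_iterate() over the next snapshot's old-format deadlist.
 * Blocks born at or before this snapshot's previous snapshot stay dead and
 * are reinserted into our deadlist (crediting prev's unique space when
 * appropriate); blocks born after it were unique to this snapshot and can be
 * freed now.
 */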
128 static int
129 process_old_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
130 {
131 struct process_old_arg *poa = arg;
132 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
133
134 ASSERT(!BP_IS_HOLE(bp));
135
136 if (BP_GET_LOGICAL_BIRTH(bp) <=
137 dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
138 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, bp_freed, tx);
139 if (poa->ds_prev && !poa->after_branch_point &&
140 BP_GET_LOGICAL_BIRTH(bp) >
141 dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
142 dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
143 bp_get_dsize_sync(dp->dp_spa, bp);
144 }
145 } else {
146 poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
147 poa->comp += BP_GET_PSIZE(bp);
148 poa->uncomp += BP_GET_UCSIZE(bp);
149 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
150 }
151 return (0);
152 }
153
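/*
 * Deadlist handling for pools still using old-format (flat bpobj) deadlists:
 * walk next's deadlist with process_old_cb() to free or retain each block,
 * adjust the snapused accounting, and then swap the deadlist objects of this
 * snapshot and the next one.
 */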
154 static void
155 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
156 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
157 {
158 struct process_old_arg poa = { 0 };
159 dsl_pool_t *dp = ds->ds_dir->dd_pool;
160 objset_t *mos = dp->dp_meta_objset;
161 uint64_t deadlist_obj;
162
163 ASSERT(ds->ds_deadlist.dl_oldfmt);
164 ASSERT(ds_next->ds_deadlist.dl_oldfmt);
165
166 poa.ds = ds;
167 poa.ds_prev = ds_prev;
168 poa.after_branch_point = after_branch_point;
169 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
170 VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
171 process_old_cb, &poa, tx));
172 VERIFY0(zio_wait(poa.pio));
173 ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);
174
175 /* change snapused */
176 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
177 -poa.used, -poa.comp, -poa.uncomp, tx);
178
179 /* swap next's deadlist to our deadlist */
180 dsl_deadlist_close(&ds->ds_deadlist);
181 dsl_deadlist_close(&ds_next->ds_deadlist);
182 deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
183 dsl_dataset_phys(ds)->ds_deadlist_obj =
184 dsl_dataset_phys(ds_next)->ds_deadlist_obj;
185 dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
186 VERIFY0(dsl_deadlist_open(&ds->ds_deadlist, mos,
187 dsl_dataset_phys(ds)->ds_deadlist_obj));
188 VERIFY0(dsl_deadlist_open(&ds_next->ds_deadlist, mos,
189 dsl_dataset_phys(ds_next)->ds_deadlist_obj));
190 }
191
192 typedef struct remaining_clones_key {
193 dsl_dataset_t *rck_clone;
194 list_node_t rck_node;
195 } remaining_clones_key_t;
196
197 static remaining_clones_key_t *
198 rck_alloc(dsl_dataset_t *clone)
199 {
200 remaining_clones_key_t *rck = kmem_alloc(sizeof (*rck), KM_SLEEP);
201 rck->rck_clone = clone;
202 return (rck);
203 }
204
205 static void
206 dsl_dir_remove_clones_key_impl(dsl_dir_t *dd, uint64_t mintxg, dmu_tx_t *tx,
207 list_t *stack, const void *tag)
208 {
209 objset_t *mos = dd->dd_pool->dp_meta_objset;
210
211 /*
212 * If it is the old version, dd_clones doesn't exist so we can't
213 * find the clones, but dsl_deadlist_remove_key() is a no-op so it
214 * doesn't matter.
215 */
216 if (dsl_dir_phys(dd)->dd_clones == 0)
217 return;
218
219 zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
220 zap_attribute_t *za = zap_attribute_alloc();
221
222 for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
223 zap_cursor_retrieve(zc, za) == 0;
224 zap_cursor_advance(zc)) {
225 dsl_dataset_t *clone;
226
227 VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
228 za->za_first_integer, tag, &clone));
229
230 if (clone->ds_dir->dd_origin_txg > mintxg) {
231 dsl_deadlist_remove_key(&clone->ds_deadlist,
232 mintxg, tx);
233
234 if (dsl_dataset_remap_deadlist_exists(clone)) {
235 dsl_deadlist_remove_key(
236 &clone->ds_remap_deadlist, mintxg, tx);
237 }
238
239 list_insert_head(stack, rck_alloc(clone));
240 } else {
241 dsl_dataset_rele(clone, tag);
242 }
243 }
244 zap_cursor_fini(zc);
245
246 zap_attribute_free(za);
247 kmem_free(zc, sizeof (zap_cursor_t));
248 }
249
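/*
 * Remove the given birth-txg key from the deadlists of every clone hanging
 * off of this dsl_dir.  The clone tree is walked iteratively with an explicit
 * stack rather than by recursion, so deeply nested clones do not overflow the
 * kernel stack.
 */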
250 void
251 dsl_dir_remove_clones_key(dsl_dir_t *top_dd, uint64_t mintxg, dmu_tx_t *tx)
252 {
253 list_t stack;
254
255 list_create(&stack, sizeof (remaining_clones_key_t),
256 offsetof(remaining_clones_key_t, rck_node));
257
258 dsl_dir_remove_clones_key_impl(top_dd, mintxg, tx, &stack, FTAG);
259 for (remaining_clones_key_t *rck = list_remove_head(&stack);
260 rck != NULL; rck = list_remove_head(&stack)) {
261 dsl_dataset_t *clone = rck->rck_clone;
262 dsl_dir_t *clone_dir = clone->ds_dir;
263
264 kmem_free(rck, sizeof (*rck));
265
266 dsl_dir_remove_clones_key_impl(clone_dir, mintxg, tx,
267 &stack, FTAG);
268 dsl_dataset_rele(clone, FTAG);
269 }
270
271 list_destroy(&stack);
272 }
273
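/*
 * Remap-deadlist handling for snapshot destroy: move the portion of next's
 * remap deadlist that predates our previous snapshot to the pool-wide
 * obsolete bpobj, then merge this snapshot's remap deadlist (if any) into
 * next's and destroy it.
 */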
274 static void
275 dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
276 dmu_tx_t *tx)
277 {
278 dsl_pool_t *dp = ds->ds_dir->dd_pool;
279
280 /* Move blocks to be obsoleted to pool's obsolete list. */
281 if (dsl_dataset_remap_deadlist_exists(ds_next)) {
282 if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
283 dsl_pool_create_obsolete_bpobj(dp, tx);
284
285 dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
286 &dp->dp_obsolete_bpobj,
287 dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
288 }
289
290 /* Merge our deadlist into next's and free it. */
291 if (dsl_dataset_remap_deadlist_exists(ds)) {
292 uint64_t remap_deadlist_object =
293 dsl_dataset_get_remap_deadlist_object(ds);
294 ASSERT(remap_deadlist_object != 0);
295
296 mutex_enter(&ds_next->ds_remap_deadlist_lock);
297 if (!dsl_dataset_remap_deadlist_exists(ds_next))
298 dsl_dataset_create_remap_deadlist(ds_next, tx);
299 mutex_exit(&ds_next->ds_remap_deadlist_lock);
300
301 dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
302 remap_deadlist_object, tx);
303 dsl_dataset_destroy_remap_deadlist(ds, tx);
304 }
305 }
306
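/*
 * Destroy a snapshot in syncing context.  If the destroy is deferred and the
 * snapshot still has user holds or clones, just mark it DS_FLAG_DEFER_DESTROY
 * and return; otherwise unlink it from its neighbors, fix up deadlists and
 * space accounting, remove it from the snapshot namespace, and free its
 * objects.
 */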
307 void
308 dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
309 {
310 int after_branch_point = FALSE;
311 dsl_pool_t *dp = ds->ds_dir->dd_pool;
312 objset_t *mos = dp->dp_meta_objset;
313 dsl_dataset_t *ds_prev = NULL;
314 uint64_t obj;
315
316 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
317 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
318 ASSERT3U(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=,
319 tx->tx_txg);
320 rrw_exit(&ds->ds_bp_rwlock, FTAG);
321 ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));
322
323 if (defer &&
324 (ds->ds_userrefs > 0 ||
325 dsl_dataset_phys(ds)->ds_num_children > 1)) {
326 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
327 dmu_buf_will_dirty(ds->ds_dbuf, tx);
328 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
329 if (zfs_snapshot_history_enabled) {
330 spa_history_log_internal_ds(ds, "defer_destroy", tx,
331 " ");
332 }
333 return;
334 }
335
336 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
337
338 if (zfs_snapshot_history_enabled) {
339 /* We need to log before removing it from the namespace. */
340 spa_history_log_internal_ds(ds, "destroy", tx, " ");
341 }
342
343 dsl_scan_ds_destroyed(ds, tx);
344
345 obj = ds->ds_object;
346
347 boolean_t book_exists = dsl_bookmark_ds_destroyed(ds, tx);
348
349 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
350 if (dsl_dataset_feature_is_active(ds, f))
351 dsl_dataset_deactivate_feature(ds, f, tx);
352 }
353 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
354 ASSERT3P(ds->ds_prev, ==, NULL);
355 VERIFY0(dsl_dataset_hold_obj(dp,
356 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
357 after_branch_point =
358 (dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);
359
360 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
361 if (after_branch_point &&
362 dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
363 dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
364 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
365 VERIFY0(zap_add_int(mos,
366 dsl_dataset_phys(ds_prev)->
367 ds_next_clones_obj,
368 dsl_dataset_phys(ds)->ds_next_snap_obj,
369 tx));
370 }
371 }
372 if (!after_branch_point) {
373 dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
374 dsl_dataset_phys(ds)->ds_next_snap_obj;
375 }
376 }
377
378 dsl_dataset_t *ds_next;
379 uint64_t old_unique;
380 uint64_t used = 0, comp = 0, uncomp = 0;
381
382 VERIFY0(dsl_dataset_hold_obj(dp,
383 dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
384 ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
385
386 old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;
387
388 dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
389 dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
390 dsl_dataset_phys(ds)->ds_prev_snap_obj;
391 dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
392 dsl_dataset_phys(ds)->ds_prev_snap_txg;
393 ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
394 ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);
395
396 if (ds_next->ds_deadlist.dl_oldfmt) {
397 process_old_deadlist(ds, ds_prev, ds_next,
398 after_branch_point, tx);
399 } else {
400 /* Adjust prev's unique space. */
401 if (ds_prev && !after_branch_point) {
402 dsl_deadlist_space_range(&ds_next->ds_deadlist,
403 dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
404 dsl_dataset_phys(ds)->ds_prev_snap_txg,
405 &used, &comp, &uncomp);
406 dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
407 }
408
409 /* Adjust snapused. */
410 dsl_deadlist_space_range(&ds_next->ds_deadlist,
411 dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
412 &used, &comp, &uncomp);
413 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
414 -used, -comp, -uncomp, tx);
415
416 /* Move blocks to be freed to pool's free list. */
417 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
418 &dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
419 tx);
420 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
421 DD_USED_HEAD, used, comp, uncomp, tx);
422
423 /* Merge our deadlist into next's and free it. */
424 dsl_deadlist_merge(&ds_next->ds_deadlist,
425 dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
426
427 /*
428 * We are done with the deadlist tree (generated/used
429 * by dsl_deadlist_move_bpobj() and dsl_deadlist_merge()).
430 * Discard it to save memory.
431 */
432 dsl_deadlist_discard_tree(&ds_next->ds_deadlist);
433 }
434
435 dsl_deadlist_close(&ds->ds_deadlist);
436 dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
437 dmu_buf_will_dirty(ds->ds_dbuf, tx);
438 dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
439
440 dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);
441
442 if (!book_exists) {
443 /* Collapse range in clone heads */
444 dsl_dir_remove_clones_key(ds->ds_dir,
445 dsl_dataset_phys(ds)->ds_creation_txg, tx);
446 }
447
448 if (ds_next->ds_is_snapshot) {
449 dsl_dataset_t *ds_nextnext;
450
451 /*
452 * Update next's unique to include blocks which
453 * were previously shared by only this snapshot
454 * and it. Those blocks will be born after the
455 * prev snap and before this snap, and will have
456 * died after the next snap and before the one
457 * after that (ie. be on the snap after next's
458 * deadlist).
459 */
460 VERIFY0(dsl_dataset_hold_obj(dp,
461 dsl_dataset_phys(ds_next)->ds_next_snap_obj,
462 FTAG, &ds_nextnext));
463 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
464 dsl_dataset_phys(ds)->ds_prev_snap_txg,
465 dsl_dataset_phys(ds)->ds_creation_txg,
466 &used, &comp, &uncomp);
467 dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
468 dsl_dataset_rele(ds_nextnext, FTAG);
469 ASSERT3P(ds_next->ds_prev, ==, NULL);
470
471 /* Collapse range in this head. */
472 dsl_dataset_t *hds;
473 VERIFY0(dsl_dataset_hold_obj(dp,
474 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
475 FTAG, &hds));
476 if (!book_exists) {
477 /* Collapse range in this head. */
478 dsl_deadlist_remove_key(&hds->ds_deadlist,
479 dsl_dataset_phys(ds)->ds_creation_txg, tx);
480 }
481 if (dsl_dataset_remap_deadlist_exists(hds)) {
482 dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
483 dsl_dataset_phys(ds)->ds_creation_txg, tx);
484 }
485 dsl_dataset_rele(hds, FTAG);
486
487 } else {
488 ASSERT3P(ds_next->ds_prev, ==, ds);
489 dsl_dataset_rele(ds_next->ds_prev, ds_next);
490 ds_next->ds_prev = NULL;
491 if (ds_prev) {
492 VERIFY0(dsl_dataset_hold_obj(dp,
493 dsl_dataset_phys(ds)->ds_prev_snap_obj,
494 ds_next, &ds_next->ds_prev));
495 }
496
497 dsl_dataset_recalc_head_uniq(ds_next);
498
499 /*
500 * Reduce the amount of our unconsumed refreservation
501 * being charged to our parent by the amount of
502 * new unique data we have gained.
503 */
504 if (old_unique < ds_next->ds_reserved) {
505 int64_t mrsdelta;
506 uint64_t new_unique =
507 dsl_dataset_phys(ds_next)->ds_unique_bytes;
508
509 ASSERT(old_unique <= new_unique);
510 mrsdelta = MIN(new_unique - old_unique,
511 ds_next->ds_reserved - old_unique);
512 dsl_dir_diduse_space(ds->ds_dir,
513 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
514 }
515 }
516 dsl_dataset_rele(ds_next, FTAG);
517
518 /*
519 * This must be done after the dsl_traverse(), because it will
520 * re-open the objset.
521 */
522 if (ds->ds_objset) {
523 dmu_objset_evict(ds->ds_objset);
524 ds->ds_objset = NULL;
525 }
526
527 /* remove from snapshot namespace */
528 dsl_dataset_t *ds_head;
529 ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
530 VERIFY0(dsl_dataset_hold_obj(dp,
531 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
532 VERIFY0(dsl_dataset_get_snapname(ds));
533 #ifdef ZFS_DEBUG
534 {
535 uint64_t val;
536 int err;
537
538 err = dsl_dataset_snap_lookup(ds_head,
539 ds->ds_snapname, &val);
540 ASSERT0(err);
541 ASSERT3U(val, ==, obj);
542 }
543 #endif
544 VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
545 dsl_dataset_rele(ds_head, FTAG);
546
547 if (ds_prev != NULL)
548 dsl_dataset_rele(ds_prev, FTAG);
549
550 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
551
552 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
553 uint64_t count __maybe_unused;
554 ASSERT0(zap_count(mos,
555 dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
556 count == 0);
557 VERIFY0(dmu_object_free(mos,
558 dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
559 }
560 if (dsl_dataset_phys(ds)->ds_props_obj != 0)
561 VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
562 tx));
563 if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
564 VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
565 tx));
566 dsl_dir_rele(ds->ds_dir, ds);
567 ds->ds_dir = NULL;
568 dmu_object_free_zapified(mos, obj, tx);
569 }
570
571 void
572 dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
573 {
574 dsl_destroy_snapshot_arg_t *ddsa = arg;
575 const char *dsname = ddsa->ddsa_name;
576 boolean_t defer = ddsa->ddsa_defer;
577
578 dsl_pool_t *dp = dmu_tx_pool(tx);
579 dsl_dataset_t *ds;
580
581 int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
582 if (error == ENOENT)
583 return;
584 ASSERT0(error);
585 dsl_destroy_snapshot_sync_impl(ds, defer, tx);
586 zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
587 dsl_dataset_rele(ds, FTAG);
588 }
589
590 /*
591 * The semantics of this function are described in the comment above
592 * lzc_destroy_snaps(). To summarize:
593 *
594 * The snapshots must all be in the same pool.
595 *
596 * Snapshots that don't exist will be silently ignored (considered to be
597 * "already deleted").
598 *
599 * On success, all snaps will be destroyed and this will return 0.
600 * On failure, no snaps will be destroyed, the errlist will be filled in,
601 * and this will return an errno.
602 */
603 int
604 dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
605 nvlist_t *errlist)
606 {
607 if (nvlist_next_nvpair(snaps, NULL) == NULL)
608 return (0);
609
610 /*
611 * lzc_destroy_snaps() is documented to take an nvlist whose
612 * values "don't matter". We need to convert that nvlist to
613 * one that we know can be converted to LUA.
614 */
615 nvlist_t *snaps_normalized = fnvlist_alloc();
616 for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
617 pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
618 fnvlist_add_boolean_value(snaps_normalized,
619 nvpair_name(pair), B_TRUE);
620 }
621
622 nvlist_t *arg = fnvlist_alloc();
623 fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
624 fnvlist_free(snaps_normalized);
625 fnvlist_add_boolean_value(arg, "defer", defer);
626
627 nvlist_t *wrapper = fnvlist_alloc();
628 fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
629 fnvlist_free(arg);
630
631 const char *program =
632 "arg = ...\n"
633 "snaps = arg['snaps']\n"
634 "defer = arg['defer']\n"
635 "errors = { }\n"
636 "has_errors = false\n"
637 "for snap, v in pairs(snaps) do\n"
638 " errno = zfs.check.destroy{snap, defer=defer}\n"
639 " zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
640 " if errno == ENOENT then\n"
641 " snaps[snap] = nil\n"
642 " elseif errno ~= 0 then\n"
643 " errors[snap] = errno\n"
644 " has_errors = true\n"
645 " end\n"
646 "end\n"
647 "if has_errors then\n"
648 " return errors\n"
649 "end\n"
650 "for snap, v in pairs(snaps) do\n"
651 " errno = zfs.sync.destroy{snap, defer=defer}\n"
652 " assert(errno == 0)\n"
653 "end\n"
654 "return { }\n";
655
656 nvlist_t *result = fnvlist_alloc();
657 int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
658 program,
659 B_TRUE,
660 0,
661 zfs_lua_max_memlimit,
662 fnvlist_lookup_nvpair(wrapper, ZCP_ARG_ARGLIST), result);
663 if (error != 0) {
664 const char *errorstr = NULL;
665 (void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
666 if (errorstr != NULL) {
667 zfs_dbgmsg("%s", errorstr);
668 }
669 fnvlist_free(wrapper);
670 fnvlist_free(result);
671 return (error);
672 }
673 fnvlist_free(wrapper);
674
675 /*
676 * lzc_destroy_snaps() is documented to fill the errlist with
677 * int32 values, so we need to convert the int64 values that are
678 * returned from LUA.
679 */
680 int rv = 0;
681 nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
682 for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
683 pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
684 int32_t val = (int32_t)fnvpair_value_int64(pair);
685 if (rv == 0)
686 rv = val;
687 fnvlist_add_int32(errlist, nvpair_name(pair), val);
688 }
689 fnvlist_free(result);
690 return (rv);
691 }
692
693 int
694 dsl_destroy_snapshot(const char *name, boolean_t defer)
695 {
696 int error;
697 nvlist_t *nvl = fnvlist_alloc();
698 nvlist_t *errlist = fnvlist_alloc();
699
700 fnvlist_add_boolean(nvl, name);
701 error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
702 fnvlist_free(errlist);
703 fnvlist_free(nvl);
704 return (error);
705 }
706
707 struct killarg {
708 dsl_dataset_t *ds;
709 dmu_tx_t *tx;
710 };
711
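/*
 * traverse_dataset() callback used by the old synchronous destroy path.
 * ZIL blocks carry no space accounting and are freed directly; all other
 * blocks are released through dsl_dataset_block_kill().
 */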
712 static int
713 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
714 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
715 {
716 (void) spa, (void) dnp;
717 struct killarg *ka = arg;
718 dmu_tx_t *tx = ka->tx;
719
720 if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
721 BP_IS_EMBEDDED(bp))
722 return (0);
723
724 if (zb->zb_level == ZB_ZIL_LEVEL) {
725 ASSERT(zilog != NULL);
726 /*
727 * It's a block in the intent log. It has no
728 * accounting, so just free it.
729 */
730 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
731 } else {
732 ASSERT(zilog == NULL);
733 ASSERT3U(BP_GET_LOGICAL_BIRTH(bp), >,
734 dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
735 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
736 }
737
738 return (0);
739 }
740
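/*
 * Destroy a head dataset the old way (no async_destroy feature): traverse
 * everything born after the previous snapshot and free it in this txg.
 */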
741 static void
742 old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
743 {
744 struct killarg ka;
745
746 spa_history_log_internal_ds(ds, "destroy", tx,
747 "(synchronous, mintxg=%llu)",
748 (long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);
749
750 /*
751 * Free everything that we point to (that's born after
752 * the previous snapshot, if we are a clone)
753 *
754 * NB: this should be very quick, because we already
755 * freed all the objects in open context.
756 */
757 ka.ds = ds;
758 ka.tx = tx;
759 VERIFY0(traverse_dataset(ds,
760 dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
761 TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
762 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
763 dsl_dataset_phys(ds)->ds_unique_bytes == 0);
764 }
765
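/*
 * Check whether a head dataset (filesystem or volume) can be destroyed: it
 * must not be long-held beyond the expected holds, must have no snapshots of
 * its own and no child filesystems, and if its origin snapshot would also be
 * removed, that snapshot must not be long-held either.
 */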
766 int
767 dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
768 {
769 int error;
770 uint64_t count;
771 objset_t *mos;
772
773 ASSERT(!ds->ds_is_snapshot);
774 if (ds->ds_is_snapshot)
775 return (SET_ERROR(EINVAL));
776
777 if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
778 return (SET_ERROR(EBUSY));
779
780 ASSERT0(ds->ds_dir->dd_activity_waiters);
781
782 mos = ds->ds_dir->dd_pool->dp_meta_objset;
783
784 /*
785 * Can't delete a head dataset if there are snapshots of it.
786 * (Except if the only snapshots are from the branch we cloned
787 * from.)
788 */
789 if (ds->ds_prev != NULL &&
790 dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
791 return (SET_ERROR(EBUSY));
792
793 /*
794 * Can't delete if there are children of this fs.
795 */
796 error = zap_count(mos,
797 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
798 if (error != 0)
799 return (error);
800 if (count != 0)
801 return (SET_ERROR(EEXIST));
802
803 if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
804 dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
805 ds->ds_prev->ds_userrefs == 0) {
806 /* We need to remove the origin snapshot as well. */
807 if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
808 return (SET_ERROR(EBUSY));
809 }
810 return (0);
811 }
812
813 int
814 dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
815 {
816 dsl_destroy_head_arg_t *ddha = arg;
817 dsl_pool_t *dp = dmu_tx_pool(tx);
818 dsl_dataset_t *ds;
819 int error;
820
821 error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
822 if (error != 0)
823 return (error);
824
825 error = dsl_destroy_head_check_impl(ds, 0);
826 dsl_dataset_rele(ds, FTAG);
827 return (error);
828 }
829
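/*
 * Destroy the dsl_dir_t after its head dataset has been destroyed: drop the
 * filesystem count, clear the reservation, release any crypto key, and free
 * the dir's ZAP objects and its entry in the parent's child directory.
 */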
830 static void
831 dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
832 {
833 dsl_dir_t *dd;
834 dsl_pool_t *dp = dmu_tx_pool(tx);
835 objset_t *mos = dp->dp_meta_objset;
836 dd_used_t t;
837
838 ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
839
840 VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
841
842 ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
843
844 /* Decrement the filesystem count for all parent filesystems. */
845 if (dd->dd_parent != NULL)
846 dsl_fs_ss_count_adjust(dd->dd_parent, -1,
847 DD_FIELD_FILESYSTEM_COUNT, tx);
848
849 /*
850 * Remove our reservation. The impl() routine avoids setting the
851 * actual property, which would require the (already destroyed) ds.
852 */
853 dsl_dir_set_reservation_sync_impl(dd, 0, tx);
854
855 ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
856 ASSERT0(dsl_dir_phys(dd)->dd_reserved);
857 for (t = 0; t < DD_USED_NUM; t++)
858 ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);
859
860 if (dd->dd_crypto_obj != 0) {
861 dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
862 (void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
863 }
864
865 VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
866 VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
867 if (dsl_dir_phys(dd)->dd_clones != 0)
868 VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_clones, tx));
869 VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
870 VERIFY0(zap_remove(mos,
871 dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
872 dd->dd_myname, tx));
873
874 dsl_dir_rele(dd, FTAG);
875 dmu_object_free_zapified(mos, ddobj, tx);
876 }
877
878 static void
879 dsl_clone_destroy_assert(dsl_dir_t *dd)
880 {
881 uint64_t used, comp, uncomp;
882
883 ASSERT(dsl_dir_is_clone(dd));
884 dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);
885
886 ASSERT3U(dsl_dir_phys(dd)->dd_used_bytes, ==, used);
887 ASSERT3U(dsl_dir_phys(dd)->dd_compressed_bytes, ==, comp);
888 /*
889 * Greater than because we do not track embedded block pointers in
890 * the livelist
891 */
892 ASSERT3U(dsl_dir_phys(dd)->dd_uncompressed_bytes, >=, uncomp);
893
894 ASSERT(list_is_empty(&dd->dd_pending_allocs.bpl_list));
895 ASSERT(list_is_empty(&dd->dd_pending_frees.bpl_list));
896 }
897
898 /*
899 * Start the delete process for a clone. Free its zil, verify the space usage
900 * and queue the blkptrs for deletion by adding the livelist to the pool-wide
901 * delete queue.
902 */
903 static void
904 dsl_async_clone_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
905 {
906 uint64_t zap_obj, to_delete, used, comp, uncomp;
907 objset_t *os;
908 dsl_dir_t *dd = ds->ds_dir;
909 dsl_pool_t *dp = dmu_tx_pool(tx);
910 objset_t *mos = dp->dp_meta_objset;
911 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
912 VERIFY0(dmu_objset_from_ds(ds, &os));
913
914 uint64_t mintxg = 0;
915 dsl_deadlist_entry_t *dle = dsl_deadlist_first(&dd->dd_livelist);
916 if (dle != NULL)
917 mintxg = dle->dle_mintxg;
918
919 spa_history_log_internal_ds(ds, "destroy", tx,
920 "(livelist, mintxg=%llu)", (long long)mintxg);
921
922 /* Check that the clone is in a correct state to be deleted */
923 dsl_clone_destroy_assert(dd);
924
925 /* Destroy the zil */
926 zil_destroy_sync(dmu_objset_zil(os), tx);
927
928 VERIFY0(zap_lookup(mos, dd->dd_object,
929 DD_FIELD_LIVELIST, sizeof (uint64_t), 1, &to_delete));
930 /* Initialize deleted_clones entry to track livelists to cleanup */
931 int error = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
932 DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
933 if (error == ENOENT) {
934 zap_obj = zap_create(mos, DMU_OTN_ZAP_METADATA,
935 DMU_OT_NONE, 0, tx);
936 VERIFY0(zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
937 DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1,
938 &(zap_obj), tx));
939 spa->spa_livelists_to_delete = zap_obj;
940 } else if (error != 0) {
941 zfs_panic_recover("zfs: error %d was returned while looking "
942 "up DMU_POOL_DELETED_CLONES in the zap", error);
943 return;
944 }
945 VERIFY0(zap_add_int(mos, zap_obj, to_delete, tx));
946
947 /* Clone is no longer using space, now tracked by dp_free_dir */
948 dsl_deadlist_space(&dd->dd_livelist, &used, &comp, &uncomp);
949 dsl_dir_diduse_space(dd, DD_USED_HEAD,
950 -used, -comp, -dsl_dir_phys(dd)->dd_uncompressed_bytes,
951 tx);
952 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
953 used, comp, uncomp, tx);
954 dsl_dir_remove_livelist(dd, tx, B_FALSE);
955 zthr_wakeup(spa->spa_livelist_delete_zthr);
956 }
957
958 /*
959 * Move the bptree into the pool's list of trees to clean up, update space
960 * accounting information and destroy the zil.
961 */
962 static void
963 dsl_async_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
964 {
965 uint64_t used, comp, uncomp;
966 objset_t *os;
967
968 VERIFY0(dmu_objset_from_ds(ds, &os));
969 dsl_pool_t *dp = dmu_tx_pool(tx);
970 objset_t *mos = dp->dp_meta_objset;
971
972 spa_history_log_internal_ds(ds, "destroy", tx,
973 "(bptree, mintxg=%llu)",
974 (long long)dsl_dataset_phys(ds)->ds_prev_snap_txg);
975
976 zil_destroy_sync(dmu_objset_zil(os), tx);
977
978 if (!spa_feature_is_active(dp->dp_spa,
979 SPA_FEATURE_ASYNC_DESTROY)) {
980 dsl_scan_t *scn = dp->dp_scan;
981 spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
982 tx);
983 dp->dp_bptree_obj = bptree_alloc(mos, tx);
984 VERIFY0(zap_add(mos,
985 DMU_POOL_DIRECTORY_OBJECT,
986 DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
987 &dp->dp_bptree_obj, tx));
988 ASSERT(!scn->scn_async_destroying);
989 scn->scn_async_destroying = B_TRUE;
990 }
991
992 used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
993 comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
994 uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;
995
996 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
997 dsl_dataset_phys(ds)->ds_unique_bytes == used);
998
999 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1000 bptree_add(mos, dp->dp_bptree_obj,
1001 &dsl_dataset_phys(ds)->ds_bp,
1002 dsl_dataset_phys(ds)->ds_prev_snap_txg,
1003 used, comp, uncomp, tx);
1004 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1005 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
1006 -used, -comp, -uncomp, tx);
1007 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
1008 used, comp, uncomp, tx);
1009 }
1010
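/*
 * Destroy a head dataset in syncing context.  The data blocks are handed off
 * to the livelist or async-destroy machinery when available (or freed
 * synchronously on old pools), then the dataset, its bookmarks, and its
 * dsl_dir are torn down.  If this was the last clone of a defer-destroyed
 * origin snapshot, that snapshot is destroyed as well.
 */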
1011 void
1012 dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
1013 {
1014 dsl_pool_t *dp = dmu_tx_pool(tx);
1015 objset_t *mos = dp->dp_meta_objset;
1016 uint64_t obj, ddobj, prevobj = 0;
1017 boolean_t rmorigin;
1018
1019 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
1020 ASSERT(ds->ds_prev == NULL ||
1021 dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
1022 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1023 ASSERT3U(BP_GET_LOGICAL_BIRTH(&dsl_dataset_phys(ds)->ds_bp), <=,
1024 tx->tx_txg);
1025 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1026 ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
1027
1028 dsl_dir_cancel_waiters(ds->ds_dir);
1029
1030 rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
1031 DS_IS_DEFER_DESTROY(ds->ds_prev) &&
1032 dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
1033 ds->ds_prev->ds_userrefs == 0);
1034
1035 /* Remove our reservation. */
1036 if (ds->ds_reserved != 0) {
1037 dsl_dataset_set_refreservation_sync_impl(ds,
1038 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
1039 0, tx);
1040 ASSERT0(ds->ds_reserved);
1041 }
1042
1043 obj = ds->ds_object;
1044
1045 for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
1046 if (dsl_dataset_feature_is_active(ds, f))
1047 dsl_dataset_deactivate_feature(ds, f, tx);
1048 }
1049
1050 dsl_scan_ds_destroyed(ds, tx);
1051
1052 if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
1053 /* This is a clone */
1054 ASSERT(ds->ds_prev != NULL);
1055 ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
1056 obj);
1057 ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
1058
1059 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
1060 if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
1061 dsl_dataset_remove_from_next_clones(ds->ds_prev,
1062 obj, tx);
1063 }
1064
1065 ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
1066 dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
1067 }
1068
1069 /*
1070 * Destroy the deadlist. Unless it's a clone, the
1071 * deadlist should be empty since the dataset has no snapshots.
1072 * (If it's a clone, it's safe to ignore the deadlist contents
1073 * since they are still referenced by the origin snapshot.)
1074 */
1075 dsl_deadlist_close(&ds->ds_deadlist);
1076 dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
1077 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1078 dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
1079
1080 if (dsl_dataset_remap_deadlist_exists(ds))
1081 dsl_dataset_destroy_remap_deadlist(ds, tx);
1082
1083 /*
1084 * Each destroy is responsible for both destroying (enqueuing
1085 * to be destroyed) the blkptrs comprising the dataset as well as
1086 * those belonging to the zil.
1087 */
1088 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist)) {
1089 dsl_async_clone_destroy(ds, tx);
1090 } else if (spa_feature_is_enabled(dp->dp_spa,
1091 SPA_FEATURE_ASYNC_DESTROY)) {
1092 dsl_async_dataset_destroy(ds, tx);
1093 } else {
1094 old_synchronous_dataset_destroy(ds, tx);
1095 }
1096
1097 if (ds->ds_prev != NULL) {
1098 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
1099 VERIFY0(zap_remove_int(mos,
1100 dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
1101 ds->ds_object, tx));
1102 }
1103 prevobj = ds->ds_prev->ds_object;
1104 dsl_dataset_rele(ds->ds_prev, ds);
1105 ds->ds_prev = NULL;
1106 }
1107
1108 /*
1109 * This must be done after the dsl_traverse(), because it will
1110 * re-open the objset.
1111 */
1112 if (ds->ds_objset) {
1113 dmu_objset_evict(ds->ds_objset);
1114 ds->ds_objset = NULL;
1115 }
1116
1117 /* Erase the link in the dir */
1118 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
1119 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
1120 ddobj = ds->ds_dir->dd_object;
1121 ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
1122 VERIFY0(zap_destroy(mos,
1123 dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));
1124
1125 if (ds->ds_bookmarks_obj != 0) {
1126 void *cookie = NULL;
1127 dsl_bookmark_node_t *dbn;
1128
1129 while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) !=
1130 NULL) {
1131 if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1132 dnode_t *rl;
1133 VERIFY0(dnode_hold(mos,
1134 dbn->dbn_phys.zbm_redaction_obj, FTAG,
1135 &rl));
1136 if (rl->dn_have_spill) {
1137 spa_feature_decr(dmu_objset_spa(mos),
1138 SPA_FEATURE_REDACTION_LIST_SPILL,
1139 tx);
1140 }
1141 dnode_rele(rl, FTAG);
1142 VERIFY0(dmu_object_free(mos,
1143 dbn->dbn_phys.zbm_redaction_obj, tx));
1144 spa_feature_decr(dmu_objset_spa(mos),
1145 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1146 }
1147 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1148 spa_feature_decr(dmu_objset_spa(mos),
1149 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1150 }
1151 spa_strfree(dbn->dbn_name);
1152 mutex_destroy(&dbn->dbn_lock);
1153 kmem_free(dbn, sizeof (*dbn));
1154 }
1155 avl_destroy(&ds->ds_bookmarks);
1156 VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1157 spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1158 }
1159
1160 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
1161
1162 ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
1163 ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
1164 ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
1165 dsl_dir_rele(ds->ds_dir, ds);
1166 ds->ds_dir = NULL;
1167 dmu_object_free_zapified(mos, obj, tx);
1168
1169 dsl_dir_destroy_sync(ddobj, tx);
1170
1171 if (rmorigin) {
1172 dsl_dataset_t *prev;
1173 VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
1174 dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
1175 dsl_dataset_rele(prev, FTAG);
1176 }
1177 /* Delete errlog. */
1178 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_HEAD_ERRLOG))
1179 spa_delete_dataset_errlog(dp->dp_spa, ds->ds_object, tx);
1180 }
1181
1182 void
1183 dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
1184 {
1185 dsl_destroy_head_arg_t *ddha = arg;
1186 dsl_pool_t *dp = dmu_tx_pool(tx);
1187 dsl_dataset_t *ds;
1188
1189 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
1190 dsl_destroy_head_sync_impl(ds, tx);
1191 zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
1192 dsl_dataset_rele(ds, FTAG);
1193 }
1194
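/*
 * First phase of the two-txg destroy used when async_destroy is not enabled:
 * mark the dataset DS_FLAG_INCONSISTENT so that, if we crash before the
 * final sync task runs, dsl_destroy_inconsistent() will finish the job.
 */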
1195 static void
1196 dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
1197 {
1198 dsl_destroy_head_arg_t *ddha = arg;
1199 dsl_pool_t *dp = dmu_tx_pool(tx);
1200 dsl_dataset_t *ds;
1201
1202 VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
1203
1204 /* Mark it as inconsistent on-disk, in case we crash */
1205 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1206 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1207
1208 spa_history_log_internal_ds(ds, "destroy begin", tx, " ");
1209 dsl_dataset_rele(ds, FTAG);
1210 }
1211
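/*
 * Destroy the named filesystem or volume.  On pools without the
 * async_destroy feature, first mark the dataset inconsistent and free its
 * objects from open context so the final (single-txg) sync task stays short.
 */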
1212 int
1213 dsl_destroy_head(const char *name)
1214 {
1215 dsl_destroy_head_arg_t ddha;
1216 int error;
1217 spa_t *spa;
1218 boolean_t isenabled;
1219
1220 #ifdef _KERNEL
1221 zfs_destroy_unmount_origin(name);
1222 #endif
1223
1224 error = spa_open(name, &spa, FTAG);
1225 if (error != 0)
1226 return (error);
1227 isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
1228 spa_close(spa, FTAG);
1229
1230 ddha.ddha_name = name;
1231
1232 if (!isenabled) {
1233 objset_t *os;
1234
1235 error = dsl_sync_task(name, dsl_destroy_head_check,
1236 dsl_destroy_head_begin_sync, &ddha,
1237 0, ZFS_SPACE_CHECK_DESTROY);
1238 if (error != 0)
1239 return (error);
1240
1241 /*
1242 * Head deletion is processed in one txg on old pools;
1243 * remove the objects from open context so that the txg sync
1244 * is not too long. This optimization can only work for
1245 * encrypted datasets if the wrapping key is loaded.
1246 */
1247 error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_TRUE,
1248 FTAG, &os);
1249 if (error == 0) {
1250 uint64_t prev_snap_txg =
1251 dsl_dataset_phys(dmu_objset_ds(os))->
1252 ds_prev_snap_txg;
1253 for (uint64_t obj = 0; error == 0;
1254 error = dmu_object_next(os, &obj, FALSE,
1255 prev_snap_txg))
1256 (void) dmu_free_long_object(os, obj);
1257 /* sync out all frees */
1258 txg_wait_synced(dmu_objset_pool(os), 0);
1259 dmu_objset_disown(os, B_TRUE, FTAG);
1260 }
1261 }
1262
1263 return (dsl_sync_task(name, dsl_destroy_head_check,
1264 dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_DESTROY));
1265 }
1266
1267 /*
1268 * Note, this function is used as the callback for dmu_objset_find(). We
1269 * always return 0 so that we will continue to find and process
1270 * inconsistent datasets, even if we encounter an error trying to
1271 * process one of them.
1272 */
1273 int
1274 dsl_destroy_inconsistent(const char *dsname, void *arg)
1275 {
1276 (void) arg;
1277 objset_t *os;
1278
1279 if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
1280 boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
1281
1282 /*
1283 * If the dataset is inconsistent because a resumable receive
1284 * has failed, then do not destroy it.
1285 */
1286 if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
1287 need_destroy = B_FALSE;
1288
1289 dmu_objset_rele(os, FTAG);
1290 if (need_destroy)
1291 (void) dsl_destroy_head(dsname);
1292 }
1293 return (0);
1294 }
1295
1296
1297 #if defined(_KERNEL)
1298 EXPORT_SYMBOL(dsl_destroy_head);
1299 EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
1300 EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
1301 EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
1302 EXPORT_SYMBOL(dsl_destroy_inconsistent);
1303 EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
1304 EXPORT_SYMBOL(dsl_destroy_head_check_impl);
1305 #endif
1306