1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3 * CDDL HEADER START
4 *
5 * This file and its contents are supplied under the terms of the
6 * Common Development and Distribution License ("CDDL"), version 1.0.
7 * You may only use this file in accordance with the terms of version
8 * 1.0 of the CDDL.
9 *
10 * A full copy of the text of the CDDL should have accompanied this
11 * source. A copy of the CDDL is also available via the Internet at
12 * http://www.illumos.org/license/CDDL.
13 *
14 * CDDL HEADER END
15 */
16
17 /*
18 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
19 * Copyright 2017 Nexenta Systems, Inc.
20 * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
21 */
22
23 #include <sys/zfs_context.h>
24 #include <sys/dsl_dataset.h>
25 #include <sys/dsl_dir.h>
26 #include <sys/dsl_prop.h>
27 #include <sys/dsl_synctask.h>
28 #include <sys/dsl_destroy.h>
29 #include <sys/dmu_impl.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/arc.h>
32 #include <sys/zap.h>
33 #include <sys/zfeature.h>
34 #include <sys/spa.h>
35 #include <sys/dsl_bookmark.h>
36 #include <zfs_namecheck.h>
37 #include <sys/dmu_send.h>
38 #include <sys/dbuf.h>
39
40 static int
41 dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
42 dsl_dataset_t **dsp, const void *tag, char **shortnamep)
43 {
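/*
 * Illustrative example (names are hypothetical): for a fullname of
 * "pool/fs#mybm", this copies "pool/fs" into buf, points *shortnamep at
 * "mybm", and holds the dataset "pool/fs" in *dsp.
 */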
44 char buf[ZFS_MAX_DATASET_NAME_LEN];
45 char *hashp;
46
47 if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
48 return (SET_ERROR(ENAMETOOLONG));
49 hashp = strchr(fullname, '#');
50 if (hashp == NULL)
51 return (SET_ERROR(EINVAL));
52
53 *shortnamep = hashp + 1;
54 if (zfs_component_namecheck(*shortnamep, NULL, NULL))
55 return (SET_ERROR(EINVAL));
56 (void) strlcpy(buf, fullname, hashp - fullname + 1);
57 return (dsl_dataset_hold(dp, buf, tag, dsp));
58 }
59
60 /*
61 * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
62 * to be zeroed.
63 *
64 * Returns ESRCH if bookmark is not found.
65 * Note, we need to use the ZAP rather than the AVL to look up bookmarks
66 * by name, because only the ZAP honors the casesensitivity setting.
67 */
68 int
69 dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
70 zfs_bookmark_phys_t *bmark_phys)
71 {
72 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
73 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
74 matchtype_t mt = 0;
75 int err;
76
77 if (bmark_zapobj == 0)
78 return (SET_ERROR(ESRCH));
79
80 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
81 mt = MT_NORMALIZE;
82
83 /*
84 * Zero out the bookmark in case the one stored on disk
85 * is in an older, shorter format.
86 */
87 memset(bmark_phys, 0, sizeof (*bmark_phys));
88
89 err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
90 sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
91 NULL);
92
93 return (err == ENOENT ? SET_ERROR(ESRCH) : err);
94 }
95
96 /*
97 * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
98 * does not represent an earlier point in later_ds's timeline. However,
99 * bmp will still be filled in if we return EXDEV.
100 *
101 * Returns ENOENT if the dataset containing the bookmark does not exist.
102 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
103 */
104 int
105 dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
106 dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
107 {
108 char *shortname;
109 dsl_dataset_t *ds;
110 int error;
111
112 error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
113 if (error != 0)
114 return (error);
115
116 error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
117 if (error == 0 && later_ds != NULL) {
118 if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
119 error = SET_ERROR(EXDEV);
120 }
121 dsl_dataset_rele(ds, FTAG);
122 return (error);
123 }
124
125 /*
126 * Validates that
127 * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
128 * - source is a full path of a snapshot or bookmark
129 * ({bookmark,snapshot}_namecheck)
130 *
131 * Returns 0 if valid, -1 otherwise.
132 */
133 static int
134 dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
135 {
136 if (bookmark_namecheck(bmark, NULL, NULL) != 0)
137 return (-1);
138
139 int is_bmark, is_snap;
140 is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
141 is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
142 if (!is_bmark && !is_snap)
143 return (-1);
144
145 return (0);
146 }
147
148 /*
149 * Check that the given nvlist corresponds to the following schema:
150 * { newbookmark -> source, ... }
151 * where
152 * - each pair passes dsl_bookmark_create_nvl_validate_pair
153 * - all newbookmarks are in the same pool
154 * - all newbookmarks have unique names
155 *
156 * Note that this function only validates the above schema. Callers must ensure
157 * that the bookmarks can be created, e.g. that sources exist.
158 *
159 * Returns 0 if the nvlist adheres to above schema.
160 * Returns -1 if it doesn't.
161 */
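/*
 * Illustrative example (all names hypothetical): a valid request nvlist
 * could look like
 *   { "pool/fs#b1" -> "pool/fs@snap1", "pool/fs#b2" -> "pool/fs#b1" }
 * whereas mixing pools (e.g. "pool/fs#b1" and "other/fs#b2") or listing
 * the same new bookmark name twice fails validation.
 */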
162 int
163 dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
164 {
165 const char *first = NULL;
166 size_t first_len = 0;
167
168 for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
169 pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
170
171 const char *bmark = nvpair_name(pair);
172 const char *source;
173
174 /* list structure: values must be snapshots XOR bookmarks */
175 if (nvpair_value_string(pair, &source) != 0)
176 return (-1);
177 if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
178 return (-1);
179
180 /* same pool check */
181 if (first == NULL) {
182 const char *cp = strpbrk(bmark, "/#");
183 if (cp == NULL)
184 return (-1);
185 first = bmark;
186 first_len = cp - bmark;
187 }
188 if (strncmp(first, bmark, first_len) != 0)
189 return (-1);
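/*
 * The character following the shared prefix must be '/' or '#';
 * otherwise "first" only matched a prefix of a different pool name
 * (e.g., hypothetically, "tank" vs. "tank2").
 */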
190 switch (*(bmark + first_len)) {
191 case '/': /* fallthrough */
192 case '#':
193 break;
194 default:
195 return (-1);
196 }
197
198 /* unique newbookmark names; todo: O(n^2) */
199 for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
200 pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
201 if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
202 return (-1);
203 }
204
205 }
206 return (0);
207 }
208
209 /*
210 * expects that newbm and source have been validated using
211 * dsl_bookmark_create_nvl_validate_pair
212 */
213 static int
214 dsl_bookmark_create_check_impl(dsl_pool_t *dp,
215 const char *newbm, const char *source)
216 {
217 ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
218 /* defer source namecheck until we know it's a snapshot or bookmark */
219
220 int error;
221 dsl_dataset_t *newbm_ds;
222 char *newbm_short;
223 zfs_bookmark_phys_t bmark_phys;
224
225 error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
226 if (error != 0)
227 return (error);
228
229 /* Verify that the new bookmark does not already exist */
230 error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
231 switch (error) {
232 case ESRCH:
233 /* happy path: new bmark doesn't exist, proceed after switch */
234 break;
235 case 0:
236 error = SET_ERROR(EEXIST);
237 goto eholdnewbmds;
238 default:
239 /* dsl_bookmark_lookup_impl already did SET_ERROR */
240 goto eholdnewbmds;
241 }
242
243 /* error is retval of the following if-cascade */
244 if (strchr(source, '@') != NULL) {
245 dsl_dataset_t *source_snap_ds;
246 ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
247 error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
248 if (error == 0) {
249 VERIFY(source_snap_ds->ds_is_snapshot);
250 /*
251 * Verify that source snapshot is an earlier point in
252 * newbm_ds's timeline (source may be newbm_ds's origin)
253 */
254 if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
255 error = SET_ERROR(
256 ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
257 dsl_dataset_rele(source_snap_ds, FTAG);
258 }
259 } else if (strchr(source, '#') != NULL) {
260 zfs_bookmark_phys_t source_phys;
261 ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
262 /*
263 * Source must exist and be an earlier point in newbm_ds's
264 * timeline (newbm_ds's origin may be a snap of source's ds)
265 */
266 error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
267 switch (error) {
268 case 0:
269 break; /* happy path */
270 case EXDEV:
271 error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
272 break;
273 default:
274 /* dsl_bookmark_lookup already did SET_ERROR */
275 break;
276 }
277 } else {
278 /*
279 * dsl_bookmark_create_nvl_validate validates that source is
280 * either snapshot or bookmark
281 */
282 panic("unreachable code: %s", source);
283 }
284
285 eholdnewbmds:
286 dsl_dataset_rele(newbm_ds, FTAG);
287 return (error);
288 }
289
290 int
291 dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
292 {
293 dsl_bookmark_create_arg_t *dbca = arg;
294 int rv = 0;
295 int schema_err = 0;
296 ASSERT3P(dbca, !=, NULL);
297 ASSERT3P(dbca->dbca_bmarks, !=, NULL);
298 /* dbca->dbca_errors is allowed to be NULL */
299
300 dsl_pool_t *dp = dmu_tx_pool(tx);
301
302 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
303 return (SET_ERROR(ENOTSUP));
304
305 if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
306 rv = schema_err = SET_ERROR(EINVAL);
307
308 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
309 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
310 const char *new = nvpair_name(pair);
311
312 int error = schema_err;
313 if (error == 0) {
314 const char *source = fnvpair_value_string(pair);
315 error = dsl_bookmark_create_check_impl(dp, new, source);
316 if (error != 0)
317 error = SET_ERROR(error);
318 }
319
320 if (error != 0) {
321 rv = error;
322 if (dbca->dbca_errors != NULL)
323 fnvlist_add_int32(dbca->dbca_errors,
324 new, error);
325 }
326 }
327
328 return (rv);
329 }
330
331 static dsl_bookmark_node_t *
332 dsl_bookmark_node_alloc(char *shortname)
333 {
334 dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
335 dbn->dbn_name = spa_strdup(shortname);
336 dbn->dbn_dirty = B_FALSE;
337 mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
338 return (dbn);
339 }
340
341 /*
342 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
343 */
344 static void
345 dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
346 {
347 spa_t *spa = dsl_dataset_get_spa(snap);
348 objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
349 dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
350
351 memset(zbm, 0, sizeof (zfs_bookmark_phys_t));
352 zbm->zbm_guid = dsp->ds_guid;
353 zbm->zbm_creation_txg = dsp->ds_creation_txg;
354 zbm->zbm_creation_time = dsp->ds_creation_time;
355 zbm->zbm_redaction_obj = 0;
356
357 /*
358 * If the dataset is encrypted create a larger bookmark to
359 * accommodate the IVset guid. The IVset guid was added
360 * after the encryption feature to prevent a problem with
361 * raw sends. If we encounter an encrypted dataset without
362 * an IVset guid we fall back to a normal bookmark.
363 */
364 if (snap->ds_dir->dd_crypto_obj != 0 &&
365 spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
366 (void) zap_lookup(mos, snap->ds_object,
367 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
368 &zbm->zbm_ivset_guid);
369 }
370
371 if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
372 zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
373 zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
374 zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
375 zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
376
377 dsl_dataset_t *nextds;
378 VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
379 dsp->ds_next_snap_obj, FTAG, &nextds));
380 dsl_deadlist_space(&nextds->ds_deadlist,
381 &zbm->zbm_referenced_freed_before_next_snap,
382 &zbm->zbm_compressed_freed_before_next_snap,
383 &zbm->zbm_uncompressed_freed_before_next_snap);
384 dsl_dataset_rele(nextds, FTAG);
385 }
386 }
387
388 /*
389 * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
390 * SPA feature counters.
391 */
392 void
393 dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
394 dmu_tx_t *tx)
395 {
396 dsl_pool_t *dp = dmu_tx_pool(tx);
397 objset_t *mos = dp->dp_meta_objset;
398
399 if (hds->ds_bookmarks_obj == 0) {
400 hds->ds_bookmarks_obj = zap_create_norm(mos,
401 U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
402 tx);
403 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
404
405 dsl_dataset_zapify(hds, tx);
406 VERIFY0(zap_add(mos, hds->ds_object,
407 DS_FIELD_BOOKMARK_NAMES,
408 sizeof (hds->ds_bookmarks_obj), 1,
409 &hds->ds_bookmarks_obj, tx));
410 }
411
412 avl_add(&hds->ds_bookmarks, dbn);
413
414 /*
415 * To maintain backwards compatibility with software that doesn't
416 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
417 * possible bookmark size.
418 */
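/*
 * For example (illustrative): a bookmark of an unencrypted, non-redacted
 * snapshot with no FBN data has all V2-only fields zeroed, so it is
 * written with BOOKMARK_PHYS_SIZE_V1 and the BOOKMARK_V2 feature counter
 * is not incremented.
 */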
419 uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
420 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
421 (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
422 ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
423 bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
424 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
425 }
426
427 zfs_bookmark_phys_t zero_phys = { 0 };
428 ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
429 &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
430
431 VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
432 sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
433 &dbn->dbn_phys, tx));
434 }
435
436 /*
437 * If redaction_list is non-null, we create a redacted bookmark and redaction
438 * list, and store the object number of the redaction list in redact_obj.
439 */
440 static void
441 dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
442 dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps,
443 const void *tag, redaction_list_t **redaction_list)
444 {
445 dsl_pool_t *dp = dmu_tx_pool(tx);
446 objset_t *mos = dp->dp_meta_objset;
447 dsl_dataset_t *snapds, *bmark_fs;
448 char *shortname;
449 boolean_t bookmark_redacted;
450 uint64_t *dsredactsnaps;
451 uint64_t dsnumsnaps;
452
453 VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
454 VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
455 &shortname));
456
457 dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
458 dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
459
460 bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
461 SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
462 if (redaction_list != NULL || bookmark_redacted) {
463 redaction_list_t *local_rl;
464 boolean_t spill = B_FALSE;
465 if (bookmark_redacted) {
466 redact_snaps = dsredactsnaps;
467 num_redact_snaps = dsnumsnaps;
468 }
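/*
 * If the redaction snapshot list does not fit in the dnode's bonus
 * buffer, store it in a spill block instead (accounted for by
 * SPA_FEATURE_REDACTION_LIST_SPILL below).
 */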
469 int bonuslen = sizeof (redaction_list_phys_t) +
470 num_redact_snaps * sizeof (uint64_t);
471 if (bonuslen > dmu_bonus_max())
472 spill = B_TRUE;
473 dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
474 DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
475 DMU_OTN_UINT64_METADATA, spill ? 0 : bonuslen, tx);
476 spa_feature_incr(dp->dp_spa,
477 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
478 if (spill) {
479 spa_feature_incr(dp->dp_spa,
480 SPA_FEATURE_REDACTION_LIST_SPILL, tx);
481 }
482
483 VERIFY0(dsl_redaction_list_hold_obj(dp,
484 dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
485 dsl_redaction_list_long_hold(dp, local_rl, tag);
486
487 if (!spill) {
488 ASSERT3U(local_rl->rl_bonus->db_size, >=, bonuslen);
489 dmu_buf_will_dirty(local_rl->rl_bonus, tx);
490 } else {
491 dmu_buf_t *db;
492 VERIFY0(dmu_spill_hold_by_bonus(local_rl->rl_bonus,
493 DB_RF_MUST_SUCCEED, FTAG, &db));
494 dmu_buf_will_fill(db, tx, B_FALSE);
495 VERIFY0(dbuf_spill_set_blksz(db, P2ROUNDUP(bonuslen,
496 SPA_MINBLOCKSIZE), tx));
497 local_rl->rl_phys = db->db_data;
498 local_rl->rl_dbuf = db;
499 }
500 memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
501 sizeof (uint64_t) * num_redact_snaps);
502 local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
503 if (bookmark_redacted) {
504 ASSERT3P(redaction_list, ==, NULL);
505 local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
506 local_rl->rl_phys->rlp_last_object = UINT64_MAX;
507 dsl_redaction_list_long_rele(local_rl, tag);
508 dsl_redaction_list_rele(local_rl, tag);
509 } else {
510 *redaction_list = local_rl;
511 }
512 }
513
514 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
515 spa_feature_incr(dp->dp_spa,
516 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
517 }
518
519 dsl_bookmark_node_add(bmark_fs, dbn, tx);
520
521 spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
522 "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
523 shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
524 (longlong_t)snapds->ds_object,
525 (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
526
527 dsl_dataset_rele(bmark_fs, FTAG);
528 dsl_dataset_rele(snapds, FTAG);
529 }
530
531
532 static void
533 dsl_bookmark_create_sync_impl_book(
534 const char *new_name, const char *source_name, dmu_tx_t *tx)
535 {
536 dsl_pool_t *dp = dmu_tx_pool(tx);
537 dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
538 char *source_shortname, *new_shortname;
539 zfs_bookmark_phys_t source_phys;
540
541 VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
542 &source_shortname));
543 VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
544 &new_shortname));
545
546 /*
547 * create a copy of the source bookmark by copying most of its members
548 *
549 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
550 * -----------------------------------------------------------------
551 * Reasoning:
552 * - The zbm_redaction_obj would be referred to by both source and new
553 * bookmark, but would be destroyed once either source or new is
554 * destroyed, resulting in use-after-free of the referred object.
555 * - User expectation when issuing the `zfs bookmark` command is that
556 * a normal bookmark of the source is created
557 *
558 * Design Alternatives For Full Redaction Bookmark Copying:
559 * - reference-count the redaction object => would require on-disk
560 * format change for existing redaction objects
561 * - Copy the redaction object => cannot be done in syncing context
562 * because the redaction object might be too large
563 */
564
565 VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
566 &source_phys));
567 dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
568
569 memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
570 new_dbn->dbn_phys.zbm_redaction_obj = 0;
571
572 /* update feature counters */
573 if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
574 spa_feature_incr(dp->dp_spa,
575 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
576 }
577 /* no need for redaction bookmark counter; nulled zbm_redaction_obj */
578 /* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
579
580 /*
581 * write new bookmark
582 *
583 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
584 * v1 bookmark, the v2-only fields are zeroed.
585 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
586 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
587 * => bookmark copying works on pre-bookmark-v2 pools
588 */
589 dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
590
591 spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
592 "name=%s creation_txg=%llu source_guid=%llu",
593 new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
594 (longlong_t)source_phys.zbm_guid);
595
596 dsl_dataset_rele(bmark_fs_source, FTAG);
597 dsl_dataset_rele(bmark_fs_new, FTAG);
598 }
599
600 void
601 dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
602 {
603 dsl_bookmark_create_arg_t *dbca = arg;
604
605 ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
606 SPA_FEATURE_BOOKMARKS));
607
608 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
609 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
610
611 const char *new = nvpair_name(pair);
612 const char *source = fnvpair_value_string(pair);
613
614 if (strchr(source, '@') != NULL) {
615 dsl_bookmark_create_sync_impl_snap(new, source, tx,
616 0, NULL, NULL, NULL);
617 } else if (strchr(source, '#') != NULL) {
618 dsl_bookmark_create_sync_impl_book(new, source, tx);
619 } else {
620 panic("unreachable code");
621 }
622
623 }
624 }
625
626 /*
627 * The bookmarks must all be in the same pool.
628 */
629 int
630 dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
631 {
632 nvpair_t *pair;
633 dsl_bookmark_create_arg_t dbca;
634
635 pair = nvlist_next_nvpair(bmarks, NULL);
636 if (pair == NULL)
637 return (0);
638
639 dbca.dbca_bmarks = bmarks;
640 dbca.dbca_errors = errors;
641
642 return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
643 dsl_bookmark_create_sync, &dbca,
644 fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
645 }
646
647 static int
648 dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
649 {
650 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
651 dsl_pool_t *dp = dmu_tx_pool(tx);
652 int rv = 0;
653
654 if (!spa_feature_is_enabled(dp->dp_spa,
655 SPA_FEATURE_REDACTION_BOOKMARKS))
656 return (SET_ERROR(ENOTSUP));
657 /*
658 * If the list of redact snaps will not fit in the bonus buffer (or
659 * spill block, with the REDACTION_LIST_SPILL feature) with the
660 * furthest reached object and offset, fail.
661 */
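/*
 * Illustrative arithmetic (assumes a 128 KiB maximum block size): with
 * the spill feature enabled, roughly 16K 8-byte snapshot guids fit after
 * the redaction_list_phys_t header; without it, the limit is whatever
 * fits in the dnode bonus buffer.
 */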
662 uint64_t snaplimit = ((spa_feature_is_enabled(dp->dp_spa,
663 SPA_FEATURE_REDACTION_LIST_SPILL) ? spa_maxblocksize(dp->dp_spa) :
664 dmu_bonus_max()) -
665 sizeof (redaction_list_phys_t)) / sizeof (uint64_t);
666 if (dbcra->dbcra_numsnaps > snaplimit)
667 return (SET_ERROR(E2BIG));
668
669 if (dsl_bookmark_create_nvl_validate_pair(
670 dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
671 return (SET_ERROR(EINVAL));
672
673 rv = dsl_bookmark_create_check_impl(dp,
674 dbcra->dbcra_bmark, dbcra->dbcra_snap);
675 return (rv);
676 }
677
678 static void
679 dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
680 {
681 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
682 dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
683 dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
684 dbcra->dbcra_tag, dbcra->dbcra_rl);
685 }
686
687 int
688 dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
689 uint64_t numsnaps, uint64_t *snapguids, const void *tag,
690 redaction_list_t **rl)
691 {
692 dsl_bookmark_create_redacted_arg_t dbcra;
693
694 dbcra.dbcra_bmark = bookmark;
695 dbcra.dbcra_snap = snapshot;
696 dbcra.dbcra_rl = rl;
697 dbcra.dbcra_numsnaps = numsnaps;
698 dbcra.dbcra_snaps = snapguids;
699 dbcra.dbcra_tag = tag;
700
701 return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
702 dsl_bookmark_create_redacted_sync, &dbcra, 5,
703 ZFS_SPACE_CHECK_NORMAL));
704 }
705
706 /*
707 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
708 * If 'props' is NULL, retrieves all properties.
709 */
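/*
 * Illustrative example (hypothetical request): passing props =
 * { "guid", "createtxg" } populates out_props with only those two
 * properties, while props == NULL returns every supported property,
 * including redaction information for redacted bookmarks.
 */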
710 static void
711 dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
712 nvlist_t *props, nvlist_t *out_props)
713 {
714 ASSERT3P(dp, !=, NULL);
715 ASSERT3P(bmark_phys, !=, NULL);
716 ASSERT3P(out_props, !=, NULL);
717 ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
718
719 if (props == NULL || nvlist_exists(props,
720 zfs_prop_to_name(ZFS_PROP_GUID))) {
721 dsl_prop_nvlist_add_uint64(out_props,
722 ZFS_PROP_GUID, bmark_phys->zbm_guid);
723 }
724 if (props == NULL || nvlist_exists(props,
725 zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
726 dsl_prop_nvlist_add_uint64(out_props,
727 ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
728 }
729 if (props == NULL || nvlist_exists(props,
730 zfs_prop_to_name(ZFS_PROP_CREATION))) {
731 dsl_prop_nvlist_add_uint64(out_props,
732 ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
733 }
734 if (props == NULL || nvlist_exists(props,
735 zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
736 dsl_prop_nvlist_add_uint64(out_props,
737 ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
738 }
739 if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
740 if (props == NULL || nvlist_exists(props,
741 zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
742 dsl_prop_nvlist_add_uint64(out_props,
743 ZFS_PROP_REFERENCED,
744 bmark_phys->zbm_referenced_bytes_refd);
745 }
746 if (props == NULL || nvlist_exists(props,
747 zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
748 dsl_prop_nvlist_add_uint64(out_props,
749 ZFS_PROP_LOGICALREFERENCED,
750 bmark_phys->zbm_uncompressed_bytes_refd);
751 }
752 if (props == NULL || nvlist_exists(props,
753 zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
754 uint64_t ratio =
755 bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
756 bmark_phys->zbm_uncompressed_bytes_refd * 100 /
757 bmark_phys->zbm_compressed_bytes_refd;
758 dsl_prop_nvlist_add_uint64(out_props,
759 ZFS_PROP_REFRATIO, ratio);
760 }
761 }
762
763 if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
764 nvlist_exists(props, "redact_complete")) &&
765 bmark_phys->zbm_redaction_obj != 0) {
766 redaction_list_t *rl;
767 int err = dsl_redaction_list_hold_obj(dp,
768 bmark_phys->zbm_redaction_obj, FTAG, &rl);
769 if (err == 0) {
770 if (nvlist_exists(props, "redact_snaps")) {
771 nvlist_t *nvl;
772 nvl = fnvlist_alloc();
773 fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
774 rl->rl_phys->rlp_snaps,
775 rl->rl_phys->rlp_num_snaps);
776 fnvlist_add_nvlist(out_props, "redact_snaps",
777 nvl);
778 nvlist_free(nvl);
779 }
780 if (nvlist_exists(props, "redact_complete")) {
781 nvlist_t *nvl;
782 nvl = fnvlist_alloc();
783 fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
784 rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
785 rl->rl_phys->rlp_last_object == UINT64_MAX);
786 fnvlist_add_nvlist(out_props, "redact_complete",
787 nvl);
788 nvlist_free(nvl);
789 }
790 dsl_redaction_list_rele(rl, FTAG);
791 }
792 }
793 }
794
795 int
796 dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
797 {
798 dsl_pool_t *dp = ds->ds_dir->dd_pool;
799
800 ASSERT(dsl_pool_config_held(dp));
801
802 if (dsl_dataset_is_snapshot(ds))
803 return (SET_ERROR(EINVAL));
804
805 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
806 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
807 nvlist_t *out_props = fnvlist_alloc();
808
809 dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
810
811 fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
812 fnvlist_free(out_props);
813 }
814 return (0);
815 }
816
817 /*
818 * Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
819 * their TXG, then by their FBN-ness. The "FBN-ness" component ensures
820 * that all bookmarks at the same TXG that have HAS_FBN set are adjacent, which
821 * dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
822 * multiple bookmarks at the same TXG (with the same FBN-ness). In this
823 * case we differentiate them by an arbitrary metric (in this case,
824 * their names).
825 */
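/*
 * Illustrative ordering under this comparator (hypothetical entries):
 *   (txg 100, no HAS_FBN, "a") < (txg 100, HAS_FBN, "a")
 *   < (txg 100, HAS_FBN, "b") < (txg 200, no HAS_FBN, "a")
 */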
826 static int
827 dsl_bookmark_compare(const void *l, const void *r)
828 {
829 const dsl_bookmark_node_t *ldbn = l;
830 const dsl_bookmark_node_t *rdbn = r;
831
832 int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
833 rdbn->dbn_phys.zbm_creation_txg);
834 if (likely(cmp))
835 return (cmp);
836 cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
837 (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
838 if (likely(cmp))
839 return (cmp);
840 cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
841 return (TREE_ISIGN(cmp));
842 }
843
844 /*
845 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
846 */
847 int
848 dsl_bookmark_init_ds(dsl_dataset_t *ds)
849 {
850 dsl_pool_t *dp = ds->ds_dir->dd_pool;
851 objset_t *mos = dp->dp_meta_objset;
852
853 ASSERT(!ds->ds_is_snapshot);
854
855 avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
856 sizeof (dsl_bookmark_node_t),
857 offsetof(dsl_bookmark_node_t, dbn_node));
858
859 if (!dsl_dataset_is_zapified(ds))
860 return (0);
861
862 int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
863 sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
864 if (zaperr == ENOENT)
865 return (0);
866 if (zaperr != 0)
867 return (zaperr);
868
869 if (ds->ds_bookmarks_obj == 0)
870 return (0);
871
872 int err = 0;
873 zap_cursor_t zc;
874 zap_attribute_t *attr;
875
876 attr = zap_attribute_alloc();
877 for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
878 (err = zap_cursor_retrieve(&zc, attr)) == 0;
879 zap_cursor_advance(&zc)) {
880 dsl_bookmark_node_t *dbn =
881 dsl_bookmark_node_alloc(attr->za_name);
882
883 err = dsl_bookmark_lookup_impl(ds,
884 dbn->dbn_name, &dbn->dbn_phys);
885 ASSERT3U(err, !=, ENOENT);
886 if (err != 0) {
887 kmem_free(dbn, sizeof (*dbn));
888 break;
889 }
890 avl_add(&ds->ds_bookmarks, dbn);
891 }
892 zap_cursor_fini(&zc);
893 zap_attribute_free(attr);
894 if (err == ENOENT)
895 err = 0;
896 return (err);
897 }
898
899 void
900 dsl_bookmark_fini_ds(dsl_dataset_t *ds)
901 {
902 void *cookie = NULL;
903 dsl_bookmark_node_t *dbn;
904
905 if (ds->ds_is_snapshot)
906 return;
907
908 while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
909 spa_strfree(dbn->dbn_name);
910 mutex_destroy(&dbn->dbn_lock);
911 kmem_free(dbn, sizeof (*dbn));
912 }
913 avl_destroy(&ds->ds_bookmarks);
914 }
915
916 /*
917 * Retrieve the bookmarks that exist in the specified dataset, and the
918 * requested properties of each bookmark.
919 *
920 * The "props" nvlist specifies which properties are requested.
921 * See lzc_get_bookmarks() for the list of valid properties.
922 */
923 int
924 dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
925 {
926 dsl_pool_t *dp;
927 dsl_dataset_t *ds;
928 int err;
929
930 err = dsl_pool_hold(dsname, FTAG, &dp);
931 if (err != 0)
932 return (err);
933 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
934 if (err != 0) {
935 dsl_pool_rele(dp, FTAG);
936 return (err);
937 }
938
939 err = dsl_get_bookmarks_impl(ds, props, outnvl);
940
941 dsl_dataset_rele(ds, FTAG);
942 dsl_pool_rele(dp, FTAG);
943 return (err);
944 }
945
946 /*
947 * Retrieve all properties for a single bookmark in the given dataset.
948 */
949 int
950 dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
951 {
952 dsl_pool_t *dp;
953 dsl_dataset_t *ds;
954 zfs_bookmark_phys_t bmark_phys = { 0 };
955 int err;
956
957 err = dsl_pool_hold(dsname, FTAG, &dp);
958 if (err != 0)
959 return (err);
960 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
961 if (err != 0) {
962 dsl_pool_rele(dp, FTAG);
963 return (err);
964 }
965
966 err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
967 if (err != 0)
968 goto out;
969
970 dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
971 out:
972 dsl_dataset_rele(ds, FTAG);
973 dsl_pool_rele(dp, FTAG);
974 return (err);
975 }
976
977 typedef struct dsl_bookmark_destroy_arg {
978 nvlist_t *dbda_bmarks;
979 nvlist_t *dbda_success;
980 nvlist_t *dbda_errors;
981 } dsl_bookmark_destroy_arg_t;
982
983 static void
984 dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
985 dmu_tx_t *tx)
986 {
987 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
988 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
989 matchtype_t mt = 0;
990 uint64_t int_size, num_ints;
991 /*
992 * 'search' must be zeroed so that zbm_flags (which is used in
993 * dsl_bookmark_compare()) will be zeroed even if the on-disk
994 * (in ZAP) bookmark is shorter than offsetof(zbm_flags).
995 */
996 dsl_bookmark_node_t search = { 0 };
997 char realname[ZFS_MAX_DATASET_NAME_LEN];
998
999 /*
1000 * Find the real name of this bookmark, which may be different
1001 * from the given name if the dataset is case-insensitive. Then
1002 * use the real name to find the node in the ds_bookmarks AVL tree.
1003 */
1004
1005 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
1006 mt = MT_NORMALIZE;
1007
1008 VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
1009
1010 ASSERT3U(int_size, ==, sizeof (uint64_t));
1011
1012 if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
1013 spa_feature_decr(dmu_objset_spa(mos),
1014 SPA_FEATURE_BOOKMARK_V2, tx);
1015 }
1016 VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
1017 num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
1018
1019 search.dbn_name = realname;
1020 dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
1021 ASSERT(dbn != NULL);
1022
1023 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1024 /*
1025 * If this bookmark HAS_FBN, and it is before the most
1026 * recent snapshot, then its TXG is a key in the head's
1027 * deadlist (and all clones' heads' deadlists). If this is
1028 * the last thing keeping the key (i.e. there are no more
1029 * bookmarks with HAS_FBN at this TXG, and there is no
1030 * snapshot at this TXG), then remove the key.
1031 *
1032 * Note that this algorithm depends on ds_bookmarks being
1033 * sorted such that all bookmarks at the same TXG with
1034 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1035 * at the same TXG in between them). If this were not
1036 * the case, we would need to examine *all* bookmarks
1037 * at this TXG, rather than just the adjacent ones.
1038 */
1039
1040 dsl_bookmark_node_t *dbn_prev =
1041 AVL_PREV(&ds->ds_bookmarks, dbn);
1042 dsl_bookmark_node_t *dbn_next =
1043 AVL_NEXT(&ds->ds_bookmarks, dbn);
1044
1045 boolean_t more_bookmarks_at_this_txg =
1046 (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
1047 dbn->dbn_phys.zbm_creation_txg &&
1048 (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
1049 (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
1050 dbn->dbn_phys.zbm_creation_txg &&
1051 (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
1052
1053 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
1054 !more_bookmarks_at_this_txg &&
1055 dbn->dbn_phys.zbm_creation_txg <
1056 dsl_dataset_phys(ds)->ds_prev_snap_txg) {
1057 dsl_dir_remove_clones_key(ds->ds_dir,
1058 dbn->dbn_phys.zbm_creation_txg, tx);
1059 dsl_deadlist_remove_key(&ds->ds_deadlist,
1060 dbn->dbn_phys.zbm_creation_txg, tx);
1061 }
1062
1063 spa_feature_decr(dmu_objset_spa(mos),
1064 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1065 }
1066
1067 if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1068 dnode_t *rl;
1069 VERIFY0(dnode_hold(mos,
1070 dbn->dbn_phys.zbm_redaction_obj, FTAG, &rl));
1071 if (rl->dn_have_spill) {
1072 spa_feature_decr(dmu_objset_spa(mos),
1073 SPA_FEATURE_REDACTION_LIST_SPILL, tx);
1074 }
1075 dnode_rele(rl, FTAG);
1076 VERIFY0(dmu_object_free(mos,
1077 dbn->dbn_phys.zbm_redaction_obj, tx));
1078 spa_feature_decr(dmu_objset_spa(mos),
1079 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1080 }
1081
1082 avl_remove(&ds->ds_bookmarks, dbn);
1083 spa_strfree(dbn->dbn_name);
1084 mutex_destroy(&dbn->dbn_lock);
1085 kmem_free(dbn, sizeof (*dbn));
1086
1087 VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
1088 }
1089
1090 static int
1091 dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
1092 {
1093 dsl_bookmark_destroy_arg_t *dbda = arg;
1094 dsl_pool_t *dp = dmu_tx_pool(tx);
1095 int rv = 0;
1096
1097 ASSERT(nvlist_empty(dbda->dbda_success));
1098 ASSERT(nvlist_empty(dbda->dbda_errors));
1099
1100 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
1101 return (0);
1102
1103 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
1104 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
1105 const char *fullname = nvpair_name(pair);
1106 dsl_dataset_t *ds;
1107 zfs_bookmark_phys_t bm;
1108 int error;
1109 char *shortname;
1110
1111 error = dsl_bookmark_hold_ds(dp, fullname, &ds,
1112 FTAG, &shortname);
1113 if (error == ENOENT) {
1114 /* ignore it; the bookmark is "already destroyed" */
1115 continue;
1116 }
1117 if (error == 0) {
1118 error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
1119 dsl_dataset_rele(ds, FTAG);
1120 if (error == ESRCH) {
1121 /*
1122 * ignore it; the bookmark is
1123 * "already destroyed"
1124 */
1125 continue;
1126 }
1127 if (error == 0 && bm.zbm_redaction_obj != 0) {
1128 redaction_list_t *rl = NULL;
1129 error = dsl_redaction_list_hold_obj(tx->tx_pool,
1130 bm.zbm_redaction_obj, FTAG, &rl);
1131 if (error == ENOENT) {
1132 error = 0;
1133 } else if (error == 0 &&
1134 dsl_redaction_list_long_held(rl)) {
1135 error = SET_ERROR(EBUSY);
1136 }
1137 if (rl != NULL) {
1138 dsl_redaction_list_rele(rl, FTAG);
1139 }
1140 }
1141 }
1142 if (error == 0) {
1143 if (dmu_tx_is_syncing(tx)) {
1144 fnvlist_add_boolean(dbda->dbda_success,
1145 fullname);
1146 }
1147 } else {
1148 fnvlist_add_int32(dbda->dbda_errors, fullname, error);
1149 rv = error;
1150 }
1151 }
1152 return (rv);
1153 }
1154
1155 static void
1156 dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
1157 {
1158 dsl_bookmark_destroy_arg_t *dbda = arg;
1159 dsl_pool_t *dp = dmu_tx_pool(tx);
1160 objset_t *mos = dp->dp_meta_objset;
1161
1162 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
1163 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
1164 dsl_dataset_t *ds;
1165 char *shortname;
1166 uint64_t zap_cnt;
1167
1168 VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
1169 &ds, FTAG, &shortname));
1170 dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
1171
1172 /*
1173 * If all of this dataset's bookmarks have been destroyed,
1174 * free the zap object and decrement the feature's use count.
1175 */
1176 VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
1177 if (zap_cnt == 0) {
1178 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1179 VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1180 ds->ds_bookmarks_obj = 0;
1181 spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1182 VERIFY0(zap_remove(mos, ds->ds_object,
1183 DS_FIELD_BOOKMARK_NAMES, tx));
1184 }
1185
1186 spa_history_log_internal_ds(ds, "remove bookmark", tx,
1187 "name=%s", shortname);
1188
1189 dsl_dataset_rele(ds, FTAG);
1190 }
1191 }
1192
1193 /*
1194 * The bookmarks must all be in the same pool.
1195 */
1196 int
1197 dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
1198 {
1199 int rv;
1200 dsl_bookmark_destroy_arg_t dbda;
1201 nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
1202 if (pair == NULL)
1203 return (0);
1204
1205 dbda.dbda_bmarks = bmarks;
1206 dbda.dbda_errors = errors;
1207 dbda.dbda_success = fnvlist_alloc();
1208
1209 rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
1210 dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
1211 ZFS_SPACE_CHECK_RESERVED);
1212 fnvlist_free(dbda.dbda_success);
1213 return (rv);
1214 }
1215
1216 /* Return B_TRUE if there are any long holds on this redaction list. */
1217 boolean_t
1218 dsl_redaction_list_long_held(redaction_list_t *rl)
1219 {
1220 return (!zfs_refcount_is_zero(&rl->rl_longholds));
1221 }
1222
1223 void
1224 dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl,
1225 const void *tag)
1226 {
1227 ASSERT(dsl_pool_config_held(dp));
1228 (void) zfs_refcount_add(&rl->rl_longholds, tag);
1229 }
1230
1231 void
1232 dsl_redaction_list_long_rele(redaction_list_t *rl, const void *tag)
1233 {
1234 (void) zfs_refcount_remove(&rl->rl_longholds, tag);
1235 }
1236
1237 static void
1238 redaction_list_evict_sync(void *rlu)
1239 {
1240 redaction_list_t *rl = rlu;
1241 zfs_refcount_destroy(&rl->rl_longholds);
1242
1243 kmem_free(rl, sizeof (redaction_list_t));
1244 }
1245
1246 void
1247 dsl_redaction_list_rele(redaction_list_t *rl, const void *tag)
1248 {
1249 if (rl->rl_bonus != rl->rl_dbuf)
1250 dmu_buf_rele(rl->rl_dbuf, tag);
1251 dmu_buf_rele(rl->rl_bonus, tag);
1252 }
1253
1254 int
1255 dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, const void *tag,
1256 redaction_list_t **rlp)
1257 {
1258 objset_t *mos = dp->dp_meta_objset;
1259 dmu_buf_t *dbuf, *spill_dbuf;
1260 redaction_list_t *rl;
1261 int err;
1262
1263 ASSERT(dsl_pool_config_held(dp));
1264
1265 err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1266 if (err != 0)
1267 return (err);
1268
1269 rl = dmu_buf_get_user(dbuf);
1270 if (rl == NULL) {
1271 redaction_list_t *winner = NULL;
1272
1273 rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1274 rl->rl_bonus = dbuf;
1275 if (dmu_spill_hold_existing(dbuf, tag, &spill_dbuf) == 0) {
1276 rl->rl_dbuf = spill_dbuf;
1277 } else {
1278 rl->rl_dbuf = dbuf;
1279 }
1280 rl->rl_object = rlobj;
1281 rl->rl_phys = rl->rl_dbuf->db_data;
1282 rl->rl_mos = dp->dp_meta_objset;
1283 zfs_refcount_create(&rl->rl_longholds);
1284 dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1285 &rl->rl_bonus);
1286 if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1287 kmem_free(rl, sizeof (*rl));
1288 rl = winner;
1289 }
1290 }
1291 *rlp = rl;
1292 return (0);
1293 }
1294
1295 /*
1296 * Snapshot ds is being destroyed.
1297 *
1298 * Adjust the "freed_before_next" of any bookmarks between this snap
1299 * and the previous snapshot, because their "next snapshot" is changing.
1300 *
1301 * If there are any bookmarks with HAS_FBN at this snapshot, remove
1302 * their HAS_SNAP flag (note: there can be at most one snapshot of
1303 * each filesystem at a given txg), and return B_TRUE. In this case
1304 * the caller can not remove the key in the deadlist at this TXG, because
1305 * the HAS_FBN bookmarks require the key be there.
1306 *
1307 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1308 * snapshot's TXG. In this case the caller can remove the key in the
1309 * deadlist at this TXG.
1310 */
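/*
 * Illustrative example (hypothetical txgs): with snapshots S1 (txg 100)
 * and S2 (txg 200), a HAS_FBN bookmark at txg 150, and S2 being
 * destroyed, the bookmark's freed_before_next_snap counters grow by the
 * space on the following dataset's deadlist that was born at or before
 * txg 150, because the bookmark's "next snapshot" is no longer S2.
 */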
1311 boolean_t
1312 dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1313 {
1314 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1315
1316 dsl_dataset_t *head, *next;
1317 VERIFY0(dsl_dataset_hold_obj(dp,
1318 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1319 VERIFY0(dsl_dataset_hold_obj(dp,
1320 dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1321
1322 /*
1323 * Find the first bookmark that HAS_FBN at or after the
1324 * previous snapshot.
1325 */
1326 dsl_bookmark_node_t search = { 0 };
1327 avl_index_t idx;
1328 search.dbn_phys.zbm_creation_txg =
1329 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1330 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1331 /*
1332 * The empty-string name can't be in the AVL, and it compares
1333 * before any entries with this TXG.
1334 */
1335 search.dbn_name = (char *)"";
1336 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1337 dsl_bookmark_node_t *dbn =
1338 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1339
1340 /*
1341 * Iterate over all bookmarks that are at or after the previous
1342 * snapshot, and before this (being deleted) snapshot. Adjust
1343 * their FBN based on their new next snapshot.
1344 */
1345 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1346 dsl_dataset_phys(ds)->ds_creation_txg;
1347 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1348 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1349 continue;
1350 /*
1351 * Increase our FBN by the amount of space that was live
1352 * (referenced) at the time of this bookmark (i.e.
1353 * birth <= zbm_creation_txg), and killed between this
1354 * (being deleted) snapshot and the next snapshot (i.e.
1355 * on the next snapshot's deadlist). (Space killed before
1356 * this is already on our FBN.)
1357 */
1358 uint64_t referenced, compressed, uncompressed;
1359 dsl_deadlist_space_range(&next->ds_deadlist,
1360 0, dbn->dbn_phys.zbm_creation_txg,
1361 &referenced, &compressed, &uncompressed);
1362 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1363 referenced;
1364 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1365 compressed;
1366 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1367 uncompressed;
1368 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1369 dbn->dbn_name, sizeof (uint64_t),
1370 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1371 &dbn->dbn_phys, tx));
1372 }
1373 dsl_dataset_rele(next, FTAG);
1374
1375 /*
1376 * There may be several bookmarks at this txg (the TXG of the
1377 * snapshot being deleted). We need to clear the SNAPSHOT_EXISTS
1378 * flag on all of them, and return TRUE if there is at least 1
1379 * bookmark here with HAS_FBN (thus preventing the deadlist
1380 * key from being removed).
1381 */
1382 boolean_t rv = B_FALSE;
1383 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1384 dsl_dataset_phys(ds)->ds_creation_txg;
1385 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1386 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1387 ASSERT(!(dbn->dbn_phys.zbm_flags &
1388 ZBM_FLAG_SNAPSHOT_EXISTS));
1389 continue;
1390 }
1391 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1392 dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1393 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1394 dbn->dbn_name, sizeof (uint64_t),
1395 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1396 &dbn->dbn_phys, tx));
1397 rv = B_TRUE;
1398 }
1399 dsl_dataset_rele(head, FTAG);
1400 return (rv);
1401 }
1402
1403 /*
1404 * A snapshot is being created of this (head) dataset.
1405 *
1406 * We don't keep keys in the deadlist for the most recent snapshot, or any
1407 * bookmarks at or after it, because there can't be any blocks on the
1408 * deadlist in this range. Now that the most recent snapshot is after
1409 * all bookmarks, we need to add these keys. Note that the caller always
1410 * adds a key at the previous snapshot, so we only add keys for bookmarks
1411 * after that.
1412 */
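/*
 * Illustrative example (hypothetical txgs): with the previous snapshot
 * at txg 100 and HAS_FBN bookmarks at txgs 150 and 180, taking a new
 * snapshot adds deadlist keys at 150 and 180 here; the caller already
 * added the key at txg 100.
 */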
1413 void
1414 dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1415 {
1416 uint64_t last_key_added = UINT64_MAX;
1417 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1418 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1419 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1420 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1421 uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1422 ASSERT3U(creation_txg, <=, last_key_added);
1423 /*
1424 * Note, there may be multiple bookmarks at this TXG,
1425 * and we only want to add the key for this TXG once.
1426 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1427 * these bookmarks in sequence.
1428 */
1429 if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1430 creation_txg != last_key_added) {
1431 dsl_deadlist_add_key(&ds->ds_deadlist,
1432 creation_txg, tx);
1433 last_key_added = creation_txg;
1434 }
1435 }
1436 }
1437
1438 /*
1439 * The next snapshot of the origin dataset has changed, due to
1440 * promote or clone swap. If there are any bookmarks at this dataset,
1441 * we need to update their zbm_*_freed_before_next_snap to reflect this.
1442 * The head dataset has the relevant bookmarks in ds_bookmarks.
1443 */
1444 void
1445 dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1446 dmu_tx_t *tx)
1447 {
1448 dsl_pool_t *dp = dmu_tx_pool(tx);
1449
1450 /*
1451 * Find the first bookmark that HAS_FBN at the origin snapshot.
1452 */
1453 dsl_bookmark_node_t search = { 0 };
1454 avl_index_t idx;
1455 search.dbn_phys.zbm_creation_txg =
1456 dsl_dataset_phys(origin)->ds_creation_txg;
1457 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1458 /*
1459 * The empty-string name can't be in the AVL, and it compares
1460 * before any entries with this TXG.
1461 */
1462 search.dbn_name = (char *)"";
1463 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1464 dsl_bookmark_node_t *dbn =
1465 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1466
1467 /*
1468 * Iterate over all bookmarks that are at the origin txg.
1469 * Adjust their FBN based on their new next snapshot.
1470 */
1471 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1472 dsl_dataset_phys(origin)->ds_creation_txg &&
1473 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1474 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1475
1476 /*
1477 * Bookmark is at the origin, therefore its
1478 * "next dataset" is changing, so we need
1479 * to reset its FBN by recomputing it in
1480 * dsl_bookmark_set_phys().
1481 */
1482 ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1483 dsl_dataset_phys(origin)->ds_guid);
1484 ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1485 dsl_dataset_phys(origin)->ds_referenced_bytes);
1486 ASSERT(dbn->dbn_phys.zbm_flags &
1487 ZBM_FLAG_SNAPSHOT_EXISTS);
1488 /*
1489 * Save and restore the zbm_redaction_obj, which
1490 * is zeroed by dsl_bookmark_set_phys().
1491 */
1492 uint64_t redaction_obj =
1493 dbn->dbn_phys.zbm_redaction_obj;
1494 dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1495 dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1496
1497 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1498 dbn->dbn_name, sizeof (uint64_t),
1499 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1500 &dbn->dbn_phys, tx));
1501 }
1502 }
1503
1504 /*
1505 * This block is no longer referenced by this (head) dataset.
1506 *
1507 * Adjust the FBN of any bookmarks that reference this block, whose "next"
1508 * is the head dataset.
1509 */
1510 void
1511 dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1512 {
1513 (void) tx;
1514
1515 /*
1516 * Iterate over bookmarks whose "next" is the head dataset.
1517 */
1518 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1519 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1520 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1521 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1522 /*
1523 * If the block was live (referenced) at the time of this
1524 * bookmark, add its space to the bookmark's FBN.
1525 */
1526 if (BP_GET_LOGICAL_BIRTH(bp) <=
1527 dbn->dbn_phys.zbm_creation_txg &&
1528 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1529 mutex_enter(&dbn->dbn_lock);
1530 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1531 bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1532 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1533 BP_GET_PSIZE(bp);
1534 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1535 BP_GET_UCSIZE(bp);
1536 /*
1537 * Changing the ZAP object here would be too
1538 * expensive. Also, we may be called from the zio
1539 * interrupt thread, which can't block on i/o.
1540 * Therefore, we mark this bookmark as dirty and
1541 * modify the ZAP once per txg, in
1542 * dsl_bookmark_sync_done().
1543 */
1544 dbn->dbn_dirty = B_TRUE;
1545 mutex_exit(&dbn->dbn_lock);
1546 }
1547 }
1548 }
1549
1550 void
1551 dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1552 {
1553 dsl_pool_t *dp = dmu_tx_pool(tx);
1554
1555 if (dsl_dataset_is_snapshot(ds))
1556 return;
1557
1558 /*
1559 * We only dirty bookmarks that are at or after the most recent
1560 * snapshot. We can't create snapshots between
1561 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1562 * don't need to look at any bookmarks before ds_prev_snap_txg.
1563 */
1564 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1565 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1566 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1567 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1568 if (dbn->dbn_dirty) {
1569 /*
1570 * We only dirty nodes with HAS_FBN, therefore
1571 * we can always use the current bookmark struct size.
1572 */
1573 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1574 VERIFY0(zap_update(dp->dp_meta_objset,
1575 ds->ds_bookmarks_obj,
1576 dbn->dbn_name, sizeof (uint64_t),
1577 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1578 &dbn->dbn_phys, tx));
1579 dbn->dbn_dirty = B_FALSE;
1580 }
1581 }
1582 #ifdef ZFS_DEBUG
1583 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1584 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1585 ASSERT(!dbn->dbn_dirty);
1586 }
1587 #endif
1588 }
1589
1590 /*
1591 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1592 */
1593 uint64_t
1594 dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1595 {
1596 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1597 dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1598 if (dbn == NULL)
1599 return (0);
1600 return (dbn->dbn_phys.zbm_creation_txg);
1601 }
1602
1603 /*
1604 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1605 * redact_block_phys_t is before the bookmark, return -1. If the first block in
1606 * the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
1607 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1608 */
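/*
 * Illustrative example (hypothetical values): for a record covering
 * object 5, blkids 10 through 19, a bookmark at object 5 blkid 4 yields
 * 1 (bookmark earlier), blkid 25 yields -1 (record earlier), and
 * blkid 12 yields 0 (bookmark within the record).
 */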
1609 static int
1610 redact_block_zb_compare(redact_block_phys_t *first,
1611 zbookmark_phys_t *second)
1612 {
1613 /*
1614 * If the block_phys is for a previous object, or the last block in the
1615 * block_phys is strictly before the block in the bookmark, the
1616 * block_phys is earlier.
1617 */
1618 if (first->rbp_object < second->zb_object ||
1619 (first->rbp_object == second->zb_object &&
1620 first->rbp_blkid + (redact_block_get_count(first) - 1) <
1621 second->zb_blkid)) {
1622 return (-1);
1623 }
1624
1625 /*
1626 * If the bookmark is for a previous object, or the block in the
1627 * bookmark is strictly before the first block in the block_phys, the
1628 * bookmark is earlier.
1629 */
1630 if (first->rbp_object > second->zb_object ||
1631 (first->rbp_object == second->zb_object &&
1632 first->rbp_blkid > second->zb_blkid)) {
1633 return (1);
1634 }
1635
1636 return (0);
1637 }
1638
1639 /*
1640 * Traverse the redaction list in the provided object, and call the callback for
1641 * each entry we find. Don't call the callback for any records before resume.
1642 */
1643 int
1644 dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1645 rl_traverse_callback_t cb, void *arg)
1646 {
1647 objset_t *mos = rl->rl_mos;
1648 int err = 0;
1649
1650 if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1651 rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1652 /*
1653 * When we finish a send, we update the last object and offset
1654 * to UINT64_MAX. If a send fails partway through, the last
1655 * object and offset will have some other value, indicating how
1656 * far the send got. The redaction list must be complete before
1657 * it can be traversed, so return EINVAL if the last object and
1658 * blkid are not set to UINT64_MAX.
1659 */
1660 return (SET_ERROR(EINVAL));
1661 }
1662
1663 /*
1664 * This allows us to skip the binary search and resume checking logic
1665 * below, if we're not resuming a redacted send.
1666 */
1667 if (ZB_IS_ZERO(resume))
1668 resume = NULL;
1669
1670 /*
1671 * Binary search for the point to resume from.
1672 */
1673 uint64_t maxidx = rl->rl_phys->rlp_num_entries - 1;
1674 uint64_t minidx = 0;
1675 while (resume != NULL && maxidx > minidx) {
1676 redact_block_phys_t rbp = { 0 };
1677 ASSERT3U(maxidx, >, minidx);
1678 uint64_t mididx = minidx + ((maxidx - minidx) / 2);
1679 err = dmu_read(mos, rl->rl_object, mididx * sizeof (rbp),
1680 sizeof (rbp), &rbp, DMU_READ_NO_PREFETCH);
1681 if (err != 0)
1682 break;
1683
1684 int cmp = redact_block_zb_compare(&rbp, resume);
1685
1686 if (cmp == 0) {
1687 minidx = mididx;
1688 break;
1689 } else if (cmp > 0) {
1690 maxidx =
1691 (mididx == minidx ? minidx : mididx - 1);
1692 } else {
1693 minidx = mididx + 1;
1694 }
1695 }
1696
1697 unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1698 redact_block_phys_t *buf = zio_data_buf_alloc(bufsize);
1699
1700 unsigned int entries_per_buf = bufsize / sizeof (redact_block_phys_t);
1701 uint64_t start_block = minidx / entries_per_buf;
1702 err = dmu_read(mos, rl->rl_object, start_block * bufsize, bufsize, buf,
1703 DMU_READ_PREFETCH);
1704
1705 for (uint64_t curidx = minidx;
1706 err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1707 curidx++) {
1708 /*
1709 * We read in the redaction list one block at a time. Once we
1710 * finish with all the entries in a given block, we read in a
1711 * new one. The predictive prefetcher will take care of any
1712 * prefetching, and this code shouldn't be the bottleneck, so we
1713 * don't need to do manual prefetching.
1714 */
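/*
 * Illustrative sizing (assumes 32-byte redact_block_phys_t records):
 * a 128 KiB buffer holds 4096 entries, so a new dmu_read() is issued
 * once every 4096 iterations.
 */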
1715 if (curidx % entries_per_buf == 0) {
1716 err = dmu_read(mos, rl->rl_object, curidx *
1717 sizeof (*buf), bufsize, buf,
1718 DMU_READ_PREFETCH);
1719 if (err != 0)
1720 break;
1721 }
1722 redact_block_phys_t *rb = &buf[curidx % entries_per_buf];
1723 /*
1724 * If resume is non-null, we should either not send the data, or
1725 * null out resume so we don't have to keep doing these
1726 * comparisons.
1727 */
1728 if (resume != NULL) {
1729 /*
1730 * It is possible that after the binary search we got
1731 * a record before the resume point. There are two cases
1732 * where this can occur. If the record is the last
1733 * redaction record, and the resume point is after the
1734 * end of the redacted data, curidx will be the last
1735 * redaction record. In that case, the loop will end
1736 * after this iteration. The second case is if the
1737 * resume point is between two redaction records, the
1738 * binary search can return either the record before
1739 * or after the resume point. In that case, the next
1740 * iteration will be greater than the resume point.
1741 */
1742 if (redact_block_zb_compare(rb, resume) < 0) {
1743 ASSERT3U(curidx, ==, minidx);
1744 continue;
1745 } else {
1746 /*
1747 * If the place to resume is in the middle of
1748 * the range described by this
1749 * redact_block_phys, then modify the
1750 * redact_block_phys in memory so we generate
1751 * the right records.
1752 */
1753 if (resume->zb_object == rb->rbp_object &&
1754 resume->zb_blkid > rb->rbp_blkid) {
1755 uint64_t diff = resume->zb_blkid -
1756 rb->rbp_blkid;
1757 rb->rbp_blkid = resume->zb_blkid;
1758 redact_block_set_count(rb,
1759 redact_block_get_count(rb) - diff);
1760 }
1761 resume = NULL;
1762 }
1763 }
1764
1765 if (cb(rb, arg) != 0) {
1766 err = EINTR;
1767 break;
1768 }
1769 }
1770
1771 zio_data_buf_free(buf, bufsize);
1772 return (err);
1773 }
1774