1 /*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source. A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16 /*
17 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18 * Copyright 2017 Nexenta Systems, Inc.
19 * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
20 */
21
22 #include <sys/zfs_context.h>
23 #include <sys/dsl_dataset.h>
24 #include <sys/dsl_dir.h>
25 #include <sys/dsl_prop.h>
26 #include <sys/dsl_synctask.h>
27 #include <sys/dsl_destroy.h>
28 #include <sys/dmu_impl.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/arc.h>
31 #include <sys/zap.h>
32 #include <sys/zfeature.h>
33 #include <sys/spa.h>
34 #include <sys/dsl_bookmark.h>
35 #include <zfs_namecheck.h>
36 #include <sys/dmu_send.h>
37 #include <sys/dbuf.h>
38
39 static int
40 dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
41 dsl_dataset_t **dsp, const void *tag, char **shortnamep)
42 {
43 char buf[ZFS_MAX_DATASET_NAME_LEN];
44 char *hashp;
45
46 if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
47 return (SET_ERROR(ENAMETOOLONG));
48 hashp = strchr(fullname, '#');
49 if (hashp == NULL)
50 return (SET_ERROR(EINVAL));
51
52 *shortnamep = hashp + 1;
53 if (zfs_component_namecheck(*shortnamep, NULL, NULL))
54 return (SET_ERROR(EINVAL));
55 (void) strlcpy(buf, fullname, hashp - fullname + 1);
56 return (dsl_dataset_hold(dp, buf, tag, dsp));
57 }
58
59 /*
60 * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
61 * to be zeroed.
62 *
63 * Returns ESRCH if bookmark is not found.
64 * Note, we need to use the ZAP rather than the AVL to look up bookmarks
65 * by name, because only the ZAP honors the casesensitivity setting.
66 */
67 int
68 dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
69 zfs_bookmark_phys_t *bmark_phys)
70 {
71 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
72 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
73 matchtype_t mt = 0;
74 int err;
75
76 if (bmark_zapobj == 0)
77 return (SET_ERROR(ESRCH));
78
79 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
80 mt = MT_NORMALIZE;
81
82 /*
83 * Zero out the bookmark in case the one stored on disk
84 * is in an older, shorter format.
85 */
86 memset(bmark_phys, 0, sizeof (*bmark_phys));
87
88 err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
89 sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
90 NULL);
91
92 return (err == ENOENT ? SET_ERROR(ESRCH) : err);
93 }
94
95 /*
96 * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
97  * does not represent an earlier point in later_ds's timeline. However,
98 * bmp will still be filled in if we return EXDEV.
99 *
100 * Returns ENOENT if the dataset containing the bookmark does not exist.
101 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
102 */
103 int
104 dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
105 dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
106 {
107 char *shortname;
108 dsl_dataset_t *ds;
109 int error;
110
111 error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
112 if (error != 0)
113 return (error);
114
115 error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
116 if (error == 0 && later_ds != NULL) {
117 if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
118 error = SET_ERROR(EXDEV);
119 }
120 dsl_dataset_rele(ds, FTAG);
121 return (error);
122 }
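
/*
 * Illustrative sketch (not part of this file's compiled code): how a caller
 * might use dsl_bookmark_lookup() to check that a bookmark is an earlier
 * point in a dataset's timeline. The pool/bookmark names are hypothetical
 * and the caller is assumed to already hold the pool configuration lock.
 *
 *	zfs_bookmark_phys_t bm;
 *	int err = dsl_bookmark_lookup(dp, "pool/fs#monday", later_ds, &bm);
 *	switch (err) {
 *	case 0:
 *		break;		// found, and an ancestor of later_ds
 *	case EXDEV:
 *		break;		// found, but not in later_ds's timeline;
 *				// bm is still valid
 *	case ENOENT:		// dataset "pool/fs" does not exist
 *	case ESRCH:		// dataset exists, bookmark does not
 *	default:
 *		break;
 *	}
 */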
123
124 /*
125 * Validates that
126 * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
127 * - source is a full path of a snapshot or bookmark
128 * ({bookmark,snapshot}_namecheck)
129 *
130 * Returns 0 if valid, -1 otherwise.
131 */
132 static int
133 dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
134 {
135 if (bookmark_namecheck(bmark, NULL, NULL) != 0)
136 return (-1);
137
138 int is_bmark, is_snap;
139 is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
140 is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
141 if (!is_bmark && !is_snap)
142 return (-1);
143
144 return (0);
145 }
146
147 /*
148 * Check that the given nvlist corresponds to the following schema:
149 * { newbookmark -> source, ... }
150 * where
151 * - each pair passes dsl_bookmark_create_nvl_validate_pair
152 * - all newbookmarks are in the same pool
153 * - all newbookmarks have unique names
154 *
155  * Note that this function only validates the above schema. Callers must ensure
156 * that the bookmarks can be created, e.g. that sources exist.
157 *
158 * Returns 0 if the nvlist adheres to above schema.
159 * Returns -1 if it doesn't.
160 */
161 int
162 dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
163 {
164 const char *first = NULL;
165 size_t first_len = 0;
166
167 for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
168 pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
169
170 const char *bmark = nvpair_name(pair);
171 const char *source;
172
173 /* list structure: values must be snapshots XOR bookmarks */
174 if (nvpair_value_string(pair, &source) != 0)
175 return (-1);
176 if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
177 return (-1);
178
179 /* same pool check */
180 if (first == NULL) {
181 const char *cp = strpbrk(bmark, "/#");
182 if (cp == NULL)
183 return (-1);
184 first = bmark;
185 first_len = cp - bmark;
186 }
187 if (strncmp(first, bmark, first_len) != 0)
188 return (-1);
189 switch (*(bmark + first_len)) {
190 case '/': /* fallthrough */
191 case '#':
192 break;
193 default:
194 return (-1);
195 }
196
197 /* unique newbookmark names; todo: O(n^2) */
198 for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
199 pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
200 if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
201 return (-1);
202 }
203
204 }
205 return (0);
206 }
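
/*
 * A minimal sketch of an nvlist that satisfies the schema checked above,
 * assuming the usual fnvlist_*() helpers; the dataset and bookmark names
 * are hypothetical:
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	// bookmark a snapshot
 *	fnvlist_add_string(bmarks, "pool/fs#monday", "pool/fs@monday");
 *	// copy an existing bookmark
 *	fnvlist_add_string(bmarks, "pool/fs#monday_copy", "pool/fs#monday");
 *	ASSERT0(dsl_bookmark_create_nvl_validate(bmarks));
 *	fnvlist_free(bmarks);
 *
 * Mixing pools (e.g. "otherpool/fs#x") or repeating a new bookmark name
 * would make the function return -1.
 */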
207
208 /*
209 * expects that newbm and source have been validated using
210 * dsl_bookmark_create_nvl_validate_pair
211 */
212 static int
213 dsl_bookmark_create_check_impl(dsl_pool_t *dp,
214 const char *newbm, const char *source)
215 {
216 ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
217 /* defer source namecheck until we know it's a snapshot or bookmark */
218
219 int error;
220 dsl_dataset_t *newbm_ds;
221 char *newbm_short;
222 zfs_bookmark_phys_t bmark_phys;
223
224 error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
225 if (error != 0)
226 return (error);
227
228 /* Verify that the new bookmark does not already exist */
229 error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
230 switch (error) {
231 case ESRCH:
232 /* happy path: new bmark doesn't exist, proceed after switch */
233 break;
234 case 0:
235 error = SET_ERROR(EEXIST);
236 goto eholdnewbmds;
237 default:
238 /* dsl_bookmark_lookup_impl already did SET_ERROR */
239 goto eholdnewbmds;
240 }
241
242 /* error is retval of the following if-cascade */
243 if (strchr(source, '@') != NULL) {
244 dsl_dataset_t *source_snap_ds;
245 ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
246 error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
247 if (error == 0) {
248 VERIFY(source_snap_ds->ds_is_snapshot);
249 /*
250 * Verify that source snapshot is an earlier point in
251 * newbm_ds's timeline (source may be newbm_ds's origin)
252 */
253 if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
254 error = SET_ERROR(
255 ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
256 dsl_dataset_rele(source_snap_ds, FTAG);
257 }
258 } else if (strchr(source, '#') != NULL) {
259 zfs_bookmark_phys_t source_phys;
260 ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
261 /*
262  * Source must exist and be an earlier point in newbm_ds's
263 * timeline (newbm_ds's origin may be a snap of source's ds)
264 */
265 error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
266 switch (error) {
267 case 0:
268 break; /* happy path */
269 case EXDEV:
270 error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
271 break;
272 default:
273 /* dsl_bookmark_lookup already did SET_ERROR */
274 break;
275 }
276 } else {
277 /*
278 * dsl_bookmark_create_nvl_validate validates that source is
279 * either snapshot or bookmark
280 */
281 panic("unreachable code: %s", source);
282 }
283
284 eholdnewbmds:
285 dsl_dataset_rele(newbm_ds, FTAG);
286 return (error);
287 }
288
289 int
290 dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
291 {
292 dsl_bookmark_create_arg_t *dbca = arg;
293 int rv = 0;
294 int schema_err = 0;
295 ASSERT3P(dbca, !=, NULL);
296 ASSERT3P(dbca->dbca_bmarks, !=, NULL);
297 /* dbca->dbca_errors is allowed to be NULL */
298
299 dsl_pool_t *dp = dmu_tx_pool(tx);
300
301 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
302 return (SET_ERROR(ENOTSUP));
303
304 if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
305 rv = schema_err = SET_ERROR(EINVAL);
306
307 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
308 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
309 const char *new = nvpair_name(pair);
310
311 int error = schema_err;
312 if (error == 0) {
313 const char *source = fnvpair_value_string(pair);
314 error = dsl_bookmark_create_check_impl(dp, new, source);
315 if (error != 0)
316 error = SET_ERROR(error);
317 }
318
319 if (error != 0) {
320 rv = error;
321 if (dbca->dbca_errors != NULL)
322 fnvlist_add_int32(dbca->dbca_errors,
323 new, error);
324 }
325 }
326
327 return (rv);
328 }
329
330 static dsl_bookmark_node_t *
331 dsl_bookmark_node_alloc(char *shortname)
332 {
333 dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
334 dbn->dbn_name = spa_strdup(shortname);
335 dbn->dbn_dirty = B_FALSE;
336 mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
337 return (dbn);
338 }
339
340 /*
341 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
342 */
343 static void
344 dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
345 {
346 spa_t *spa = dsl_dataset_get_spa(snap);
347 objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
348 dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
349
350 memset(zbm, 0, sizeof (zfs_bookmark_phys_t));
351 zbm->zbm_guid = dsp->ds_guid;
352 zbm->zbm_creation_txg = dsp->ds_creation_txg;
353 zbm->zbm_creation_time = dsp->ds_creation_time;
354 zbm->zbm_redaction_obj = 0;
355
356 /*
357 * If the dataset is encrypted create a larger bookmark to
358 * accommodate the IVset guid. The IVset guid was added
359 * after the encryption feature to prevent a problem with
360 * raw sends. If we encounter an encrypted dataset without
361 * an IVset guid we fall back to a normal bookmark.
362 */
363 if (snap->ds_dir->dd_crypto_obj != 0 &&
364 spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
365 (void) zap_lookup(mos, snap->ds_object,
366 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
367 &zbm->zbm_ivset_guid);
368 }
369
370 if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
371 zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
372 zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
373 zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
374 zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
375
376 dsl_dataset_t *nextds;
377 VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
378 dsp->ds_next_snap_obj, FTAG, &nextds));
379 dsl_deadlist_space(&nextds->ds_deadlist,
380 &zbm->zbm_referenced_freed_before_next_snap,
381 &zbm->zbm_compressed_freed_before_next_snap,
382 &zbm->zbm_uncompressed_freed_before_next_snap);
383 dsl_dataset_rele(nextds, FTAG);
384 }
385 }
386
387 /*
388 * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
389 * SPA feature counters.
390 */
391 void
392 dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
393 dmu_tx_t *tx)
394 {
395 dsl_pool_t *dp = dmu_tx_pool(tx);
396 objset_t *mos = dp->dp_meta_objset;
397
398 if (hds->ds_bookmarks_obj == 0) {
399 hds->ds_bookmarks_obj = zap_create_norm(mos,
400 U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
401 tx);
402 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
403
404 dsl_dataset_zapify(hds, tx);
405 VERIFY0(zap_add(mos, hds->ds_object,
406 DS_FIELD_BOOKMARK_NAMES,
407 sizeof (hds->ds_bookmarks_obj), 1,
408 &hds->ds_bookmarks_obj, tx));
409 }
410
411 avl_add(&hds->ds_bookmarks, dbn);
412
413 /*
414 * To maintain backwards compatibility with software that doesn't
415 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
416 * possible bookmark size.
417 */
418 uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
419 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
420 (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
421 ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
422 bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
423 spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
424 }
425
426 zfs_bookmark_phys_t zero_phys = { 0 };
427 ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
428 &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
429
430 VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
431 sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
432 &dbn->dbn_phys, tx));
433 }
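
/*
 * Worked example of the size selection above, assuming the layout of
 * zfs_bookmark_phys_t in sys/dsl_bookmark.h (guid, creation_txg and
 * creation_time first, followed by the redaction, FBN and ivset fields):
 *
 *	// plain bookmark, or pool without bookmark_v2 in use:
 *	//   BOOKMARK_PHYS_SIZE_V1 / sizeof (uint64_t) == 3 ZAP integers
 *	// redacted, FBN-carrying or ivset-carrying bookmark:
 *	//   BOOKMARK_PHYS_SIZE_V2 / sizeof (uint64_t) == 12 ZAP integers
 *
 * The ASSERT0(memcmp(...)) above verifies that everything past the chosen
 * size really is zero, so writing the shorter record loses no information.
 */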
434
435 /*
436 * If redaction_list is non-null, we create a redacted bookmark and redaction
437 * list, and store the object number of the redaction list in redact_obj.
438 */
439 static void
440 dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
441 dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps,
442 const void *tag, redaction_list_t **redaction_list)
443 {
444 dsl_pool_t *dp = dmu_tx_pool(tx);
445 objset_t *mos = dp->dp_meta_objset;
446 dsl_dataset_t *snapds, *bmark_fs;
447 char *shortname;
448 boolean_t bookmark_redacted;
449 uint64_t *dsredactsnaps;
450 uint64_t dsnumsnaps;
451
452 VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
453 VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
454 &shortname));
455
456 dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
457 dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
458
459 bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
460 SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
461 if (redaction_list != NULL || bookmark_redacted) {
462 redaction_list_t *local_rl;
463 boolean_t spill = B_FALSE;
464 if (bookmark_redacted) {
465 redact_snaps = dsredactsnaps;
466 num_redact_snaps = dsnumsnaps;
467 }
468 int bonuslen = sizeof (redaction_list_phys_t) +
469 num_redact_snaps * sizeof (uint64_t);
470 if (bonuslen > dmu_bonus_max())
471 spill = B_TRUE;
472 dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
473 DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
474 DMU_OTN_UINT64_METADATA, spill ? 0 : bonuslen, tx);
475 spa_feature_incr(dp->dp_spa,
476 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
477 if (spill) {
478 spa_feature_incr(dp->dp_spa,
479 SPA_FEATURE_REDACTION_LIST_SPILL, tx);
480 }
481
482 VERIFY0(dsl_redaction_list_hold_obj(dp,
483 dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
484 dsl_redaction_list_long_hold(dp, local_rl, tag);
485
486 if (!spill) {
487 ASSERT3U(local_rl->rl_bonus->db_size, >=, bonuslen);
488 dmu_buf_will_dirty(local_rl->rl_bonus, tx);
489 } else {
490 dmu_buf_t *db;
491 VERIFY0(dmu_spill_hold_by_bonus(local_rl->rl_bonus,
492 DB_RF_MUST_SUCCEED, FTAG, &db));
493 dmu_buf_will_fill(db, tx, B_FALSE);
494 VERIFY0(dbuf_spill_set_blksz(db, P2ROUNDUP(bonuslen,
495 SPA_MINBLOCKSIZE), tx));
496 local_rl->rl_phys = db->db_data;
497 local_rl->rl_dbuf = db;
498 }
499 memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
500 sizeof (uint64_t) * num_redact_snaps);
501 local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
502 if (bookmark_redacted) {
503 ASSERT3P(redaction_list, ==, NULL);
504 local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
505 local_rl->rl_phys->rlp_last_object = UINT64_MAX;
506 dsl_redaction_list_long_rele(local_rl, tag);
507 dsl_redaction_list_rele(local_rl, tag);
508 } else {
509 *redaction_list = local_rl;
510 }
511 }
512
513 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
514 spa_feature_incr(dp->dp_spa,
515 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
516 }
517
518 dsl_bookmark_node_add(bmark_fs, dbn, tx);
519
520 spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
521 "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
522 shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
523 (longlong_t)snapds->ds_object,
524 (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
525
526 dsl_dataset_rele(bmark_fs, FTAG);
527 dsl_dataset_rele(snapds, FTAG);
528 }
529
530
531 static void
532 dsl_bookmark_create_sync_impl_book(
533 const char *new_name, const char *source_name, dmu_tx_t *tx)
534 {
535 dsl_pool_t *dp = dmu_tx_pool(tx);
536 dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
537 char *source_shortname, *new_shortname;
538 zfs_bookmark_phys_t source_phys;
539
540 VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
541 &source_shortname));
542 VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
543 &new_shortname));
544
545 /*
546 * create a copy of the source bookmark by copying most of its members
547 *
548 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
549 * -----------------------------------------------------------------
550 * Reasoning:
551 * - The zbm_redaction_obj would be referred to by both source and new
552 * bookmark, but would be destroyed once either source or new is
553 * destroyed, resulting in use-after-free of the referred object.
554 * - User expectation when issuing the `zfs bookmark` command is that
555 * a normal bookmark of the source is created
556 *
557 * Design Alternatives For Full Redaction Bookmark Copying:
558 * - reference-count the redaction object => would require on-disk
559 * format change for existing redaction objects
560 * - Copy the redaction object => cannot be done in syncing context
561 * because the redaction object might be too large
562 */
563
564 VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
565 &source_phys));
566 dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
567
568 memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
569 new_dbn->dbn_phys.zbm_redaction_obj = 0;
570
571 /* update feature counters */
572 if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
573 spa_feature_incr(dp->dp_spa,
574 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
575 }
576 /* no need for redaction bookmark counter; nulled zbm_redaction_obj */
577 /* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
578
579 /*
580 * write new bookmark
581 *
582 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
583 * v1 bookmark, the v2-only fields are zeroed.
584 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
585 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
586 * => bookmark copying works on pre-bookmark-v2 pools
587 */
588 dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
589
590 spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
591 "name=%s creation_txg=%llu source_guid=%llu",
592 new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
593 (longlong_t)source_phys.zbm_guid);
594
595 dsl_dataset_rele(bmark_fs_source, FTAG);
596 dsl_dataset_rele(bmark_fs_new, FTAG);
597 }
598
599 void
600 dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
601 {
602 dsl_bookmark_create_arg_t *dbca = arg;
603
604 ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
605 SPA_FEATURE_BOOKMARKS));
606
607 for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
608 pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
609
610 const char *new = nvpair_name(pair);
611 const char *source = fnvpair_value_string(pair);
612
613 if (strchr(source, '@') != NULL) {
614 dsl_bookmark_create_sync_impl_snap(new, source, tx,
615 0, NULL, NULL, NULL);
616 } else if (strchr(source, '#') != NULL) {
617 dsl_bookmark_create_sync_impl_book(new, source, tx);
618 } else {
619 panic("unreachable code");
620 }
621
622 }
623 }
624
625 /*
626 * The bookmarks must all be in the same pool.
627 */
628 int
629 dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
630 {
631 nvpair_t *pair;
632 dsl_bookmark_create_arg_t dbca;
633
634 pair = nvlist_next_nvpair(bmarks, NULL);
635 if (pair == NULL)
636 return (0);
637
638 dbca.dbca_bmarks = bmarks;
639 dbca.dbca_errors = errors;
640
641 return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
642 dsl_bookmark_create_sync, &dbca,
643 fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
644 }
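
/*
 * Caller-side sketch (hypothetical names), showing how the per-bookmark
 * errors nvlist filled in by dsl_bookmark_create_check() can be consumed:
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *	fnvlist_add_string(bmarks, "pool/fs#monday", "pool/fs@monday");
 *	int err = dsl_bookmark_create(bmarks, errors);
 *	if (err != 0) {
 *		for (nvpair_t *p = nvlist_next_nvpair(errors, NULL);
 *		    p != NULL; p = nvlist_next_nvpair(errors, p)) {
 *			// nvpair_name(p) is the bookmark that failed,
 *			// fnvpair_value_int32(p) is its error code
 *		}
 *	}
 *	fnvlist_free(errors);
 *	fnvlist_free(bmarks);
 */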
645
646 static int
647 dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
648 {
649 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
650 dsl_pool_t *dp = dmu_tx_pool(tx);
651 int rv = 0;
652
653 if (!spa_feature_is_enabled(dp->dp_spa,
654 SPA_FEATURE_REDACTION_BOOKMARKS))
655 return (SET_ERROR(ENOTSUP));
656 /*
657 * If the list of redact snaps will not fit in the bonus buffer (or
658 * spill block, with the REDACTION_LIST_SPILL feature) with the
659 * furthest reached object and offset, fail.
660 */
661 uint64_t snaplimit = ((spa_feature_is_enabled(dp->dp_spa,
662 SPA_FEATURE_REDACTION_LIST_SPILL) ? spa_maxblocksize(dp->dp_spa) :
663 dmu_bonus_max()) -
664 sizeof (redaction_list_phys_t)) / sizeof (uint64_t);
665 if (dbcra->dbcra_numsnaps > snaplimit)
666 return (SET_ERROR(E2BIG));
667
668 if (dsl_bookmark_create_nvl_validate_pair(
669 dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
670 return (SET_ERROR(EINVAL));
671
672 rv = dsl_bookmark_create_check_impl(dp,
673 dbcra->dbcra_bmark, dbcra->dbcra_snap);
674 return (rv);
675 }
676
677 static void
678 dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
679 {
680 dsl_bookmark_create_redacted_arg_t *dbcra = arg;
681 dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
682 dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
683 dbcra->dbcra_tag, dbcra->dbcra_rl);
684 }
685
686 int
687 dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
688 uint64_t numsnaps, uint64_t *snapguids, const void *tag,
689 redaction_list_t **rl)
690 {
691 dsl_bookmark_create_redacted_arg_t dbcra;
692
693 dbcra.dbcra_bmark = bookmark;
694 dbcra.dbcra_snap = snapshot;
695 dbcra.dbcra_rl = rl;
696 dbcra.dbcra_numsnaps = numsnaps;
697 dbcra.dbcra_snaps = snapguids;
698 dbcra.dbcra_tag = tag;
699
700 return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
701 dsl_bookmark_create_redacted_sync, &dbcra, 5,
702 ZFS_SPACE_CHECK_NORMAL));
703 }
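
/*
 * Sketch of how a redacted send might set up its redaction bookmark (the
 * names and guid array are hypothetical; the real caller lives in the
 * redacted-send code). The returned redaction list is long-held with `tag`
 * and should be released by the caller once the send has filled it in:
 *
 *	redaction_list_t *rl;
 *	uint64_t guids[] = { snap1_guid, snap2_guid };
 *	int err = dsl_bookmark_create_redacted("pool/fs#redact1",
 *	    "pool/fs@snap", 2, guids, FTAG, &rl);
 *	...
 *	dsl_redaction_list_long_rele(rl, FTAG);
 *	dsl_redaction_list_rele(rl, FTAG);
 */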
704
705 /*
706 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
707 * If 'props' is NULL, retrieves all properties.
708 */
709 static void
710 dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
711 nvlist_t *props, nvlist_t *out_props)
712 {
713 ASSERT3P(dp, !=, NULL);
714 ASSERT3P(bmark_phys, !=, NULL);
715 ASSERT3P(out_props, !=, NULL);
716 ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
717
718 if (props == NULL || nvlist_exists(props,
719 zfs_prop_to_name(ZFS_PROP_GUID))) {
720 dsl_prop_nvlist_add_uint64(out_props,
721 ZFS_PROP_GUID, bmark_phys->zbm_guid);
722 }
723 if (props == NULL || nvlist_exists(props,
724 zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
725 dsl_prop_nvlist_add_uint64(out_props,
726 ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
727 }
728 if (props == NULL || nvlist_exists(props,
729 zfs_prop_to_name(ZFS_PROP_CREATION))) {
730 dsl_prop_nvlist_add_uint64(out_props,
731 ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
732 }
733 if (props == NULL || nvlist_exists(props,
734 zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
735 dsl_prop_nvlist_add_uint64(out_props,
736 ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
737 }
738 if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
739 if (props == NULL || nvlist_exists(props,
740 zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
741 dsl_prop_nvlist_add_uint64(out_props,
742 ZFS_PROP_REFERENCED,
743 bmark_phys->zbm_referenced_bytes_refd);
744 }
745 if (props == NULL || nvlist_exists(props,
746 zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
747 dsl_prop_nvlist_add_uint64(out_props,
748 ZFS_PROP_LOGICALREFERENCED,
749 bmark_phys->zbm_uncompressed_bytes_refd);
750 }
751 if (props == NULL || nvlist_exists(props,
752 zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
753 uint64_t ratio =
754 bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
755 bmark_phys->zbm_uncompressed_bytes_refd * 100 /
756 bmark_phys->zbm_compressed_bytes_refd;
757 dsl_prop_nvlist_add_uint64(out_props,
758 ZFS_PROP_REFRATIO, ratio);
759 }
760 }
761
762 if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
763 nvlist_exists(props, "redact_complete")) &&
764 bmark_phys->zbm_redaction_obj != 0) {
765 redaction_list_t *rl;
766 int err = dsl_redaction_list_hold_obj(dp,
767 bmark_phys->zbm_redaction_obj, FTAG, &rl);
768 if (err == 0) {
769 if (nvlist_exists(props, "redact_snaps")) {
770 nvlist_t *nvl;
771 nvl = fnvlist_alloc();
772 fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
773 rl->rl_phys->rlp_snaps,
774 rl->rl_phys->rlp_num_snaps);
775 fnvlist_add_nvlist(out_props, "redact_snaps",
776 nvl);
777 nvlist_free(nvl);
778 }
779 if (nvlist_exists(props, "redact_complete")) {
780 nvlist_t *nvl;
781 nvl = fnvlist_alloc();
782 fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
783 rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
784 rl->rl_phys->rlp_last_object == UINT64_MAX);
785 fnvlist_add_nvlist(out_props, "redact_complete",
786 nvl);
787 nvlist_free(nvl);
788 }
789 dsl_redaction_list_rele(rl, FTAG);
790 }
791 }
792 }
793
794 int
795 dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
796 {
797 dsl_pool_t *dp = ds->ds_dir->dd_pool;
798
799 ASSERT(dsl_pool_config_held(dp));
800
801 if (dsl_dataset_is_snapshot(ds))
802 return (SET_ERROR(EINVAL));
803
804 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
805 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
806 nvlist_t *out_props = fnvlist_alloc();
807
808 dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
809
810 fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
811 fnvlist_free(out_props);
812 }
813 return (0);
814 }
815
816 /*
817 * Comparison func for ds_bookmarks AVL tree. We sort the bookmarks by
818 * their TXG, then by their FBN-ness. The "FBN-ness" component ensures
819 * that all bookmarks at the same TXG that HAS_FBN are adjacent, which
820 * dsl_bookmark_destroy_sync_impl() depends on. Note that there may be
821 * multiple bookmarks at the same TXG (with the same FBN-ness). In this
822 * case we differentiate them by an arbitrary metric (in this case,
823 * their names).
824 */
825 static int
826 dsl_bookmark_compare(const void *l, const void *r)
827 {
828 const dsl_bookmark_node_t *ldbn = l;
829 const dsl_bookmark_node_t *rdbn = r;
830
831 int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
832 rdbn->dbn_phys.zbm_creation_txg);
833 if (likely(cmp))
834 return (cmp);
835 cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
836 (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
837 if (likely(cmp))
838 return (cmp);
839 cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
840 return (TREE_ISIGN(cmp));
841 }
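
/*
 * For example, bookmarks with (creation_txg, HAS_FBN, name) of
 * (100, 0, "c"), (100, 1, "a"), (100, 1, "b") and (200, 0, "a") sort in
 * exactly that order: TXG first, non-FBN before FBN within a TXG, then
 * the name as the final tie-breaker.
 */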
842
843 /*
844 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
845 */
846 int
847 dsl_bookmark_init_ds(dsl_dataset_t *ds)
848 {
849 dsl_pool_t *dp = ds->ds_dir->dd_pool;
850 objset_t *mos = dp->dp_meta_objset;
851
852 ASSERT(!ds->ds_is_snapshot);
853
854 avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
855 sizeof (dsl_bookmark_node_t),
856 offsetof(dsl_bookmark_node_t, dbn_node));
857
858 if (!dsl_dataset_is_zapified(ds))
859 return (0);
860
861 int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
862 sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
863 if (zaperr == ENOENT)
864 return (0);
865 if (zaperr != 0)
866 return (zaperr);
867
868 if (ds->ds_bookmarks_obj == 0)
869 return (0);
870
871 int err = 0;
872 zap_cursor_t zc;
873 zap_attribute_t *attr;
874
875 attr = zap_attribute_alloc();
876 for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
877 (err = zap_cursor_retrieve(&zc, attr)) == 0;
878 zap_cursor_advance(&zc)) {
879 dsl_bookmark_node_t *dbn =
880 dsl_bookmark_node_alloc(attr->za_name);
881
882 err = dsl_bookmark_lookup_impl(ds,
883 dbn->dbn_name, &dbn->dbn_phys);
884 ASSERT3U(err, !=, ENOENT);
885 if (err != 0) {
886 kmem_free(dbn, sizeof (*dbn));
887 break;
888 }
889 avl_add(&ds->ds_bookmarks, dbn);
890 }
891 zap_cursor_fini(&zc);
892 zap_attribute_free(attr);
893 if (err == ENOENT)
894 err = 0;
895 return (err);
896 }
897
898 void
899 dsl_bookmark_fini_ds(dsl_dataset_t *ds)
900 {
901 void *cookie = NULL;
902 dsl_bookmark_node_t *dbn;
903
904 if (ds->ds_is_snapshot)
905 return;
906
907 while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
908 spa_strfree(dbn->dbn_name);
909 mutex_destroy(&dbn->dbn_lock);
910 kmem_free(dbn, sizeof (*dbn));
911 }
912 avl_destroy(&ds->ds_bookmarks);
913 }
914
915 /*
916 * Retrieve the bookmarks that exist in the specified dataset, and the
917 * requested properties of each bookmark.
918 *
919 * The "props" nvlist specifies which properties are requested.
920 * See lzc_get_bookmarks() for the list of valid properties.
921 */
922 int
923 dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
924 {
925 dsl_pool_t *dp;
926 dsl_dataset_t *ds;
927 int err;
928
929 err = dsl_pool_hold(dsname, FTAG, &dp);
930 if (err != 0)
931 return (err);
932 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
933 if (err != 0) {
934 dsl_pool_rele(dp, FTAG);
935 return (err);
936 }
937
938 err = dsl_get_bookmarks_impl(ds, props, outnvl);
939
940 dsl_dataset_rele(ds, FTAG);
941 dsl_pool_rele(dp, FTAG);
942 return (err);
943 }
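
/*
 * Caller-side sketch (hypothetical dataset name), requesting only a couple
 * of properties. Each entry of outnvl is keyed by the bookmark's short name
 * and holds the property nvlist built by dsl_bookmark_fetch_props():
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *outnvl = fnvlist_alloc();
 *	fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_GUID));
 *	fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_CREATETXG));
 *	int err = dsl_get_bookmarks("pool/fs", props, outnvl);
 *	...
 *	fnvlist_free(outnvl);
 *	fnvlist_free(props);
 */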
944
945 /*
946 * Retrieve all properties for a single bookmark in the given dataset.
947 */
948 int
949 dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
950 {
951 dsl_pool_t *dp;
952 dsl_dataset_t *ds;
953 zfs_bookmark_phys_t bmark_phys = { 0 };
954 int err;
955
956 err = dsl_pool_hold(dsname, FTAG, &dp);
957 if (err != 0)
958 return (err);
959 err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
960 if (err != 0) {
961 dsl_pool_rele(dp, FTAG);
962 return (err);
963 }
964
965 err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
966 if (err != 0)
967 goto out;
968
969 dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
970 out:
971 dsl_dataset_rele(ds, FTAG);
972 dsl_pool_rele(dp, FTAG);
973 return (err);
974 }
975
976 typedef struct dsl_bookmark_destroy_arg {
977 nvlist_t *dbda_bmarks;
978 nvlist_t *dbda_success;
979 nvlist_t *dbda_errors;
980 } dsl_bookmark_destroy_arg_t;
981
982 static void
983 dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
984 dmu_tx_t *tx)
985 {
986 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
987 uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
988 matchtype_t mt = 0;
989 uint64_t int_size, num_ints;
990 /*
991 * 'search' must be zeroed so that dbn_flags (which is used in
992 * dsl_bookmark_compare()) will be zeroed even if the on-disk
993 * (in ZAP) bookmark is shorter than offsetof(dbn_flags).
994 */
995 dsl_bookmark_node_t search = { 0 };
996 char realname[ZFS_MAX_DATASET_NAME_LEN];
997
998 /*
999 * Find the real name of this bookmark, which may be different
1000 * from the given name if the dataset is case-insensitive. Then
1001 * use the real name to find the node in the ds_bookmarks AVL tree.
1002 */
1003
1004 if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
1005 mt = MT_NORMALIZE;
1006
1007 VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
1008
1009 ASSERT3U(int_size, ==, sizeof (uint64_t));
1010
1011 if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
1012 spa_feature_decr(dmu_objset_spa(mos),
1013 SPA_FEATURE_BOOKMARK_V2, tx);
1014 }
1015 VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
1016 num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
1017
1018 search.dbn_name = realname;
1019 dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
1020 ASSERT(dbn != NULL);
1021
1022 if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1023 /*
1024 * If this bookmark HAS_FBN, and it is before the most
1025 * recent snapshot, then its TXG is a key in the head's
1026 * deadlist (and all clones' heads' deadlists). If this is
1027 * the last thing keeping the key (i.e. there are no more
1028 * bookmarks with HAS_FBN at this TXG, and there is no
1029 * snapshot at this TXG), then remove the key.
1030 *
1031 * Note that this algorithm depends on ds_bookmarks being
1032 * sorted such that all bookmarks at the same TXG with
1033 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1034 * at the same TXG in between them). If this were not
1035 * the case, we would need to examine *all* bookmarks
1036 * at this TXG, rather than just the adjacent ones.
1037 */
1038
1039 dsl_bookmark_node_t *dbn_prev =
1040 AVL_PREV(&ds->ds_bookmarks, dbn);
1041 dsl_bookmark_node_t *dbn_next =
1042 AVL_NEXT(&ds->ds_bookmarks, dbn);
1043
1044 boolean_t more_bookmarks_at_this_txg =
1045 (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
1046 dbn->dbn_phys.zbm_creation_txg &&
1047 (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
1048 (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
1049 dbn->dbn_phys.zbm_creation_txg &&
1050 (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
1051
1052 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
1053 !more_bookmarks_at_this_txg &&
1054 dbn->dbn_phys.zbm_creation_txg <
1055 dsl_dataset_phys(ds)->ds_prev_snap_txg) {
1056 dsl_dir_remove_clones_key(ds->ds_dir,
1057 dbn->dbn_phys.zbm_creation_txg, tx);
1058 dsl_deadlist_remove_key(&ds->ds_deadlist,
1059 dbn->dbn_phys.zbm_creation_txg, tx);
1060 }
1061
1062 spa_feature_decr(dmu_objset_spa(mos),
1063 SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1064 }
1065
1066 if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1067 dnode_t *rl;
1068 VERIFY0(dnode_hold(mos,
1069 dbn->dbn_phys.zbm_redaction_obj, FTAG, &rl));
1070 if (rl->dn_have_spill) {
1071 spa_feature_decr(dmu_objset_spa(mos),
1072 SPA_FEATURE_REDACTION_LIST_SPILL, tx);
1073 }
1074 dnode_rele(rl, FTAG);
1075 VERIFY0(dmu_object_free(mos,
1076 dbn->dbn_phys.zbm_redaction_obj, tx));
1077 spa_feature_decr(dmu_objset_spa(mos),
1078 SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1079 }
1080
1081 avl_remove(&ds->ds_bookmarks, dbn);
1082 spa_strfree(dbn->dbn_name);
1083 mutex_destroy(&dbn->dbn_lock);
1084 kmem_free(dbn, sizeof (*dbn));
1085
1086 VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
1087 }
1088
1089 static int
1090 dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
1091 {
1092 dsl_bookmark_destroy_arg_t *dbda = arg;
1093 dsl_pool_t *dp = dmu_tx_pool(tx);
1094 int rv = 0;
1095
1096 ASSERT(nvlist_empty(dbda->dbda_success));
1097 ASSERT(nvlist_empty(dbda->dbda_errors));
1098
1099 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
1100 return (0);
1101
1102 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
1103 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
1104 const char *fullname = nvpair_name(pair);
1105 dsl_dataset_t *ds;
1106 zfs_bookmark_phys_t bm;
1107 int error;
1108 char *shortname;
1109
1110 error = dsl_bookmark_hold_ds(dp, fullname, &ds,
1111 FTAG, &shortname);
1112 if (error == ENOENT) {
1113 /* ignore it; the bookmark is "already destroyed" */
1114 continue;
1115 }
1116 if (error == 0) {
1117 error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
1118 dsl_dataset_rele(ds, FTAG);
1119 if (error == ESRCH) {
1120 /*
1121 * ignore it; the bookmark is
1122 * "already destroyed"
1123 */
1124 continue;
1125 }
1126 if (error == 0 && bm.zbm_redaction_obj != 0) {
1127 redaction_list_t *rl = NULL;
1128 error = dsl_redaction_list_hold_obj(tx->tx_pool,
1129 bm.zbm_redaction_obj, FTAG, &rl);
1130 if (error == ENOENT) {
1131 error = 0;
1132 } else if (error == 0 &&
1133 dsl_redaction_list_long_held(rl)) {
1134 error = SET_ERROR(EBUSY);
1135 }
1136 if (rl != NULL) {
1137 dsl_redaction_list_rele(rl, FTAG);
1138 }
1139 }
1140 }
1141 if (error == 0) {
1142 if (dmu_tx_is_syncing(tx)) {
1143 fnvlist_add_boolean(dbda->dbda_success,
1144 fullname);
1145 }
1146 } else {
1147 fnvlist_add_int32(dbda->dbda_errors, fullname, error);
1148 rv = error;
1149 }
1150 }
1151 return (rv);
1152 }
1153
1154 static void
1155 dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
1156 {
1157 dsl_bookmark_destroy_arg_t *dbda = arg;
1158 dsl_pool_t *dp = dmu_tx_pool(tx);
1159 objset_t *mos = dp->dp_meta_objset;
1160
1161 for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
1162 pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
1163 dsl_dataset_t *ds;
1164 char *shortname;
1165 uint64_t zap_cnt;
1166
1167 VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
1168 &ds, FTAG, &shortname));
1169 dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
1170
1171 /*
1172 * If all of this dataset's bookmarks have been destroyed,
1173 * free the zap object and decrement the feature's use count.
1174 */
1175 VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
1176 if (zap_cnt == 0) {
1177 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1178 VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1179 ds->ds_bookmarks_obj = 0;
1180 spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1181 VERIFY0(zap_remove(mos, ds->ds_object,
1182 DS_FIELD_BOOKMARK_NAMES, tx));
1183 }
1184
1185 spa_history_log_internal_ds(ds, "remove bookmark", tx,
1186 "name=%s", shortname);
1187
1188 dsl_dataset_rele(ds, FTAG);
1189 }
1190 }
1191
1192 /*
1193 * The bookmarks must all be in the same pool.
1194 */
1195 int
1196 dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
1197 {
1198 int rv;
1199 dsl_bookmark_destroy_arg_t dbda;
1200 nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
1201 if (pair == NULL)
1202 return (0);
1203
1204 dbda.dbda_bmarks = bmarks;
1205 dbda.dbda_errors = errors;
1206 dbda.dbda_success = fnvlist_alloc();
1207
1208 rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
1209 dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
1210 ZFS_SPACE_CHECK_RESERVED);
1211 fnvlist_free(dbda.dbda_success);
1212 return (rv);
1213 }
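
/*
 * Caller-side sketch (hypothetical names). Only the nvpair names are
 * inspected, so boolean flags are enough; bookmarks that are already gone
 * are silently skipped by dsl_bookmark_destroy_check():
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *	fnvlist_add_boolean(bmarks, "pool/fs#monday");
 *	fnvlist_add_boolean(bmarks, "pool/fs#tuesday");
 *	int err = dsl_bookmark_destroy(bmarks, errors);
 *	...
 *	fnvlist_free(errors);
 *	fnvlist_free(bmarks);
 */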
1214
1215 /* Return B_TRUE if there are any long holds on this dataset. */
1216 boolean_t
1217 dsl_redaction_list_long_held(redaction_list_t *rl)
1218 {
1219 return (!zfs_refcount_is_zero(&rl->rl_longholds));
1220 }
1221
1222 void
1223 dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl,
1224 const void *tag)
1225 {
1226 ASSERT(dsl_pool_config_held(dp));
1227 (void) zfs_refcount_add(&rl->rl_longholds, tag);
1228 }
1229
1230 void
1231 dsl_redaction_list_long_rele(redaction_list_t *rl, const void *tag)
1232 {
1233 (void) zfs_refcount_remove(&rl->rl_longholds, tag);
1234 }
1235
1236 static void
1237 redaction_list_evict_sync(void *rlu)
1238 {
1239 redaction_list_t *rl = rlu;
1240 zfs_refcount_destroy(&rl->rl_longholds);
1241
1242 kmem_free(rl, sizeof (redaction_list_t));
1243 }
1244
1245 void
1246 dsl_redaction_list_rele(redaction_list_t *rl, const void *tag)
1247 {
1248 if (rl->rl_bonus != rl->rl_dbuf)
1249 dmu_buf_rele(rl->rl_dbuf, tag);
1250 dmu_buf_rele(rl->rl_bonus, tag);
1251 }
1252
1253 int
1254 dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, const void *tag,
1255 redaction_list_t **rlp)
1256 {
1257 objset_t *mos = dp->dp_meta_objset;
1258 dmu_buf_t *dbuf, *spill_dbuf;
1259 redaction_list_t *rl;
1260 int err;
1261
1262 ASSERT(dsl_pool_config_held(dp));
1263
1264 err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1265 if (err != 0)
1266 return (err);
1267
1268 rl = dmu_buf_get_user(dbuf);
1269 if (rl == NULL) {
1270 redaction_list_t *winner = NULL;
1271
1272 rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1273 rl->rl_bonus = dbuf;
1274 if (dmu_spill_hold_existing(dbuf, tag, &spill_dbuf) == 0) {
1275 rl->rl_dbuf = spill_dbuf;
1276 } else {
1277 rl->rl_dbuf = dbuf;
1278 }
1279 rl->rl_object = rlobj;
1280 rl->rl_phys = rl->rl_dbuf->db_data;
1281 rl->rl_mos = dp->dp_meta_objset;
1282 zfs_refcount_create(&rl->rl_longholds);
1283 dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1284 &rl->rl_bonus);
1285 if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1286 kmem_free(rl, sizeof (*rl));
1287 rl = winner;
1288 }
1289 }
1290 *rlp = rl;
1291 return (0);
1292 }
1293
1294 /*
1295 * Snapshot ds is being destroyed.
1296 *
1297 * Adjust the "freed_before_next" of any bookmarks between this snap
1298 * and the previous snapshot, because their "next snapshot" is changing.
1299 *
1300 * If there are any bookmarks with HAS_FBN at this snapshot, remove
1301 * their HAS_SNAP flag (note: there can be at most one snapshot of
1302 * each filesystem at a given txg), and return B_TRUE. In this case
1303  * the caller cannot remove the key in the deadlist at this TXG, because
1304 * the HAS_FBN bookmarks require the key be there.
1305 *
1306 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1307 * snapshot's TXG. In this case the caller can remove the key in the
1308 * deadlist at this TXG.
1309 */
1310 boolean_t
1311 dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1312 {
1313 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1314
1315 dsl_dataset_t *head, *next;
1316 VERIFY0(dsl_dataset_hold_obj(dp,
1317 dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1318 VERIFY0(dsl_dataset_hold_obj(dp,
1319 dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1320
1321 /*
1322 * Find the first bookmark that HAS_FBN at or after the
1323 * previous snapshot.
1324 */
1325 dsl_bookmark_node_t search = { 0 };
1326 avl_index_t idx;
1327 search.dbn_phys.zbm_creation_txg =
1328 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1329 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1330 /*
1331 * The empty-string name can't be in the AVL, and it compares
1332 * before any entries with this TXG.
1333 */
1334 search.dbn_name = (char *)"";
1335 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1336 dsl_bookmark_node_t *dbn =
1337 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1338
1339 /*
1340 * Iterate over all bookmarks that are at or after the previous
1341 * snapshot, and before this (being deleted) snapshot. Adjust
1342 * their FBN based on their new next snapshot.
1343 */
1344 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1345 dsl_dataset_phys(ds)->ds_creation_txg;
1346 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1347 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1348 continue;
1349 /*
1350 * Increase our FBN by the amount of space that was live
1351 * (referenced) at the time of this bookmark (i.e.
1352 * birth <= zbm_creation_txg), and killed between this
1353 * (being deleted) snapshot and the next snapshot (i.e.
1354 * on the next snapshot's deadlist). (Space killed before
1355  * this is already included in our FBN.)
1356 */
1357 uint64_t referenced, compressed, uncompressed;
1358 dsl_deadlist_space_range(&next->ds_deadlist,
1359 0, dbn->dbn_phys.zbm_creation_txg,
1360 &referenced, &compressed, &uncompressed);
1361 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1362 referenced;
1363 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1364 compressed;
1365 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1366 uncompressed;
1367 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1368 dbn->dbn_name, sizeof (uint64_t),
1369 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1370 &dbn->dbn_phys, tx));
1371 }
1372 dsl_dataset_rele(next, FTAG);
1373
1374 /*
1375 * There may be several bookmarks at this txg (the TXG of the
1376 * snapshot being deleted). We need to clear the SNAPSHOT_EXISTS
1377 * flag on all of them, and return TRUE if there is at least 1
1378 * bookmark here with HAS_FBN (thus preventing the deadlist
1379 * key from being removed).
1380 */
1381 boolean_t rv = B_FALSE;
1382 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1383 dsl_dataset_phys(ds)->ds_creation_txg;
1384 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1385 if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1386 ASSERT(!(dbn->dbn_phys.zbm_flags &
1387 ZBM_FLAG_SNAPSHOT_EXISTS));
1388 continue;
1389 }
1390 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1391 dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1392 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1393 dbn->dbn_name, sizeof (uint64_t),
1394 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1395 &dbn->dbn_phys, tx));
1396 rv = B_TRUE;
1397 }
1398 dsl_dataset_rele(head, FTAG);
1399 return (rv);
1400 }
1401
1402 /*
1403 * A snapshot is being created of this (head) dataset.
1404 *
1405 * We don't keep keys in the deadlist for the most recent snapshot, or any
1406 * bookmarks at or after it, because there can't be any blocks on the
1407 * deadlist in this range. Now that the most recent snapshot is after
1408 * all bookmarks, we need to add these keys. Note that the caller always
1409 * adds a key at the previous snapshot, so we only add keys for bookmarks
1410 * after that.
1411 */
1412 void
1413 dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1414 {
1415 uint64_t last_key_added = UINT64_MAX;
1416 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1417 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1418 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1419 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1420 uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1421 ASSERT3U(creation_txg, <=, last_key_added);
1422 /*
1423 * Note, there may be multiple bookmarks at this TXG,
1424 * and we only want to add the key for this TXG once.
1425 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1426 * these bookmarks in sequence.
1427 */
1428 if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1429 creation_txg != last_key_added) {
1430 dsl_deadlist_add_key(&ds->ds_deadlist,
1431 creation_txg, tx);
1432 last_key_added = creation_txg;
1433 }
1434 }
1435 }
1436
1437 /*
1438 * The next snapshot of the origin dataset has changed, due to
1439 * promote or clone swap. If there are any bookmarks at this dataset,
1440 * we need to update their zbm_*_freed_before_next_snap to reflect this.
1441 * The head dataset has the relevant bookmarks in ds_bookmarks.
1442 */
1443 void
1444 dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1445 dmu_tx_t *tx)
1446 {
1447 dsl_pool_t *dp = dmu_tx_pool(tx);
1448
1449 /*
1450 * Find the first bookmark that HAS_FBN at the origin snapshot.
1451 */
1452 dsl_bookmark_node_t search = { 0 };
1453 avl_index_t idx;
1454 search.dbn_phys.zbm_creation_txg =
1455 dsl_dataset_phys(origin)->ds_creation_txg;
1456 search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1457 /*
1458 * The empty-string name can't be in the AVL, and it compares
1459 * before any entries with this TXG.
1460 */
1461 search.dbn_name = (char *)"";
1462 VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1463 dsl_bookmark_node_t *dbn =
1464 avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1465
1466 /*
1467 * Iterate over all bookmarks that are at the origin txg.
1468 * Adjust their FBN based on their new next snapshot.
1469 */
1470 for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1471 dsl_dataset_phys(origin)->ds_creation_txg &&
1472 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1473 dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1474
1475 /*
1476 * Bookmark is at the origin, therefore its
1477 * "next dataset" is changing, so we need
1478 * to reset its FBN by recomputing it in
1479 * dsl_bookmark_set_phys().
1480 */
1481 ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1482 dsl_dataset_phys(origin)->ds_guid);
1483 ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1484 dsl_dataset_phys(origin)->ds_referenced_bytes);
1485 ASSERT(dbn->dbn_phys.zbm_flags &
1486 ZBM_FLAG_SNAPSHOT_EXISTS);
1487 /*
1488 * Save and restore the zbm_redaction_obj, which
1489 * is zeroed by dsl_bookmark_set_phys().
1490 */
1491 uint64_t redaction_obj =
1492 dbn->dbn_phys.zbm_redaction_obj;
1493 dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1494 dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1495
1496 VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1497 dbn->dbn_name, sizeof (uint64_t),
1498 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1499 &dbn->dbn_phys, tx));
1500 }
1501 }
1502
1503 /*
1504 * This block is no longer referenced by this (head) dataset.
1505 *
1506 * Adjust the FBN of any bookmarks that reference this block, whose "next"
1507 * is the head dataset.
1508 */
1509 void
1510 dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1511 {
1512 (void) tx;
1513
1514 /*
1515 * Iterate over bookmarks whose "next" is the head dataset.
1516 */
1517 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1518 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1519 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1520 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1521 /*
1522 * If the block was live (referenced) at the time of this
1523 * bookmark, add its space to the bookmark's FBN.
1524 */
1525 if (BP_GET_LOGICAL_BIRTH(bp) <=
1526 dbn->dbn_phys.zbm_creation_txg &&
1527 (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1528 mutex_enter(&dbn->dbn_lock);
1529 dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1530 bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1531 dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1532 BP_GET_PSIZE(bp);
1533 dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1534 BP_GET_UCSIZE(bp);
1535 /*
1536 * Changing the ZAP object here would be too
1537 * expensive. Also, we may be called from the zio
1538 * interrupt thread, which can't block on i/o.
1539 * Therefore, we mark this bookmark as dirty and
1540 * modify the ZAP once per txg, in
1541 * dsl_bookmark_sync_done().
1542 */
1543 dbn->dbn_dirty = B_TRUE;
1544 mutex_exit(&dbn->dbn_lock);
1545 }
1546 }
1547 }
1548
1549 void
1550 dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1551 {
1552 dsl_pool_t *dp = dmu_tx_pool(tx);
1553
1554 if (dsl_dataset_is_snapshot(ds))
1555 return;
1556
1557 /*
1558 * We only dirty bookmarks that are at or after the most recent
1559 * snapshot. We can't create snapshots between
1560 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1561 * don't need to look at any bookmarks before ds_prev_snap_txg.
1562 */
1563 for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1564 dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1565 dsl_dataset_phys(ds)->ds_prev_snap_txg;
1566 dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1567 if (dbn->dbn_dirty) {
1568 /*
1569 * We only dirty nodes with HAS_FBN, therefore
1570 * we can always use the current bookmark struct size.
1571 */
1572 ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1573 VERIFY0(zap_update(dp->dp_meta_objset,
1574 ds->ds_bookmarks_obj,
1575 dbn->dbn_name, sizeof (uint64_t),
1576 sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1577 &dbn->dbn_phys, tx));
1578 dbn->dbn_dirty = B_FALSE;
1579 }
1580 }
1581 #ifdef ZFS_DEBUG
1582 for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1583 dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1584 ASSERT(!dbn->dbn_dirty);
1585 }
1586 #endif
1587 }
1588
1589 /*
1590 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1591 */
1592 uint64_t
1593 dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1594 {
1595 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1596 dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1597 if (dbn == NULL)
1598 return (0);
1599 return (dbn->dbn_phys.zbm_creation_txg);
1600 }
1601
1602 /*
1603 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1604 * redact_block_phys_t is before the bookmark, return -1. If the first block in
1605 * the redact_block_phys_t is after the bookmark, return 1. Otherwise, the
1606 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1607 */
1608 static int
1609 redact_block_zb_compare(redact_block_phys_t *first,
1610 zbookmark_phys_t *second)
1611 {
1612 /*
1613 * If the block_phys is for a previous object, or the last block in the
1614 * block_phys is strictly before the block in the bookmark, the
1615 * block_phys is earlier.
1616 */
1617 if (first->rbp_object < second->zb_object ||
1618 (first->rbp_object == second->zb_object &&
1619 first->rbp_blkid + (redact_block_get_count(first) - 1) <
1620 second->zb_blkid)) {
1621 return (-1);
1622 }
1623
1624 /*
1625 * If the bookmark is for a previous object, or the block in the
1626 * bookmark is strictly before the first block in the block_phys, the
1627 * bookmark is earlier.
1628 */
1629 if (first->rbp_object > second->zb_object ||
1630 (first->rbp_object == second->zb_object &&
1631 first->rbp_blkid > second->zb_blkid)) {
1632 return (1);
1633 }
1634
1635 return (0);
1636 }
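
/*
 * For example, a redact_block_phys_t covering object 5, blkids 10-19
 * (rbp_blkid == 10, count == 10) compares as:
 *	bookmark at object 5, blkid 25  -> -1 (entire range is earlier)
 *	bookmark at object 5, blkid 12  ->  0 (bookmark inside the range)
 *	bookmark at object 4, blkid 99  ->  1 (entire range is later)
 */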
1637
1638 /*
1639 * Traverse the redaction list in the provided object, and call the callback for
1640 * each entry we find. Don't call the callback for any records before resume.
1641 */
1642 int
1643 dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1644 rl_traverse_callback_t cb, void *arg)
1645 {
1646 objset_t *mos = rl->rl_mos;
1647 int err = 0;
1648
1649 if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1650 rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1651 /*
1652 * When we finish a send, we update the last object and offset
1653 * to UINT64_MAX. If a send fails partway through, the last
1654 * object and offset will have some other value, indicating how
1655 * far the send got. The redaction list must be complete before
1656 * it can be traversed, so return EINVAL if the last object and
1657 * blkid are not set to UINT64_MAX.
1658 */
1659 return (SET_ERROR(EINVAL));
1660 }
1661
1662 /*
1663 * This allows us to skip the binary search and resume checking logic
1664 * below, if we're not resuming a redacted send.
1665 */
1666 if (ZB_IS_ZERO(resume))
1667 resume = NULL;
1668
1669 /*
1670 * Binary search for the point to resume from.
1671 */
1672 uint64_t maxidx = rl->rl_phys->rlp_num_entries - 1;
1673 uint64_t minidx = 0;
1674 while (resume != NULL && maxidx > minidx) {
1675 redact_block_phys_t rbp = { 0 };
1676 ASSERT3U(maxidx, >, minidx);
1677 uint64_t mididx = minidx + ((maxidx - minidx) / 2);
1678 err = dmu_read(mos, rl->rl_object, mididx * sizeof (rbp),
1679 sizeof (rbp), &rbp, DMU_READ_NO_PREFETCH);
1680 if (err != 0)
1681 break;
1682
1683 int cmp = redact_block_zb_compare(&rbp, resume);
1684
1685 if (cmp == 0) {
1686 minidx = mididx;
1687 break;
1688 } else if (cmp > 0) {
1689 maxidx =
1690 (mididx == minidx ? minidx : mididx - 1);
1691 } else {
1692 minidx = mididx + 1;
1693 }
1694 }
1695
1696 unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1697 redact_block_phys_t *buf = zio_data_buf_alloc(bufsize);
1698
1699 unsigned int entries_per_buf = bufsize / sizeof (redact_block_phys_t);
1700 uint64_t start_block = minidx / entries_per_buf;
1701 err = dmu_read(mos, rl->rl_object, start_block * bufsize, bufsize, buf,
1702 DMU_READ_PREFETCH);
1703
1704 for (uint64_t curidx = minidx;
1705 err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1706 curidx++) {
1707 /*
1708 * We read in the redaction list one block at a time. Once we
1709 * finish with all the entries in a given block, we read in a
1710 * new one. The predictive prefetcher will take care of any
1711 * prefetching, and this code shouldn't be the bottleneck, so we
1712 * don't need to do manual prefetching.
1713 */
1714 if (curidx % entries_per_buf == 0) {
1715 err = dmu_read(mos, rl->rl_object, curidx *
1716 sizeof (*buf), bufsize, buf,
1717 DMU_READ_PREFETCH);
1718 if (err != 0)
1719 break;
1720 }
1721 redact_block_phys_t *rb = &buf[curidx % entries_per_buf];
1722 /*
1723 * If resume is non-null, we should either not send the data, or
1724 * null out resume so we don't have to keep doing these
1725 * comparisons.
1726 */
1727 if (resume != NULL) {
1728 /*
1729 * It is possible that after the binary search we got
1730  * a record before the resume point. There are two cases
1731 * where this can occur. If the record is the last
1732 * redaction record, and the resume point is after the
1733 * end of the redacted data, curidx will be the last
1734 * redaction record. In that case, the loop will end
1735 * after this iteration. The second case is if the
1736 * resume point is between two redaction records, the
1737 * binary search can return either the record before
1738 * or after the resume point. In that case, the next
1739 * iteration will be greater than the resume point.
1740 */
1741 if (redact_block_zb_compare(rb, resume) < 0) {
1742 ASSERT3U(curidx, ==, minidx);
1743 continue;
1744 } else {
1745 /*
1746 * If the place to resume is in the middle of
1747 * the range described by this
1748 * redact_block_phys, then modify the
1749 * redact_block_phys in memory so we generate
1750 * the right records.
1751 */
1752 if (resume->zb_object == rb->rbp_object &&
1753 resume->zb_blkid > rb->rbp_blkid) {
1754 uint64_t diff = resume->zb_blkid -
1755 rb->rbp_blkid;
1756 rb->rbp_blkid = resume->zb_blkid;
1757 redact_block_set_count(rb,
1758 redact_block_get_count(rb) - diff);
1759 }
1760 resume = NULL;
1761 }
1762 }
1763
1764 if (cb(rb, arg) != 0) {
1765 err = EINTR;
1766 break;
1767 }
1768 }
1769
1770 zio_data_buf_free(buf, bufsize);
1771 return (err);
1772 }
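
/*
 * A minimal sketch of a traversal callback (hypothetical helper, not used
 * in this file): counting how many blocks a complete redaction list covers.
 * Returning nonzero from the callback aborts the traversal with EINTR.
 *
 *	static int
 *	count_redacted_cb(redact_block_phys_t *rbp, void *arg)
 *	{
 *		uint64_t *count = arg;
 *		*count += redact_block_get_count(rbp);
 *		return (0);
 *	}
 *
 *	uint64_t redacted_blocks = 0;
 *	zbookmark_phys_t resume = { 0 };	// not resuming a send
 *	err = dsl_redaction_list_traverse(rl, &resume, count_redacted_cb,
 *	    &redacted_blocks);
 */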
1773