/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#endif

/*
 * This controls the number of entries that the redaction_list_update synctask
 * batches up before writing them out to the redaction list.
 */
static const int redact_sync_bufsize = 1024;

/*
 * Controls how often the redaction list is updated while it is being created.
 */
static const uint64_t redaction_list_update_interval_ns =
    1000 * 1000 * 1000ULL; /* 1s */

/*
 * This tunable controls the length of the queues that zfs redact worker threads
 * use to communicate. If the dmu_redact_snap thread is blocking on these
 * queues, this variable may need to be increased. If there is a significant
 * slowdown at the start of a redact operation as these threads consume all the
 * available IO resources, or the queues are consuming too much memory, this
 * variable may need to be decreased.
 */
static const int zfs_redact_queue_length = 1024 * 1024;

/*
 * This tunable controls the fill fraction of the queues used by zfs redact.
 * The fill fraction controls the frequency with which threads have to be
 * cv_signaled. If a lot of cpu time is being spent on cv_signal, then it
 * should be tuned down. If the queues empty before the signaled thread can
 * catch up, then it should be tuned up.
 */
static const uint64_t zfs_redact_queue_ff = 20;

struct redact_record {
	bqueue_node_t ln;
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t start_object;
	uint64_t start_blkid;
	uint64_t end_object;
	uint64_t end_blkid;
	uint8_t indblkshift;
	uint32_t datablksz;
};

struct redact_thread_arg {
	bqueue_t q;
	objset_t *os; /* Objset to traverse */
	dsl_dataset_t *ds; /* Dataset to traverse */
	struct redact_record *current_record;
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	objlist_t *deleted_objs;
	uint64_t *num_blocks_visited;
	uint64_t ignore_object; /* ignore further callbacks on this */
	uint64_t txg; /* txg to traverse since */
};

/*
 * The redaction node is a wrapper around the redaction record that is used
 * by the redaction merging thread to sort the records and determine overlaps.
 *
 * It contains two nodes; one sorts the records by their start_zb, and the other
 * sorts the records by their end_zb.
 */
struct redact_node {
	avl_node_t avl_node_start;
	avl_node_t avl_node_end;
	struct redact_record *record;
	struct redact_thread_arg *rt_arg;
	uint32_t thread_num;
};

struct merge_data {
	list_t md_redact_block_pending;
	redact_block_phys_t md_coalesce_block;
	uint64_t md_last_time;
	redact_block_phys_t md_furthest[TXG_SIZE];
	/* Lists of struct redact_block_list_node. */
	list_t md_blocks[TXG_SIZE];
	boolean_t md_synctask_txg[TXG_SIZE];
	uint64_t md_latest_synctask_txg;
	redaction_list_t *md_redaction_list;
};

/*
 * A wrapper around struct redact_block so it can be stored in a list_t.
 */
struct redact_block_list_node {
	redact_block_phys_t block;
	list_node_t node;
};

/*
 * We've found a new redaction candidate. In order to improve performance, we
 * coalesce these blocks when they're adjacent to each other. This function
 * handles that. If the new candidate block range is immediately after the
 * range we're building, coalesce it into the range we're building. Otherwise,
 * put the record we're building on the queue, and update the build pointer to
 * point to the new record.
 */
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
    struct redact_record *new)
{
	if (new->eos_marker) {
		if (*build != NULL)
			bqueue_enqueue(q, *build, sizeof (**build));
		bqueue_enqueue_flush(q, new, sizeof (*new));
		return;
	}
	if (*build == NULL) {
		*build = new;
		return;
	}
	struct redact_record *curbuild = *build;
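	/*
	 * We can coalesce if the new range starts at the block immediately
	 * after the range we're building, either within the same object or
	 * at block 0 of the next object when the range we're building runs
	 * to the end of its object.
	 */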
	if ((curbuild->end_object == new->start_object &&
	    curbuild->end_blkid + 1 == new->start_blkid &&
	    curbuild->end_blkid != UINT64_MAX) ||
	    (curbuild->end_object + 1 == new->start_object &&
	    curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
		curbuild->end_object = new->end_object;
		curbuild->end_blkid = new->end_blkid;
		kmem_free(new, sizeof (*new));
	} else {
		bqueue_enqueue(q, curbuild, sizeof (*curbuild));
		*build = new;
	}
}
#ifdef _KERNEL
struct objnode {
	avl_node_t node;
	uint64_t obj;
};

static int
objnode_compare(const void *o1, const void *o2)
{
	const struct objnode *obj1 = o1;
	const struct objnode *obj2 = o2;
	if (obj1->obj < obj2->obj)
		return (-1);
	if (obj1->obj > obj2->obj)
		return (1);
	return (0);
}

static objlist_t *
zfs_get_deleteq(objset_t *os)
{
	objlist_t *deleteq_objlist = objlist_create();
	uint64_t deleteq_obj;
	zap_cursor_t zc;
	zap_attribute_t *za;
	dmu_object_info_t doi;

	ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
	VERIFY0(dmu_object_info(os, MASTER_NODE_OBJ, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_MASTER_NODE);

	VERIFY0(zap_lookup(os, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));

	/*
	 * In order to insert objects into the objlist, they must be in sorted
	 * order. We don't know what order we'll get them out of the ZAP in, so
	 * we insert them into and remove them from an avl_tree_t to sort them.
	 */
	avl_tree_t at;
	avl_create(&at, objnode_compare, sizeof (struct objnode),
	    offsetof(struct objnode, node));

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, os, deleteq_obj);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		struct objnode *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
		obj->obj = za->za_first_integer;
		avl_add(&at, obj);
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	struct objnode *next, *found = avl_first(&at);
	while (found != NULL) {
		next = AVL_NEXT(&at, found);
		objlist_insert(deleteq_objlist, found->obj);
		found = next;
	}

	void *cookie = NULL;
	while ((found = avl_destroy_nodes(&at, &cookie)) != NULL)
		kmem_free(found, sizeof (*found));
	avl_destroy(&at);
	return (deleteq_objlist);
}
#endif

/*
 * This is the callback function to traverse_dataset for the redaction threads
 * for dmu_redact_snap. These threads are responsible for creating redaction
 * records for all the data that is modified by the snapshots we're redacting
 * with respect to. Redaction records represent ranges of data that have been
 * modified by one of the redaction snapshots, and are stored in the
 * redact_record struct. We need to create redaction records for three
 * cases:
 *
 * First, if there's a normal write, we need to create a redaction record for
 * that block.
 *
 * Second, if there's a hole, we need to create a redaction record that covers
 * the whole range of the hole. If the hole is in the meta-dnode, it must cover
 * every block in all of the objects in the hole.
 *
 * Third, if there is a deleted object, we need to create a redaction record for
 * all of the blocks in that object.
 */
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	(void) spa, (void) zilog;
	struct redact_thread_arg *rta = arg;
	struct redact_record *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= rta->resume.zb_object);

	if (rta->cancel)
		return (SET_ERROR(EINTR));

	if (rta->ignore_object == zb->zb_object)
		return (0);

	/*
	 * If we're visiting a dnode, we need to handle the case where the
	 * object has been deleted.
	 */
	if (zb->zb_level == ZB_DNODE_LEVEL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);

		if (zb->zb_object == 0)
			return (0);

		/*
		 * If the object has been deleted, redact all of the blocks in
		 * it.
		 */
		if (dnp->dn_type == DMU_OT_NONE ||
		    objlist_exists(rta->deleted_objs, zb->zb_object)) {
			rta->ignore_object = zb->zb_object;
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);

			record->eos_marker = B_FALSE;
			record->start_object = record->end_object =
			    zb->zb_object;
			record->start_blkid = 0;
			record->end_blkid = UINT64_MAX;
			record_merge_enqueue(&rta->q,
			    &rta->current_record, record);
		}
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
		/*
		 * If this is an indirect block, but not a hole, it doesn't
		 * provide any useful information for redaction, so ignore it.
		 */
		return (0);
	}

	/*
	 * At this point, there are two options left for the type of block we're
	 * looking at. Either this is a hole (which could be in the dnode or
	 * the meta-dnode), or it's a level 0 block of some sort. If it's a
	 * hole, we create a redaction record that covers the whole range. If
	 * the hole is in a dnode, we need to redact all the blocks in that
	 * hole. If the hole is in the meta-dnode, we instead need to redact
	 * all blocks in every object covered by that hole. If it's a level 0
	 * block, we only need to redact that single block.
	 */
	record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
	record->eos_marker = B_FALSE;

	record->start_object = record->end_object = zb->zb_object;
	if (BP_IS_HOLE(bp)) {
		record->start_blkid = zb->zb_blkid *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);

		record->end_blkid = ((zb->zb_blkid + 1) *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;

		if (zb->zb_object == DMU_META_DNODE_OBJECT) {
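			/*
			 * Each data block of the meta-dnode holds
			 * (datablksz / sizeof (dnode_phys_t)) dnodes, so
			 * convert the hole's range of meta-dnode block ids
			 * into the range of object numbers it covers, and
			 * redact every block of those objects.
			 */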
			record->start_object = record->start_blkid *
			    ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t));
			record->start_blkid = 0;
			record->end_object = ((record->end_blkid +
			    1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t))) - 1;
			record->end_blkid = UINT64_MAX;
		}
	} else if (zb->zb_level != 0 ||
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		kmem_free(record, sizeof (*record));
		return (0);
	} else {
		record->start_blkid = record->end_blkid = zb->zb_blkid;
	}
	record->indblkshift = dnp->dn_indblkshift;
	record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	record_merge_enqueue(&rta->q, &rta->current_record, record);

	return (0);
}

static __attribute__((noreturn)) void
redact_traverse_thread(void *arg)
{
	struct redact_thread_arg *rt_arg = arg;
	int err;
	struct redact_record *data;
#ifdef _KERNEL
	if (rt_arg->os->os_phys->os_type == DMU_OST_ZFS)
		rt_arg->deleted_objs = zfs_get_deleteq(rt_arg->os);
	else
		rt_arg->deleted_objs = objlist_create();
#else
	rt_arg->deleted_objs = objlist_create();
#endif

	err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
	    &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
	    redact_cb, rt_arg);

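	/*
	 * An EINTR from the traversal means we were cancelled (redact_cb
	 * returns it when rta->cancel is set), so don't record it as an
	 * error.
	 */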
	if (err != EINTR)
		rt_arg->error_code = err;
	objlist_destroy(rt_arg->deleted_objs);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
	thread_exit();
}

static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
    uint64_t blkid)
{
	zb->zb_object = object;
	zb->zb_level = 0;
	zb->zb_blkid = blkid;
}

/*
 * This is a utility function that can do the comparison for the starts or ends
 * of the ranges in a redact_record.
 */
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
    uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
	zbookmark_phys_t z1, z2;
	create_zbookmark_from_obj_off(&z1, obj1, off1);
	create_zbookmark_from_obj_off(&z2, obj2, off2);

	return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
	    dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}

/*
 * Compare two redaction records by their range's start location. Also makes
 * eos records always compare last. We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *rr1 = rn1->record;
	const struct redact_record *rr2 = rn2->record;
	if (rr1->eos_marker)
		return (1);
	if (rr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
	    rr1->datablksz, rr2->start_object, rr2->start_blkid,
	    rr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Compare two redaction records by their range's end location. Also makes
 * eos records always compare last. We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *srr1 = rn1->record;
	const struct redact_record *srr2 = rn2->record;
	if (srr1->eos_marker)
		return (1);
	if (srr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
	    srr1->datablksz, srr2->end_object, srr2->end_blkid,
	    srr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Utility function that compares two redaction records to determine if any part
 * of the "from" record is before any part of the "to" record. Also causes End
 * of Stream redaction records to compare after all others, so that the
 * redaction merging logic can stay simple.
 */
static boolean_t
redact_record_before(const struct redact_record *from,
    const struct redact_record *to)
{
	if (from->eos_marker == B_TRUE)
		return (B_FALSE);
	else if (to->eos_marker == B_TRUE)
		return (B_TRUE);
	return (redact_range_compare(from->start_object, from->start_blkid,
	    from->datablksz, to->end_object, to->end_blkid,
	    to->datablksz) <= 0);
}

/*
 * Pop a new redaction record off the queue, check that the records are in the
 * right order, and free the old data.
 */
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
	struct redact_record *next = bqueue_dequeue(bq);
	ASSERT(redact_record_before(prev, next));
	kmem_free(prev, sizeof (*prev));
	return (next);
}

/*
 * Remove the given redaction node from both trees, pull a new redaction record
 * off the queue, free the old redaction record, update the redaction node, and
 * reinsert the node into the trees.
 */
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
    struct redact_node *redact_node)
{
	avl_remove(start_tree, redact_node);
	avl_remove(end_tree, redact_node);
	redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
	    redact_node->record);
	avl_add(end_tree, redact_node);
	avl_add(start_tree, redact_node);
	return (redact_node->rt_arg->error_code);
}

/*
 * Synctask for updating redaction lists. We first take this txg's list of
 * redacted blocks and append those to the redaction list. We then update the
 * redaction list's bonus buffer. We store the furthest blocks we visited and
 * the list of snapshots that we're redacting with respect to. We need these so
 * that redacted sends and receives can be correctly resumed.
 */
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
	struct merge_data *md = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	list_t *list = &md->md_blocks[txg & TXG_MASK];
	redact_block_phys_t *furthest_visited =
	    &md->md_furthest[txg & TXG_MASK];
	objset_t *mos = tx->tx_pool->dp_meta_objset;
	redaction_list_t *rl = md->md_redaction_list;
	int bufsize = redact_sync_bufsize;
	redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
	    KM_SLEEP);
	int index = 0;

	dmu_buf_will_dirty(rl->rl_dbuf, tx);

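	/*
	 * Drain this txg's list of pending blocks, appending them to the
	 * end of the redaction list object in batches of redact_sync_bufsize
	 * entries.
	 */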
	for (struct redact_block_list_node *rbln = list_remove_head(list);
	    rbln != NULL; rbln = list_remove_head(list)) {
		ASSERT3U(rbln->block.rbp_object, <=,
		    furthest_visited->rbp_object);
		ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
		    rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
		buf[index] = rbln->block;
		index++;
		if (index == bufsize) {
			dmu_write(mos, rl->rl_object,
			    rl->rl_phys->rlp_num_entries * sizeof (*buf),
			    bufsize * sizeof (*buf), buf, tx);
			rl->rl_phys->rlp_num_entries += bufsize;
			index = 0;
		}
		kmem_free(rbln, sizeof (*rbln));
	}
	if (index > 0) {
		dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
		    sizeof (*buf), index * sizeof (*buf), buf, tx);
		rl->rl_phys->rlp_num_entries += index;
	}
	kmem_free(buf, bufsize * sizeof (*buf));

	md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
	rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
	rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}

static void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
    uint64_t blkid)
{
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);
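	/*
	 * The first commit in a given txg registers the
	 * redaction_list_update_sync synctask for that txg; subsequent
	 * commits in the same txg only append to the pending list and
	 * advance the progress markers.
	 */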
	if (!md->md_synctask_txg[txg & TXG_MASK]) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    redaction_list_update_sync, md, tx);
		md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
		md->md_latest_synctask_txg = txg;
	}
	md->md_furthest[txg & TXG_MASK].rbp_object = object;
	md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
	list_move_tail(&md->md_blocks[txg & TXG_MASK],
	    &md->md_redact_block_pending);
	dmu_tx_commit(tx);
	md->md_last_time = gethrtime();
}

/*
 * We want to store the list of blocks that we're redacting in the bookmark's
 * redaction list. However, this list is stored in the MOS, which means it can
 * only be written to in syncing context. To get around this, we create a
 * synctask that will write to the mos for us. We tell it what to write by
 * maintaining a linked list for each currently open transaction group; every
 * time we decide to redact a block, we append it to the transaction group that
 * is currently in open context. We also update some progress information that
 * the synctask will store to enable resumable redacted sends.
 */
static void
update_redaction_list(struct merge_data *md, objset_t *os,
    uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
	boolean_t enqueue = B_FALSE;
	redact_block_phys_t cur = {0};
	uint64_t count = endblkid - blkid + 1;
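	/*
	 * A single redact_block_phys_t can only encode
	 * REDACT_BLOCK_MAX_COUNT blocks, so carve maximally-sized records
	 * off the front of the range until the remainder fits in one record.
	 */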
	while (count > REDACT_BLOCK_MAX_COUNT) {
		update_redaction_list(md, os, object, blkid,
		    blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
		blkid += REDACT_BLOCK_MAX_COUNT;
		count -= REDACT_BLOCK_MAX_COUNT;
	}
	redact_block_phys_t *coalesce = &md->md_coalesce_block;
	boolean_t new;
	if (coalesce->rbp_size_count == 0) {
		new = B_TRUE;
		enqueue = B_FALSE;
	} else {
		uint64_t old_count = redact_block_get_count(coalesce);
		if (coalesce->rbp_object == object &&
		    coalesce->rbp_blkid + old_count == blkid &&
		    old_count + count <= REDACT_BLOCK_MAX_COUNT) {
			ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
			redact_block_set_count(coalesce, old_count + count);
			new = B_FALSE;
			enqueue = B_FALSE;
		} else {
			new = B_TRUE;
			enqueue = B_TRUE;
		}
	}

	if (new) {
		cur = *coalesce;
		coalesce->rbp_blkid = blkid;
		coalesce->rbp_object = object;

		redact_block_set_count(coalesce, count);
		redact_block_set_size(coalesce, blksz);
	}

	if (enqueue && redact_block_get_size(&cur) != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = cur;
		list_insert_tail(&md->md_redact_block_pending, rbln);
	}

	if (gethrtime() > md->md_last_time +
	    redaction_list_update_interval_ns) {
		commit_rl_updates(os, md, object, blkid);
	}
}

/*
 * This thread merges all the redaction records provided by the worker threads,
 * and determines which blocks are redacted by all the snapshots. The algorithm
 * for doing so is similar to performing a merge in mergesort with n sub-lists
 * instead of 2, with some added complexity due to the fact that the entries are
 * ranges, not just single blocks. This algorithm relies on the fact that the
 * queues are sorted, which is ensured by the fact that traverse_dataset
 * traverses the dataset in a consistent order. We pull one entry off the front
 * of the queues of each secure dataset traversal thread. Then we repeat the
 * following: each record represents a range of blocks modified by one of the
 * redaction snapshots, and each block in that range may need to be redacted in
 * the send stream. Find the record with the latest start of its range, and the
 * record with the earliest end of its range. If the last start is before the
 * first end, then we know that the blocks in the range [last_start, first_end]
 * are covered by all of the ranges at the front of the queues, which means
 * every thread redacts that whole range. For example, let's say the ranges on
 * each queue look like this:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * Thread 3 has the last start (5), and Thread 2 has the first end (6). All
 * three threads modified the range [5,6], so that data should not be sent over
 * the wire. After we've determined whether or not to redact anything, we take
 * the record with the first end. We discard that record, and pull a new one
 * off the front of the queue it came from. In the above example, we would
 * discard Thread 2's record, and pull a new one. Let's say the next record we
 * pulled from Thread 2 covered range [10,11]. The new layout would look like
 * this:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |                            [==]
 * Thread 3 |             [=================]
 *
 * When we compare the last start (10, from Thread 2) and the first end (9, from
 * Thread 1), we see that the last start is greater than the first end.
 * Therefore, we do not redact anything from these records. We'll iterate by
 * replacing the record from Thread 1.
 *
 * We iterate by replacing the record with the lowest end because we know
 * that the record with the lowest end has helped us as much as it can. All the
 * ranges before it that we will ever redact have been redacted. In addition,
 * by replacing the one with the lowest end, we guarantee we catch all ranges
 * that need to be redacted. For example, if in the case above we had replaced
 * the record from Thread 1 instead, we might have ended up with the following:
 *
 * Block Id   1  2  3  4  5  6  7  8  9 10 11 12
 * Thread 1 |                                  [==]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * If the next record from Thread 2 had been [8,10], for example, we should have
 * redacted part of that range, but because we updated Thread 1's record, we
 * missed it.
 *
 * We implement this algorithm by using two trees. The first sorts the
 * redaction records by their start_zb, and the second sorts them by their
 * end_zb. We use these to find the record with the last start and the record
 * with the first end. We create a record with that start and end, and send it
 * on. The overall runtime of this implementation is O(n log m), where n is the
 * total number of redaction records from all the different redaction snapshots,
 * and m is the number of redaction snapshots.
 *
 * If we redact with respect to zero snapshots, we create a redaction record
 * that starts at object 1, blkid 0 (object 0 can never be redacted) and ends
 * at object UINT64_MAX, blkid UINT64_MAX. This will result in us redacting
 * every block.
 */
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
    struct redact_thread_arg *thread_args, boolean_t *cancel)
{
	struct redact_node *redact_nodes = NULL;
	avl_tree_t start_tree, end_tree;
	struct redact_record *record;
	struct redact_record *current_record = NULL;
	int err = 0;
	struct merge_data md = { {0} };
	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));

	/*
	 * If we're redacting with respect to zero snapshots, then no data is
	 * permitted to be sent. We enqueue a record that redacts all blocks,
	 * and an eos marker.
	 */
	if (num_threads == 0) {
		record = kmem_zalloc(sizeof (struct redact_record),
		    KM_SLEEP);
		/* We can't redact object 0, so don't try. */
		record->start_object = 1;
		record->start_blkid = 0;
		record->end_object = record->end_blkid = UINT64_MAX;
		bqueue_enqueue(q, record, sizeof (*record));
		return (0);
	}
	redact_nodes = vmem_zalloc(num_threads *
	    sizeof (*redact_nodes), KM_SLEEP);

	avl_create(&start_tree, redact_node_compare_start,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_start));
	avl_create(&end_tree, redact_node_compare_end,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_end));

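	/*
	 * Prime each worker's redact_node with the first record off that
	 * thread's queue and insert it into both trees.
	 */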
	for (int i = 0; i < num_threads; i++) {
		struct redact_node *node = &redact_nodes[i];
		struct redact_thread_arg *targ = &thread_args[i];
		node->record = bqueue_dequeue(&targ->q);
		node->rt_arg = targ;
		node->thread_num = i;
		avl_add(&start_tree, node);
		avl_add(&end_tree, node);
	}

	/*
	 * Once the first record in the end tree has returned EOS, every record
	 * must be an EOS record, so we should stop.
	 */
	while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
	    record->eos_marker) {
		if (*cancel) {
			err = EINTR;
			break;
		}
		struct redact_node *last_start = avl_last(&start_tree);
		struct redact_node *first_end = avl_first(&end_tree);

		/*
		 * If the last start record is before the first end record,
		 * then we have blocks that are redacted by all threads.
		 * Therefore, we should redact them. Copy the record, and send
		 * it to the main thread.
		 */
		if (redact_record_before(last_start->record,
		    first_end->record)) {
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);
			*record = *first_end->record;
			record->start_object = last_start->record->start_object;
			record->start_blkid = last_start->record->start_blkid;
			record_merge_enqueue(q, &current_record,
			    record);
		}
		err = update_avl_trees(&start_tree, &end_tree, first_end);
	}

	/*
	 * We're done; if we were cancelled, we need to cancel our workers and
	 * clear out their queues. Either way, we need to remove every thread's
	 * redact_node struct from the avl trees.
	 */
	for (int i = 0; i < num_threads; i++) {
		if (err != 0) {
			thread_args[i].cancel = B_TRUE;
			while (!redact_nodes[i].record->eos_marker) {
				(void) update_avl_trees(&start_tree, &end_tree,
				    &redact_nodes[i]);
			}
		}
		avl_remove(&start_tree, &redact_nodes[i]);
		avl_remove(&end_tree, &redact_nodes[i]);
		kmem_free(redact_nodes[i].record,
		    sizeof (struct redact_record));
		bqueue_destroy(&thread_args[i].q);
	}

	avl_destroy(&start_tree);
	avl_destroy(&end_tree);
	vmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
	if (current_record != NULL)
		bqueue_enqueue(q, current_record, sizeof (*current_record));
	return (err);
}

struct redact_merge_thread_arg {
	bqueue_t q;
	spa_t *spa;
	int numsnaps;
	struct redact_thread_arg *thr_args;
	boolean_t cancel;
	int error_code;
};

static __attribute__((noreturn)) void
redact_merge_thread(void *arg)
{
	struct redact_merge_thread_arg *rmta = arg;
	rmta->error_code = perform_thread_merge(&rmta->q,
	    rmta->numsnaps, rmta->thr_args, &rmta->cancel);
	struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
	rec->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rmta->q, rec, 1);
	thread_exit();
}

/*
 * Find the next object in or after the redaction range passed in, and hold
 * its dnode with the provided tag. Also update *object to contain the new
 * object number.
 */
static int
hold_next_object(objset_t *os, struct redact_record *rec, const void *tag,
    uint64_t *object, dnode_t **dn)
{
	int err = 0;
	if (*dn != NULL)
		dnode_rele(*dn, tag);
	*dn = NULL;
	if (*object < rec->start_object) {
		*object = rec->start_object - 1;
	}
	err = dmu_object_next(os, object, B_FALSE, 0);
	if (err != 0)
		return (err);

	err = dnode_hold(os, *object, tag, dn);
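	/*
	 * Skip past any objects that precede the record's range, as well as
	 * metadata objects, which are never redacted.
	 */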
	while (err == 0 && (*object < rec->start_object ||
	    DMU_OT_IS_METADATA((*dn)->dn_type))) {
		dnode_rele(*dn, tag);
		*dn = NULL;
		err = dmu_object_next(os, object, B_FALSE, 0);
		if (err != 0)
			break;
		err = dnode_hold(os, *object, tag, dn);
	}
	return (err);
}

static int
perform_redaction(objset_t *os, redaction_list_t *rl,
    struct redact_merge_thread_arg *rmta)
{
	int err = 0;
	bqueue_t *q = &rmta->q;
	struct redact_record *rec = NULL;
	struct merge_data md = { {0} };

	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));
	md.md_redaction_list = rl;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&md.md_blocks[i],
		    sizeof (struct redact_block_list_node),
		    offsetof(struct redact_block_list_node, node));
	}
	dnode_t *dn = NULL;
	uint64_t prev_obj = 0;
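	/*
	 * Walk the merged records in order. For each record, hold every
	 * object that overlaps it and append the overlapping block range,
	 * clamped to the object's actual maxblkid, to the redaction list.
	 */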
	for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
	    rec = get_next_redact_record(q, rec)) {
		ASSERT3U(rec->start_object, !=, 0);
		uint64_t object;
		if (prev_obj != rec->start_object) {
			object = rec->start_object - 1;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		} else {
			object = prev_obj;
		}
		while (err == 0 && object <= rec->end_object) {
			if (issig()) {
				err = EINTR;
				break;
			}
			/*
			 * Part of the current object is contained somewhere in
			 * the range covered by rec.
			 */
			uint64_t startblkid;
			uint64_t endblkid;
			uint64_t maxblkid = dn->dn_phys->dn_maxblkid;

			if (rec->start_object < object)
				startblkid = 0;
			else if (rec->start_blkid > maxblkid)
				break;
			else
				startblkid = rec->start_blkid;

			if (rec->end_object > object || rec->end_blkid >
			    maxblkid) {
				endblkid = maxblkid;
			} else {
				endblkid = rec->end_blkid;
			}
			update_redaction_list(&md, os, object, startblkid,
			    endblkid, dn->dn_datablksz);

			if (object == rec->end_object)
				break;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		}
		if (err == ESRCH)
			err = 0;
		if (dn != NULL)
			prev_obj = object;
	}
	if (err == 0 && dn != NULL)
		dnode_rele(dn, FTAG);

	if (err == ESRCH)
		err = 0;
	rmta->cancel = B_TRUE;
	while (!rec->eos_marker)
		rec = get_next_redact_record(q, rec);
	kmem_free(rec, sizeof (*rec));

	/*
	 * There may be a block that is still being coalesced; sync it out
	 * before we return.
	 */
	if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = md.md_coalesce_block;
		list_insert_tail(&md.md_redact_block_pending, rbln);
	}
	commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);

	/*
	 * Wait for all the redaction info to sync out before we return, so
	 * that anyone who attempts to resume this redaction will have all the
	 * data they need.
	 */
	dsl_pool_t *dp = spa_get_dsl(os->os_spa);
	if (md.md_latest_synctask_txg != 0)
		txg_wait_synced(dp, md.md_latest_synctask_txg);
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&md.md_blocks[i]);
	return (err);
}

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
    const char *redactbook)
{
	int err = 0;
	dsl_pool_t *dp = NULL;
	dsl_dataset_t *ds = NULL;
	int numsnaps = 0;
	objset_t *os;
	struct redact_thread_arg *args = NULL;
	redaction_list_t *new_rl = NULL;
	char *newredactbook;

	if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
		return (err);

	newredactbook = kmem_zalloc(sizeof (char) * ZFS_MAX_DATASET_NAME_LEN,
	    KM_SLEEP);

	if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
	    FTAG, &ds)) != 0) {
		goto out;
	}
	dsl_dataset_long_hold(ds, FTAG);
	if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
		err = EINVAL;
		goto out;
	}
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
		err = EALREADY;
		goto out;
	}

	numsnaps = fnvlist_num_pairs(redactnvl);
	if (numsnaps > 0)
		args = vmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);

	nvpair_t *pair = NULL;
	for (int i = 0; i < numsnaps; i++) {
		pair = nvlist_next_nvpair(redactnvl, pair);
		const char *name = nvpair_name(pair);
		struct redact_thread_arg *rta = &args[i];
		err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
		    FTAG, &rta->ds);
		if (err != 0)
			break;
		/*
		 * We want to do the long hold before we can get any other
		 * errors, because the cleanup code will release the long
		 * hold if rta->ds is filled in.
		 */
		dsl_dataset_long_hold(rta->ds, FTAG);

		err = dmu_objset_from_ds(rta->ds, &rta->os);
		if (err != 0)
			break;
		if (!dsl_dataset_is_before(rta->ds, ds, 0)) {
			err = EINVAL;
			break;
		}
		if (dsl_dataset_feature_is_active(rta->ds,
		    SPA_FEATURE_REDACTED_DATASETS)) {
			err = EALREADY;
			break;
		}
	}
	if (err != 0)
		goto out;
	VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);

	boolean_t resuming = B_FALSE;
	zfs_bookmark_phys_t bookmark;

	(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
	char *c = strchr(newredactbook, '@');
	ASSERT3P(c, !=, NULL);
	int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
	    "#%s", redactbook);
	if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
		dsl_pool_rele(dp, FTAG);
		kmem_free(newredactbook,
		    sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
		if (args != NULL)
			vmem_free(args, numsnaps * sizeof (*args));
		return (SET_ERROR(ENAMETOOLONG));
	}
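	/*
	 * If a bookmark with this name already exists, we're resuming an
	 * interrupted redaction: the bookmark must be a redaction bookmark
	 * created with the same set of redaction snapshots, and must not
	 * already be complete.
	 */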
	err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
	if (err == 0) {
		resuming = B_TRUE;
		if (bookmark.zbm_redaction_obj == 0) {
			err = EEXIST;
			goto out;
		}
		err = dsl_redaction_list_hold_obj(dp,
		    bookmark.zbm_redaction_obj, FTAG, &new_rl);
		if (err != 0) {
			err = EIO;
			goto out;
		}
		dsl_redaction_list_long_hold(dp, new_rl, FTAG);
		if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
			err = ESRCH;
			goto out;
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
			    new_rl->rl_phys->rlp_num_snaps,
			    dsl_dataset_phys(rta->ds)->ds_guid)) {
				err = ESRCH;
				goto out;
			}
		}
		if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
		    new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
			err = EEXIST;
			goto out;
		}
		dsl_pool_rele(dp, FTAG);
		dp = NULL;
	} else {
		uint64_t *guids = NULL;
		if (numsnaps > 0) {
			guids = vmem_zalloc(numsnaps * sizeof (uint64_t),
			    KM_SLEEP);
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			guids[i] = dsl_dataset_phys(rta->ds)->ds_guid;
		}

		dsl_pool_rele(dp, FTAG);
		dp = NULL;
		err = dsl_bookmark_create_redacted(newredactbook, snapname,
		    numsnaps, guids, FTAG, &new_rl);
		vmem_free(guids, numsnaps * sizeof (uint64_t));
		if (err != 0)
			goto out;
	}

	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		(void) bqueue_init(&rta->q, zfs_redact_queue_ff,
		    zfs_redact_queue_length,
		    offsetof(struct redact_record, ln));
		if (resuming) {
			rta->resume.zb_blkid =
			    new_rl->rl_phys->rlp_last_blkid;
			rta->resume.zb_object =
			    new_rl->rl_phys->rlp_last_object;
		}
		rta->txg = dsl_dataset_phys(ds)->ds_creation_txg;
		(void) thread_create(NULL, 0, redact_traverse_thread, rta,
		    0, curproc, TS_RUN, minclsyspri);
	}

	struct redact_merge_thread_arg *rmta;
	rmta = kmem_zalloc(sizeof (struct redact_merge_thread_arg), KM_SLEEP);

	(void) bqueue_init(&rmta->q, zfs_redact_queue_ff,
	    zfs_redact_queue_length, offsetof(struct redact_record, ln));
	rmta->numsnaps = numsnaps;
	rmta->spa = os->os_spa;
	rmta->thr_args = args;
	(void) thread_create(NULL, 0, redact_merge_thread, rmta, 0, curproc,
	    TS_RUN, minclsyspri);
	err = perform_redaction(os, new_rl, rmta);
	bqueue_destroy(&rmta->q);
	kmem_free(rmta, sizeof (struct redact_merge_thread_arg));

out:
	kmem_free(newredactbook, sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);

	if (new_rl != NULL) {
		dsl_redaction_list_long_rele(new_rl, FTAG);
		dsl_redaction_list_rele(new_rl, FTAG);
	}
	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		/*
		 * rta->ds may be NULL if we got an error while filling
		 * it in.
		 */
		if (rta->ds != NULL) {
			dsl_dataset_long_rele(rta->ds, FTAG);
			dsl_dataset_rele_flags(rta->ds,
			    DS_HOLD_FLAG_DECRYPT, FTAG);
		}
	}

	if (args != NULL)
		vmem_free(args, numsnaps * sizeof (*args));
	if (dp != NULL)
		dsl_pool_rele(dp, FTAG);
	if (ds != NULL) {
		dsl_dataset_long_rele(ds, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}
	return (SET_ERROR(err));
}