/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
static int zfs_send_corrupt_data = B_FALSE;
/*
 * This tunable controls the amount of data (measured in bytes) that will be
 * prefetched by zfs send. If the main thread is blocking on reads that haven't
 * completed, this variable might need to be increased. If instead the main
 * thread is issuing new reads because the prefetches have fallen out of the
 * cache, this may need to be decreased.
 */
static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
 * This tunable controls the length of the queues that zfs send worker threads
 * use to communicate. If the send_main_thread is blocking on these queues,
 * this variable may need to be increased. If there is a significant slowdown
 * at the start of a send as these threads consume all the available IO
 * resources, this variable may need to be decreased.
 */
static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
 * These tunables control the fill fraction of the queues used by zfs send.
 * The fill fraction controls the frequency with which threads have to be
 * cv_signaled. If a lot of cpu time is being spent on cv_signal, then these
 * should be tuned down. If the queues empty before the signalled thread can
 * catch up, then these should be tuned up.
 */
static uint_t zfs_send_queue_ff = 20;
static uint_t zfs_send_no_prefetch_queue_ff = 20;
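/*
 * A rough worked example of the fill fraction, assuming the defaults above:
 * with zfs_send_no_prefetch_queue_length of 1MB and a fill fraction of 20,
 * threads are signaled in batches of roughly 1MB / 20 = ~51KB of queued
 * data rather than on every enqueue or dequeue. Raising the fill fraction
 * shrinks the batch (more cv_signal calls); lowering it grows the batch
 * (fewer, larger wakeups).
 */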
/*
 * Use this to override the recordsize calculation for fast zfs send estimates.
 */
static uint_t zfs_override_estimate_recordsize = 0;

/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;

/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
static int zfs_send_unmodified_spill_blocks = B_TRUE;

static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
	uint64_t temp = a * b;
	if (b != 0 && temp / b != a)
		return (B_FALSE);
	*c = temp;
	return (B_TRUE);
}
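/*
 * A minimal usage sketch for overflow_multiply(): *c is only meaningful
 * when B_TRUE is returned. For example, with a = 1ULL << 37 and
 * b = 1ULL << 28, a * b wraps to 0 mod 2^64; since 0 / b != a, the
 * function returns B_FALSE and callers fall back to a sentinel:
 *
 *	uint64_t offset;
 *	if (!overflow_multiply(blkid, datablksz, &offset))
 *		offset = UINT64_MAX;
 */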
struct send_thread_arg {
	bqueue_t q;
	objset_t *os;		/* Objset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	uint64_t *num_blocks_visited;
};

struct redact_list_thread_arg {
	boolean_t cancel;
	bqueue_t q;
	zbookmark_phys_t resume;
	redaction_list_t *rl;
	boolean_t mark_redact;
	int error_code;
	uint64_t *num_blocks_visited;
};

struct send_merge_thread_arg {
	bqueue_t q;
	objset_t *os;
	struct redact_list_thread_arg *from_arg;
	struct send_thread_arg *to_arg;
	struct redact_list_thread_arg *redact_arg;
	int error;
	boolean_t cancel;
};

struct send_range {
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t object;
	uint64_t start_blkid;
	uint64_t end_blkid;
	bqueue_node_t ln;
	enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
	    PREVIOUSLY_REDACTED} type;
	union {
		struct srd {
			dmu_object_type_t obj_type;
			uint32_t datablksz; // logical size
			uint32_t datasz; // payload size
			blkptr_t bp;
			arc_buf_t *abuf;
			abd_t *abd;
			kmutex_t lock;
			kcondvar_t cv;
			boolean_t io_outstanding;
			boolean_t io_compressed;
			int io_err;
		} data;
		struct srh {
			uint32_t datablksz;
		} hole;
		struct sro {
			/*
			 * This is a pointer because embedding it in the
			 * struct causes these structures to be massively larger
			 * for all range types; this makes the code much less
			 * memory efficient.
			 */
			dnode_phys_t *dnp;
			blkptr_t bp;
		} object;
		struct srr {
			uint32_t datablksz;
		} redact;
		struct sror {
			blkptr_t bp;
		} object_range;
	} sru;
};

/*
 * The list of data whose inclusion in a send stream can be pending from
 * one call to backup_cb to another.  Multiple calls to dump_free(),
 * dump_freeobjects(), and dump_redact() can be aggregated into a single
 * DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
 */
typedef enum {
	PENDING_NONE,
	PENDING_FREE,
	PENDING_FREEOBJECTS,
	PENDING_REDACT
} dmu_pendop_t;

typedef struct dmu_send_cookie {
	dmu_replay_record_t *dsc_drr;
	dmu_send_outparams_t *dsc_dso;
	offset_t *dsc_off;
	objset_t *dsc_os;
	zio_cksum_t dsc_zc;
	uint64_t dsc_toguid;
	uint64_t dsc_fromtxg;
	int dsc_err;
	dmu_pendop_t dsc_pending_op;
	uint64_t dsc_featureflags;
	uint64_t dsc_last_data_object;
	uint64_t dsc_last_data_offset;
	uint64_t dsc_resume_object;
	uint64_t dsc_resume_offset;
	boolean_t dsc_sent_begin;
	boolean_t dsc_sent_end;
} dmu_send_cookie_t;

static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);

static void
range_free(struct send_range *range)
{
	if (range->type == OBJECT) {
		size_t size = sizeof (dnode_phys_t) *
		    (range->sru.object.dnp->dn_extra_slots + 1);
		kmem_free(range->sru.object.dnp, size);
	} else if (range->type == DATA) {
		mutex_enter(&range->sru.data.lock);
		while (range->sru.data.io_outstanding)
			cv_wait(&range->sru.data.cv, &range->sru.data.lock);
		if (range->sru.data.abd != NULL)
			abd_free(range->sru.data.abd);
		if (range->sru.data.abuf != NULL) {
			arc_buf_destroy(range->sru.data.abuf,
			    &range->sru.data.abuf);
		}
		mutex_exit(&range->sru.data.lock);

		cv_destroy(&range->sru.data.cv);
		mutex_destroy(&range->sru.data.lock);
	}
	kmem_free(range, sizeof (*range));
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
	dmu_send_outparams_t *dso = dscp->dsc_dso;
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dscp->dsc_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dscp->dsc_zc);
	if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
		dscp->dsc_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
		    drr_checksum.drr_checksum));
		dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
	}
	if (dscp->dsc_drr->drr_type == DRR_END) {
		dscp->dsc_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dscp->dsc_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dscp->dsc_zc);
	*dscp->dsc_off += sizeof (dmu_replay_record_t);
	dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
	    sizeof (dmu_replay_record_t), dso->dso_arg);
	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		*dscp->dsc_off += payload_len;
		/*
		 * payload is null when dso_dryrun == B_TRUE (i.e. when we're
		 * doing a send size calculation)
		 */
		if (payload != NULL) {
			(void) fletcher_4_incremental_native(
			    payload, payload_len, &dscp->dsc_zc);
		}

		/*
		 * The code does not rely on this (len being a multiple of 8).
		 * We keep this assertion because of the corresponding assertion
		 * in receive_read(). Keeping this assertion ensures that we do
		 * not inadvertently break backwards compatibility (causing the
		 * assertion in receive_read() to trigger on old software).
		 *
		 * Raw sends cannot be received on old software, and so can
		 * bypass this assertion.
		 */

		ASSERT((payload_len % 8 == 0) ||
		    (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));

		dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
		    payload_len, dso->dso_arg);
		if (dscp->dsc_err != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
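/*
 * A rough sketch of the framing dump_record() maintains. Every record is a
 * fixed-size dmu_replay_record_t, optionally followed by a payload:
 *
 *	[ drr | cksum ] [ payload ] [ drr | cksum ] [ payload ] ...
 *
 * Each record's trailing drr_checksum holds the running fletcher4 checksum
 * of every stream byte before that field, including earlier records'
 * checksum fields and payloads. BEGIN records are the exception: the
 * overlaid union member carries begin-record data, so no checksum is
 * stored there.
 */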
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record
 * is also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the
 * free and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset. We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREE) {
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			if (offset + length < offset || length == UINT64_MAX)
				drrf->drr_length = UINT64_MAX;
			else
				drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	if (offset + length < offset)
		drrf->drr_length = DMU_OBJECT_END;
	else
		drrf->drr_length = length;
	drrf->drr_toguid = dscp->dsc_toguid;
	if (length == DMU_OBJECT_END) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dscp->dsc_pending_op = PENDING_FREE;
	}

	return (0);
}
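/*
 * A worked example of the aggregation above: dump_free(dscp, obj, 0, 4096)
 * followed by dump_free(dscp, obj, 4096, 4096) and
 * dump_free(dscp, obj, 8192, 4096) leaves a single pending DRR_FREE record
 * with drr_offset 0 and drr_length 12288, since each call starts exactly at
 * drr_offset + drr_length of the pending record. A call for another object,
 * or at a non-adjacent offset, pushes the pending record out first.
 */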
/*
 * Fill in the drr_redact struct, or perform aggregation if the previous
 * record is also a redaction record, and the two are adjacent.
 */
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;

	/*
	 * If there is a pending op, but it's not PENDING_REDACT, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_REDACT records can only be aggregated with
	 * other DRR_REDACT records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_REDACT) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_REDACT) {
		/*
		 * Check to see whether this redacted block can be aggregated
		 * with the pending one.
		 */
		if (drrr->drr_object == object && drrr->drr_offset +
		    drrr->drr_length == offset) {
			drrr->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a REDACT record and make it pending */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_REDACT;
	drrr->drr_object = object;
	drrr->drr_offset = offset;
	drrr->drr_length = length;
	drrr->drr_toguid = dscp->dsc_toguid;
	dscp->dsc_pending_op = PENDING_REDACT;

	return (0);
}

static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
    uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    boolean_t io_compressed, void *data)
{
	uint64_t payload_size;
	boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));
	dscp->dsc_last_data_object = object;
	dscp->dsc_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed or raw */
	boolean_t compressed =
	    (bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    io_compressed : lsize != psize);
	if (raw || compressed) {
		ASSERT(bp != NULL);
		ASSERT(raw || dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3S(psize, >, 0);

		if (raw) {
			ASSERT(BP_IS_PROTECTED(bp));

			/*
			 * This is a raw protected block so we need to pass
			 * along everything the receiving side will need to
			 * interpret this block, including the byteswap, salt,
			 * IV, and MAC.
			 */
			if (BP_SHOULD_BYTESWAP(bp))
				drrw->drr_flags |= DRR_RAW_BYTESWAP;
			zio_crypt_decode_params_bp(bp, drrw->drr_salt,
			    drrw->drr_iv);
			zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
		} else {
			/* this is a compressed block */
			ASSERT(dscp->dsc_featureflags &
			    DMU_BACKUP_FEATURE_COMPRESSED);
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
			ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
			ASSERT3S(lsize, >=, psize);
		}

		/* set fields common to compressed and raw sends */
		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
		/*
		 * There's no pre-computed checksum for partial-block writes,
		 * embedded BP's, or encrypted BP's that are being sent as
		 * plaintext, so (like fletcher4-checksummed blocks) userland
		 * will have to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dscp->dsc_drr->drr_u.drr_write_embedded);

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	uint32_t psize = drrw->drr_psize;
	uint32_t rsize = P2ROUNDUP(psize, 8);

	if (psize != rsize)
		memset(buf + psize, 0, rsize - psize);

	if (dump_record(dscp, buf, rsize) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
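/*
 * A quick arithmetic sketch of the padding above: an embedded block with
 * drr_psize = 61 is sent as rsize = P2ROUNDUP(61, 8) = 64 bytes, with the
 * last 3 bytes zeroed, so the payload stays a multiple of 8 bytes as
 * expected by receive_read().
 */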
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    void *data)
{
	struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
	uint64_t blksz = BP_GET_LSIZE(bp);
	uint64_t payload_size = blksz;

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dscp->dsc_toguid;

	/* See comment in dump_dnode() for full details */
	if (zfs_send_unmodified_spill_blocks &&
	    (BP_GET_LOGICAL_BIRTH(bp) <= dscp->dsc_fromtxg)) {
		drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
	}

	/* handle raw send fields */
	if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		ASSERT(BP_IS_PROTECTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrs->drr_compressed_size = BP_GET_PSIZE(bp);
		zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
		zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
		payload_size = drrs->drr_compressed_size;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
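/*
 * A worked example of the clamping in dump_freeobjects() below, assuming
 * the default 512-byte dnodes packed 32 per 16K dnode block: with
 * dn_maxblkid = 1 on the meta dnode, maxobj = 32 * 2 = 64. A request to
 * free objects [50, 150) is clamped to numobjs = 64 - 50 = 14, and a
 * request starting at or beyond object 64 is dropped entirely, since those
 * object IDs cannot exist on the receiving side.
 */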
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing. To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj <= firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dscp->dsc_toguid;

	dscp->dsc_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
	int bonuslen;

	if (object < dscp->dsc_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from. In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from. We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dscp->dsc_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dscp, object, 1));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dscp->dsc_toguid;

	if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);

	if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
		ASSERT(BP_IS_ENCRYPTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

		/* needed for reconstructing dnp on recv side */
		drro->drr_maxblkid = dnp->dn_maxblkid;
		drro->drr_indblkshift = dnp->dn_indblkshift;
		drro->drr_nlevels = dnp->dn_nlevels;
		drro->drr_nblkptr = dnp->dn_nblkptr;

		/*
		 * Since we encrypt the entire bonus area, the (raw) part
		 * beyond the bonuslen is actually nonzero, so we need
		 * to send it.
		 */
		if (bonuslen != 0) {
			if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
				return (SET_ERROR(EINVAL));
			drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
			bonuslen = drro->drr_raw_bonuslen;
		}
	}

	/*
	 * DRR_OBJECT_SPILL is set for every dnode which references a
	 * spill block.  This allows the receiving pool to definitively
	 * determine when a spill block should be kept or freed.
	 */
	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		drro->drr_flags |= DRR_OBJECT_SPILL;

	if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
		return (SET_ERROR(EINTR));

	/*
	 * Send DRR_SPILL records for unmodified spill blocks.  This is useful
	 * because changing certain attributes of the object (e.g. blocksize)
	 * can cause old versions of ZFS to incorrectly remove a spill block.
	 * Including these records in the stream forces an up-to-date version
	 * to always be written, ensuring they're never lost.  Current versions
	 * of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
	 * ignore these unmodified spill blocks.
	 */
	if (zfs_send_unmodified_spill_blocks &&
	    (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
	    (BP_GET_LOGICAL_BIRTH(DN_SPILL_BLKPTR(dnp)) <= dscp->dsc_fromtxg)) {
		struct send_range record;
		blkptr_t *bp = DN_SPILL_BLKPTR(dnp);

		memset(&record, 0, sizeof (struct send_range));
		record.type = DATA;
		record.object = object;
		record.eos_marker = B_FALSE;
		record.start_blkid = DMU_SPILL_BLKID;
		record.end_blkid = record.start_blkid + 1;
		record.sru.data.bp = *bp;
		record.sru.data.obj_type = dnp->dn_type;
		record.sru.data.datablksz = BP_GET_LSIZE(bp);

		if (do_dump(dscp, &record) != 0)
			return (SET_ERROR(EINTR));
	}

	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));

	return (0);
}

static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
    uint64_t firstobj, uint64_t numslots)
{
	struct drr_object_range *drror =
	    &(dscp->dsc_drr->drr_u.drr_object_range);

	/* we only use this record type for raw sends */
	ASSERT(BP_IS_PROTECTED(bp));
	ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
	ASSERT0(BP_GET_LEVEL(bp));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dscp->dsc_toguid;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
	zio_crypt_decode_mac_bp(bp, drror->drr_mac);

	if (dump_record(dscp, NULL, 0) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * If we have not set the ZSTD feature flag, we can't send ZSTD
	 * compressed embedded blocks, as the receiver may not support them.
	 */
	if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
	    !(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, and calling the appropriate helper function.  In most cases,
 * the data has already been read by send_reader_thread().
 */
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
	int err = 0;
	switch (range->type) {
	case OBJECT:
		err = dump_dnode(dscp, &range->sru.object.bp, range->object,
		    range->sru.object.dnp);
		return (err);
	case OBJECT_RANGE: {
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
			return (0);
		}
		uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
		    DNODE_SHIFT;
		uint64_t firstobj = range->start_blkid * epb;
		err = dump_object_range(dscp, &range->sru.object_range.bp,
		    firstobj, epb);
		break;
	}
	case REDACT: {
		struct srr *srrp = &range->sru.redact;
		err = dump_redact(dscp, range->object, range->start_blkid *
		    srrp->datablksz, (range->end_blkid - range->start_blkid) *
		    srrp->datablksz);
		return (err);
	}
	case DATA: {
		struct srd *srdp = &range->sru.data;
		blkptr_t *bp = &srdp->bp;
		spa_t *spa =
		    dmu_objset_spa(dscp->dsc_os);

		ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (BP_GET_TYPE(bp) == DMU_OT_SA) {
			arc_flags_t aflags = ARC_FLAG_WAIT;
			zio_flag_t zioflags = ZIO_FLAG_CANFAIL;

			if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
				ASSERT(BP_IS_PROTECTED(bp));
				zioflags |= ZIO_FLAG_RAW;
			}

			zbookmark_phys_t zb;
			ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
			zb.zb_objset = dmu_objset_id(dscp->dsc_os);
			zb.zb_object = range->object;
			zb.zb_level = 0;
			zb.zb_blkid = range->start_blkid;

			arc_buf_t *abuf = NULL;
			if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
			    bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
			    zioflags, &aflags, &zb) != 0)
				return (SET_ERROR(EIO));

			err = dump_spill(dscp, bp, zb.zb_object,
			    (abuf == NULL ? NULL : abuf->b_data));
			if (abuf != NULL)
				arc_buf_destroy(abuf, &abuf);
			return (err);
		}
		if (send_do_embed(bp, dscp->dsc_featureflags)) {
			err = dump_write_embedded(dscp, range->object,
			    range->start_blkid * srdp->datablksz,
			    srdp->datablksz, bp);
			return (err);
		}
		ASSERT(range->object > dscp->dsc_resume_object ||
		    (range->object == dscp->dsc_resume_object &&
		    range->start_blkid * srdp->datablksz >=
		    dscp->dsc_resume_offset));
		/* it's a level-0 block of a regular object */

		mutex_enter(&srdp->lock);
		while (srdp->io_outstanding)
			cv_wait(&srdp->cv, &srdp->lock);
		err = srdp->io_err;
		mutex_exit(&srdp->lock);

		if (err != 0) {
			if (zfs_send_corrupt_data &&
			    !dscp->dsc_dso->dso_dryrun) {
				/*
				 * Send a block filled with 0x"zfs badd bloc"
				 */
				srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
				    ARC_BUFC_DATA, srdp->datablksz);
				uint64_t *ptr;
				for (ptr = srdp->abuf->b_data;
				    (char *)ptr < (char *)srdp->abuf->b_data +
				    srdp->datablksz; ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		ASSERT(dscp->dsc_dso->dso_dryrun ||
		    srdp->abuf != NULL || srdp->abd != NULL);

		uint64_t offset = range->start_blkid * srdp->datablksz;

		char *data = NULL;
		if (srdp->abd != NULL) {
			data = abd_to_buf(srdp->abd);
			ASSERT3P(srdp->abuf, ==, NULL);
		} else if (srdp->abuf != NULL) {
			data = srdp->abuf->b_data;
		}

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
			while (srdp->datablksz > 0 && err == 0) {
				int n = MIN(srdp->datablksz,
				    SPA_OLD_MAXBLOCKSIZE);
				err = dmu_dump_write(dscp, srdp->obj_type,
				    range->object, offset, n, n, NULL, B_FALSE,
				    data);
				offset += n;
				/*
				 * When doing dry run, data==NULL is used as a
				 * sentinel value by
				 * dmu_dump_write()->dump_record().
				 */
				if (data != NULL)
					data += n;
				srdp->datablksz -= n;
			}
		} else {
			err = dmu_dump_write(dscp, srdp->obj_type,
			    range->object, offset,
			    srdp->datablksz, srdp->datasz, bp,
			    srdp->io_compressed, data);
		}
		return (err);
	}
	case HOLE: {
		struct srh *srhp = &range->sru.hole;
		if (range->object == DMU_META_DNODE_OBJECT) {
			uint32_t span = srhp->datablksz >> DNODE_SHIFT;
			uint64_t first_obj = range->start_blkid * span;
			uint64_t numobj = range->end_blkid * span - first_obj;
			return (dump_freeobjects(dscp, first_obj, numobj));
		}
		uint64_t offset = 0;

		/*
		 * If this multiply overflows, we don't need to send this block.
		 * Even if it has a birth time, it can never not be a hole, so
		 * we don't need to send records for it.
		 */
		if (!overflow_multiply(range->start_blkid, srhp->datablksz,
		    &offset)) {
			return (0);
		}
		uint64_t len = 0;

		if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
			len = UINT64_MAX;
		len = len - offset;
		return (dump_free(dscp, range->object, offset, len));
	}
	default:
		panic("Invalid range type in do_dump: %d", range->type);
	}
	return (err);
}

static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
    uint64_t end_blkid, boolean_t eos)
{
	struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
	range->type = type;
	range->object = object;
	range->start_blkid = start_blkid;
	range->end_blkid = end_blkid;
	range->eos_marker = eos;
	if (type == DATA) {
		range->sru.data.abd = NULL;
		range->sru.data.abuf = NULL;
		mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
		range->sru.data.io_outstanding = 0;
		range->sru.data.io_err = 0;
		range->sru.data.io_compressed = B_FALSE;
	}
	return (range);
}

/*
 * This is the callback function to traverse_dataset that acts as a worker
 * thread for dmu_send_impl.
 */
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	(void) zilog;
	struct send_thread_arg *sta = arg;
	struct send_range *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (sta->os->os_encrypted &&
	    !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
		spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp));
		return (SET_ERROR(EIO));
	}

	if (sta->cancel)
		return (SET_ERROR(EINTR));
	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object))
		return (0);
	atomic_inc_64(sta->num_blocks_visited);

	if (zb->zb_level == ZB_DNODE_LEVEL) {
		if (zb->zb_object == DMU_META_DNODE_OBJECT)
			return (0);
		record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
		record->sru.object.bp = *bp;
		size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
		record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
		memcpy(record->sru.object.dnp, dnp, size);
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
	    !BP_IS_HOLE(bp)) {
		record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
		    zb->zb_blkid + 1, B_FALSE);
		record->sru.object_range.bp = *bp;
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
		return (0);
	if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
		return (0);

	uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
	uint64_t start;

	/*
	 * If this multiply overflows, we don't need to send this block.
	 * Even if it has a birth time, it can never not be a hole, so
	 * we don't need to send records for it.
	 */
	if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
	    DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
	    span * zb->zb_blkid > dnp->dn_maxblkid)) {
		ASSERT(BP_IS_HOLE(bp));
		return (0);
	}

	if (zb->zb_blkid == DMU_SPILL_BLKID)
		ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);

	enum type record_type = DATA;
	if (BP_IS_HOLE(bp))
		record_type = HOLE;
	else if (BP_IS_REDACTED(bp))
		record_type = REDACT;
	else
		record_type = DATA;

	record = range_alloc(record_type, zb->zb_object, start,
	    (start + span < start ? 0 : start + span), B_FALSE);

	uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
	    BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (BP_IS_HOLE(bp)) {
		record->sru.hole.datablksz = datablksz;
	} else if (BP_IS_REDACTED(bp)) {
		record->sru.redact.datablksz = datablksz;
	} else {
		record->sru.data.datablksz = datablksz;
		record->sru.data.obj_type = dnp->dn_type;
		record->sru.data.bp = *bp;
	}

	bqueue_enqueue(&sta->q, record, sizeof (*record));
	return (0);
}

struct redact_list_cb_arg {
	uint64_t *num_blocks_visited;
	bqueue_t *q;
	boolean_t *cancel;
	boolean_t mark_redact;
};

static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
	struct redact_list_cb_arg *rlcap = arg;

	atomic_inc_64(rlcap->num_blocks_visited);
	if (*rlcap->cancel)
		return (-1);

	struct send_range *data = range_alloc(REDACT, rb->rbp_object,
	    rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
	ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
	if (rlcap->mark_redact) {
		data->type = REDACT;
		data->sru.redact.datablksz = redact_block_get_size(rb);
	} else {
		data->type = PREVIOUSLY_REDACTED;
	}
	bqueue_enqueue(rlcap->q, data, sizeof (*data));

	return (0);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.
 */
static __attribute__((noreturn)) void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err = 0;
	struct send_range *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
	    st_arg->fromtxg, &st_arg->resume,
	    st_arg->flags, send_cb, st_arg);

	if (err != EINTR)
		st_arg->error_code = err;
	data = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
	spl_fstrans_unmark(cookie);
	thread_exit();
}

/*
 * Utility function that causes End of Stream records to compare after all
 * others, so that other threads' comparison logic can stay simple.
 */
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
	if (from->eos_marker == B_TRUE)
		return (1);
	if (to->eos_marker == B_TRUE)
		return (-1);

	uint64_t from_obj = from->object;
	uint64_t from_end_obj = from->object + 1;
	uint64_t to_obj = to->object;
	uint64_t to_end_obj = to->object + 1;
	if (from_obj == 0) {
		ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
		from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
		from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}
	if (to_obj == 0) {
		ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
		to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
		to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}

	if (from_end_obj <= to_obj)
		return (-1);
	if (from_obj >= to_end_obj)
		return (1);
	int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
	    OBJECT_RANGE);
	if (unlikely(cmp))
		return (cmp);
	cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
	if (unlikely(cmp))
		return (cmp);
	if (from->end_blkid <= to->start_blkid)
		return (-1);
	if (from->start_blkid >= to->end_blkid)
		return (1);
	return (0);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, but do not free the old data.  This is used so that the
 * records can be sent on to the main thread without copying the data.
 */
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = bqueue_dequeue(bq);
	ASSERT3S(send_range_after(prev, next), ==, -1);
	return (next);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, and free the old data.
 */
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = get_next_range_nofree(bq, prev);
	range_free(prev);
	return (next);
}

static __attribute__((noreturn)) void
redact_list_thread(void *arg)
{
	struct redact_list_thread_arg *rlt_arg = arg;
	struct send_range *record;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	if (rlt_arg->rl != NULL) {
		struct redact_list_cb_arg rlcba = {0};
		rlcba.cancel = &rlt_arg->cancel;
		rlcba.q = &rlt_arg->q;
		rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
		rlcba.mark_redact = rlt_arg->mark_redact;
		int err = dsl_redaction_list_traverse(rlt_arg->rl,
		    &rlt_arg->resume, redact_list_cb, &rlcba);
		if (err != EINTR)
			rlt_arg->error_code = err;
	}
	record = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
	spl_fstrans_unmark(cookie);

	thread_exit();
}

/*
 * Compare the start point of the two provided ranges. End of stream ranges
 * compare last; objects compare before any data or hole inside that object,
 * and before multi-object holes that start at the same object.
 */
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
	uint64_t r1_objequiv = r1->object;
	uint64_t r1_l0equiv = r1->start_blkid;
	uint64_t r2_objequiv = r2->object;
	uint64_t r2_l0equiv = r2->start_blkid;
	int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
	if (unlikely(cmp))
		return (cmp);
	if (r1->object == 0) {
		r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
		r1_l0equiv = 0;
	}
	if (r2->object == 0) {
		r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
		r2_l0equiv = 0;
	}

	cmp = TREE_CMP(r1_objequiv, r2_objequiv);
	if (likely(cmp))
		return (cmp);
	cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
	if (unlikely(cmp))
		return (cmp);
	cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
	if (unlikely(cmp))
		return (cmp);

	return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}

enum q_idx {
	REDACT_IDX = 0,
	TO_IDX,
	FROM_IDX,
	NUM_THREADS
};

/*
 * This function returns the next range the send_merge_thread should operate
 * on. The inputs are two arrays; the first one stores the range at the front
 * of the queues stored in the second one. The ranges are sorted in descending
 * priority order; the metadata from earlier ranges overrules metadata from
 * later ranges. out_mask is used to return which threads the ranges came
 * from; bit i is set if ranges[i] started at the same place as the returned
 * range.
 *
 * This code is not hardcoded to compare a specific number of threads; it
 * could be used with any number, just by changing the q_idx enum.
 *
 * The "next range" is the one with the earliest start; if two starts are
 * equal, the highest-priority range is the next to operate on. If a
 * higher-priority range starts in the middle of the first range, then the
 * first range will be truncated to end where the higher-priority range
 * starts, and we will operate on that one next time. In this way, we make
 * sure that each block covered by some range gets covered by a returned
 * range, and each block covered is returned using the metadata of the
 * highest-priority range it appears in.
 *
 * For example, if the three ranges at the front of the queues were [2,4),
 * [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
 * from the third range, [2,4) with the metadata from the first range, and
 * then [4,5) with the metadata from the second.
 */
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
	int idx = 0; // index of the range with the earliest start
	int i;
	uint64_t bmask = 0;
	for (i = 1; i < NUM_THREADS; i++) {
		if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
			idx = i;
	}
	if (ranges[idx]->eos_marker) {
		struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
		*out_mask = 0;
		return (ret);
	}
	/*
	 * Find all the ranges that start at that same point.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
			bmask |= 1 << i;
	}
	*out_mask = bmask;
	/*
	 * OBJECT_RANGE records only come from the TO thread, and should
	 * always be treated as overlapping with nothing and sent on
	 * immediately.  They are only used in raw sends, and are never
	 * redacted.
	 */
	if (ranges[idx]->type == OBJECT_RANGE) {
		ASSERT3U(idx, ==, TO_IDX);
		ASSERT3U(*out_mask, ==, 1 << TO_IDX);
		struct send_range *ret = ranges[idx];
		ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
		return (ret);
	}
	/*
	 * Find the first start or end point after the start of the first
	 * range.
	 */
	uint64_t first_change = ranges[idx]->end_blkid;
	for (i = 0; i < NUM_THREADS; i++) {
		if (i == idx || ranges[i]->eos_marker ||
		    ranges[i]->object > ranges[idx]->object ||
		    ranges[i]->object == DMU_META_DNODE_OBJECT)
			continue;
		ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
		if (first_change > ranges[i]->start_blkid &&
		    (bmask & (1 << i)) == 0)
			first_change = ranges[i]->start_blkid;
		else if (first_change > ranges[i]->end_blkid)
			first_change = ranges[i]->end_blkid;
	}
	/*
	 * Update all ranges to no longer overlap with the range we're
	 * returning.  All such ranges must start at the same place as the
	 * range being returned, and end at or after first_change.  Thus we
	 * update their start to first_change.  If that makes them size 0,
	 * then free them and pull a new range from that thread.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		if (i == idx || (bmask & (1 << i)) == 0)
			continue;
		ASSERT3U(first_change, >, ranges[i]->start_blkid);
		ranges[i]->start_blkid = first_change;
		ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
		if (ranges[i]->start_blkid == ranges[i]->end_blkid)
			ranges[i] = get_next_range(qs[i], ranges[i]);
	}
	/*
	 * Short-circuit the simple case; if the range doesn't overlap with
	 * anything else, or it only overlaps with things that start at the
	 * same place and are longer, send it on.
	 */
	if (first_change == ranges[idx]->end_blkid) {
		struct send_range *ret = ranges[idx];
		ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
		return (ret);
	}

	/*
	 * Otherwise, return a truncated copy of ranges[idx] and move the start
	 * of ranges[idx] back to first_change.
	 */
	struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
	*ret = *ranges[idx];
	ret->end_blkid = first_change;
	ranges[idx]->start_blkid = first_change;
	return (ret);
}

#define	FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
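/*
 * An illustrative use of the mask above: when find_next_range() returns a
 * range that was at the front of both the FROM and REDACT queues, out_mask
 * is (1 << FROM_IDX) | (1 << REDACT_IDX), which equals
 * FROM_AND_REDACT_BITS, and send_merge_thread() below drops that range as
 * already redacted on the receiving side.
 */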
/*
 * Merge the results from the from thread and the to thread, and then hand the
 * records off to send_prefetch_thread to prefetch them.  If this is not a
 * send from a redaction bookmark, the from thread will push an end of stream
 * record and stop, and we'll just send everything that was changed in the
 * to_ds since the ancestor's creation txg.  If it is, then since
 * traverse_dataset has a canonical order, we can compare each change as
 * they're pulled off the queues.  That will give us a stream that is
 * appropriately sorted, and covers all records.  In addition, we pull the
 * data from the redact_list_thread and use that to determine which blocks
 * should be redacted.
 */
static __attribute__((noreturn)) void
send_merge_thread(void *arg)
{
	struct send_merge_thread_arg *smt_arg = arg;
	struct send_range *front_ranges[NUM_THREADS];
	bqueue_t *queues[NUM_THREADS];
	int err = 0;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	if (smt_arg->redact_arg == NULL) {
		front_ranges[REDACT_IDX] =
		    kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
		front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
		front_ranges[REDACT_IDX]->type = REDACT;
		queues[REDACT_IDX] = NULL;
	} else {
		front_ranges[REDACT_IDX] =
		    bqueue_dequeue(&smt_arg->redact_arg->q);
		queues[REDACT_IDX] = &smt_arg->redact_arg->q;
	}
	front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
	queues[TO_IDX] = &smt_arg->to_arg->q;
	front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
	queues[FROM_IDX] = &smt_arg->from_arg->q;
	uint64_t mask = 0;
	struct send_range *range;
	for (range = find_next_range(front_ranges, queues, &mask);
	    !range->eos_marker && err == 0 && !smt_arg->cancel;
	    range = find_next_range(front_ranges, queues, &mask)) {
		/*
		 * If the range in question was in both the from redact bookmark
		 * and the bookmark we're using to redact, then don't send it.
		 * It's already redacted on the receiving system, so a redaction
		 * record would be redundant.
		 */
		if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
			ASSERT3U(range->type, ==, REDACT);
			range_free(range);
			continue;
		}
		bqueue_enqueue(&smt_arg->q, range, sizeof (*range));

		if (smt_arg->to_arg->error_code != 0) {
			err = smt_arg->to_arg->error_code;
		} else if (smt_arg->from_arg->error_code != 0) {
			err = smt_arg->from_arg->error_code;
		} else if (smt_arg->redact_arg != NULL &&
		    smt_arg->redact_arg->error_code != 0) {
			err = smt_arg->redact_arg->error_code;
		}
	}
	if (smt_arg->cancel && err == 0)
		err = SET_ERROR(EINTR);
	smt_arg->error = err;
	if (smt_arg->error != 0) {
		smt_arg->to_arg->cancel = B_TRUE;
		smt_arg->from_arg->cancel = B_TRUE;
		if (smt_arg->redact_arg != NULL)
			smt_arg->redact_arg->cancel = B_TRUE;
	}
	for (int i = 0; i < NUM_THREADS; i++) {
		while (!front_ranges[i]->eos_marker) {
			front_ranges[i] = get_next_range(queues[i],
			    front_ranges[i]);
		}
		range_free(front_ranges[i]);
	}
	range->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&smt_arg->q, range, 1);
	spl_fstrans_unmark(cookie);
	thread_exit();
}

struct send_reader_thread_arg {
	struct send_merge_thread_arg *smta;
	bqueue_t q;
	boolean_t cancel;
	boolean_t issue_reads;
	uint64_t featureflags;
	int error;
};

static void
dmu_send_read_done(zio_t *zio)
{
	struct send_range *range = zio->io_private;

	mutex_enter(&range->sru.data.lock);
	if (zio->io_error != 0) {
		abd_free(range->sru.data.abd);
		range->sru.data.abd = NULL;
		range->sru.data.io_err = zio->io_error;
	}

	ASSERT(range->sru.data.io_outstanding);
	range->sru.data.io_outstanding = B_FALSE;
	cv_broadcast(&range->sru.data.cv);
	mutex_exit(&range->sru.data.lock);
}
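/*
 * A sketch of the flag selection in issue_data_read() below: a raw send
 * reads with ZIO_FLAG_RAW and ships the on-disk (possibly encrypted and
 * compressed) bytes unchanged; a compressed send of, say, a 128K lz4 block
 * that needs no byteswap or splitting reads with ZIO_FLAG_RAW_COMPRESS and
 * sets datasz to the block's psize; in all other cases the block is read
 * and sent at its full lsize as plain data.
 */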
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
	struct srd *srdp = &range->sru.data;
	blkptr_t *bp = &srdp->bp;
	objset_t *os = srta->smta->os;

	ASSERT3U(range->type, ==, DATA);
	ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
	/*
	 * If we have large blocks stored on disk but
	 * the send flags don't allow us to send large
	 * blocks, we split the data from the arc buf
	 * into chunks.
	 */
	boolean_t split_large_blocks =
	    srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
	    !(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
	/*
	 * We should only request compressed data from the ARC if all
	 * the following are true:
	 * - stream compression was requested
	 * - we aren't splitting large blocks into smaller chunks
	 * - the data won't need to be byteswapped before sending
	 * - this isn't an embedded block
	 * - this isn't metadata (if receiving on a different endian
	 *   system it can be byteswapped more easily)
	 */
	boolean_t request_compressed =
	    (srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
	    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
	    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

	zio_flag_t zioflags = ZIO_FLAG_CANFAIL;

	if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
		zioflags |= ZIO_FLAG_RAW;
		srdp->io_compressed = B_TRUE;
	} else if (request_compressed) {
		zioflags |= ZIO_FLAG_RAW_COMPRESS;
		srdp->io_compressed = B_TRUE;
	}

	srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
	    BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);

	if (!srta->issue_reads)
		return;
	if (BP_IS_REDACTED(bp))
		return;
	if (send_do_embed(bp, srta->featureflags))
		return;

	zbookmark_phys_t zb = {
		.zb_objset = dmu_objset_id(os),
		.zb_object = range->object,
		.zb_level = 0,
		.zb_blkid = range->start_blkid,
	};

	arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;

	int arc_err = arc_read(NULL, os->os_spa, bp,
	    arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
	    zioflags, &aflags, &zb);
	/*
	 * If the data is not already cached in the ARC, we read directly
	 * from zio.  This avoids the performance overhead of adding a new
	 * entry to the ARC, and we also avoid polluting the ARC cache with
	 * data that is not likely to be used in the future.
	 */
	if (arc_err != 0) {
		srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
		srdp->io_outstanding = B_TRUE;
		zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
		    srdp->datasz, dmu_send_read_done, range,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
	}
}

/*
 * Create a new record with the given values.
 */
REDACT : DATA)); 1716 1717 struct send_range *range = range_alloc(range_type, dn->dn_object, 1718 blkid, blkid + count, B_FALSE); 1719 1720 if (blkid == DMU_SPILL_BLKID) { 1721 ASSERT3P(bp, !=, NULL); 1722 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA); 1723 } 1724 1725 switch (range_type) { 1726 case HOLE: 1727 range->sru.hole.datablksz = datablksz; 1728 break; 1729 case DATA: 1730 ASSERT3U(count, ==, 1); 1731 range->sru.data.datablksz = datablksz; 1732 range->sru.data.obj_type = dn->dn_type; 1733 range->sru.data.bp = *bp; 1734 issue_data_read(srta, range); 1735 break; 1736 case REDACT: 1737 range->sru.redact.datablksz = datablksz; 1738 break; 1739 default: 1740 break; 1741 } 1742 bqueue_enqueue(q, range, datablksz); 1743 } 1744 1745 /* 1746 * This thread is responsible for two things: First, it retrieves the correct 1747 * blkptr in the to ds if we need to send the data because of something from 1748 * the from thread. As a result of this, we're the first ones to discover that 1749 * some indirect blocks can be discarded because they're not holes. Second, 1750 * it issues prefetches for the data we need to send. 1751 */ 1752 static __attribute__((noreturn)) void 1753 send_reader_thread(void *arg) 1754 { 1755 struct send_reader_thread_arg *srta = arg; 1756 struct send_merge_thread_arg *smta = srta->smta; 1757 bqueue_t *inq = &smta->q; 1758 bqueue_t *outq = &srta->q; 1759 objset_t *os = smta->os; 1760 fstrans_cookie_t cookie = spl_fstrans_mark(); 1761 struct send_range *range = bqueue_dequeue(inq); 1762 int err = 0; 1763 1764 /* 1765 * If the record we're analyzing is from a redaction bookmark from the 1766 * fromds, then we need to know whether or not it exists in the tods so 1767 * we know whether to create records for it or not. If it does, we need 1768 * the datablksz so we can generate an appropriate record for it. 1769 * Finally, if it isn't redacted, we need the blkptr so that we can send 1770 * a WRITE record containing the actual data. 1771 */ 1772 uint64_t last_obj = UINT64_MAX; 1773 uint64_t last_obj_exists = B_TRUE; 1774 while (!range->eos_marker && !srta->cancel && smta->error == 0 && 1775 err == 0) { 1776 switch (range->type) { 1777 case DATA: 1778 issue_data_read(srta, range); 1779 bqueue_enqueue(outq, range, range->sru.data.datablksz); 1780 range = get_next_range_nofree(inq, range); 1781 break; 1782 case HOLE: 1783 case OBJECT: 1784 case OBJECT_RANGE: 1785 case REDACT: // Redacted blocks must exist 1786 bqueue_enqueue(outq, range, sizeof (*range)); 1787 range = get_next_range_nofree(inq, range); 1788 break; 1789 case PREVIOUSLY_REDACTED: { 1790 /* 1791 * This entry came from the "from bookmark" when 1792 * sending from a bookmark that has a redaction 1793 * list. We need to check if this object/blkid 1794 * exists in the target ("to") dataset, and if 1795 * not then we drop this entry. We also need 1796 * to fill in the block pointer so that we know 1797 * what to prefetch. 1798 * 1799 * To accomplish the above, we first cache whether or 1800 * not the last object we examined exists. If it 1801 * doesn't, we can drop this record. If it does, we hold 1802 * the dnode and use it to call dbuf_dnode_findbp. We do 1803 * this instead of dbuf_bookmark_findbp because we will 1804 * often operate on large ranges, and holding the dnode 1805 * once is more efficient. 1806 */ 1807 boolean_t object_exists = B_TRUE; 1808 /* 1809 * If the data is redacted, we only care if it exists, 1810 * so that we don't send records for objects that have 1811 * been deleted. 
1812 */ 1813 dnode_t *dn; 1814 if (range->object == last_obj && !last_obj_exists) { 1815 /* 1816 * If we're still examining the same object as 1817 * previously, and it doesn't exist, we don't 1818 * need to call dbuf_bookmark_findbp. 1819 */ 1820 object_exists = B_FALSE; 1821 } else { 1822 err = dnode_hold(os, range->object, FTAG, &dn); 1823 if (err == ENOENT) { 1824 object_exists = B_FALSE; 1825 err = 0; 1826 } 1827 last_obj = range->object; 1828 last_obj_exists = object_exists; 1829 } 1830 1831 if (err != 0) { 1832 break; 1833 } else if (!object_exists) { 1834 /* 1835 * The block was modified, but doesn't 1836 * exist in the to dataset; if it was 1837 * deleted in the to dataset, then we'll 1838 * visit the hole bp for it at some point. 1839 */ 1840 range = get_next_range(inq, range); 1841 continue; 1842 } 1843 uint64_t file_max = 1844 MIN(dn->dn_maxblkid, range->end_blkid); 1845 /* 1846 * The object exists, so we need to try to find the 1847 * blkptr for each block in the range we're processing. 1848 */ 1849 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1850 for (uint64_t blkid = range->start_blkid; 1851 blkid < file_max; blkid++) { 1852 blkptr_t bp; 1853 uint32_t datablksz = 1854 dn->dn_phys->dn_datablkszsec << 1855 SPA_MINBLOCKSHIFT; 1856 uint64_t offset = blkid * datablksz; 1857 /* 1858 * This call finds the next non-hole block in 1859 * the object. This is to prevent a 1860 * performance problem where we're unredacting 1861 * a large hole. Using dnode_next_offset to 1862 * skip over the large hole avoids iterating 1863 * over every block in it. 1864 */ 1865 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK, 1866 &offset, 1, 1, 0); 1867 if (err == ESRCH) { 1868 offset = UINT64_MAX; 1869 err = 0; 1870 } else if (err != 0) { 1871 break; 1872 } 1873 if (offset != blkid * datablksz) { 1874 /* 1875 * There is a hole from here (blkid) to 1876 * offset, so enqueue a hole record for it. 1877 */ 1878 offset = MIN(offset, file_max * 1879 datablksz); 1880 uint64_t nblks = (offset / datablksz) - 1881 blkid; 1882 enqueue_range(srta, outq, dn, blkid, 1883 nblks, NULL, datablksz); 1884 blkid += nblks; 1885 } 1886 if (blkid >= file_max) 1887 break; 1888 err = dbuf_dnode_findbp(dn, 0, blkid, &bp, 1889 NULL, NULL); 1890 if (err != 0) 1891 break; 1892 ASSERT(!BP_IS_HOLE(&bp)); 1893 enqueue_range(srta, outq, dn, blkid, 1, &bp, 1894 datablksz); 1895 } 1896 rw_exit(&dn->dn_struct_rwlock); 1897 dnode_rele(dn, FTAG); 1898 range = get_next_range(inq, range); 1899 } 1900 } 1901 } 1902 if (srta->cancel || err != 0) { 1903 smta->cancel = B_TRUE; 1904 srta->error = err; 1905 } else if (smta->error != 0) { 1906 srta->error = smta->error; 1907 } 1908 while (!range->eos_marker) 1909 range = get_next_range(inq, range); 1910 1911 bqueue_enqueue_flush(outq, range, 1); 1912 spl_fstrans_unmark(cookie); 1913 thread_exit(); 1914 } 1915 1916 #define NUM_SNAPS_NOT_REDACTED UINT64_MAX 1917 1918 struct dmu_send_params { 1919 /* Pool args */ 1920 const void *tag; // Tag dp was held with, will be used to release dp.
1921 dsl_pool_t *dp; 1922 /* To snapshot args */ 1923 const char *tosnap; 1924 dsl_dataset_t *to_ds; 1925 /* From snapshot args */ 1926 zfs_bookmark_phys_t ancestor_zb; 1927 uint64_t *fromredactsnaps; 1928 /* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */ 1929 uint64_t numfromredactsnaps; 1930 /* Stream params */ 1931 boolean_t is_clone; 1932 boolean_t embedok; 1933 boolean_t large_block_ok; 1934 boolean_t compressok; 1935 boolean_t rawok; 1936 boolean_t savedok; 1937 uint64_t resumeobj; 1938 uint64_t resumeoff; 1939 uint64_t saved_guid; 1940 zfs_bookmark_phys_t *redactbook; 1941 /* Stream output params */ 1942 dmu_send_outparams_t *dso; 1943 1944 /* Stream progress params */ 1945 offset_t *off; 1946 int outfd; 1947 char saved_toname[MAXNAMELEN]; 1948 }; 1949 1950 static int 1951 setup_featureflags(struct dmu_send_params *dspp, objset_t *os, 1952 uint64_t *featureflags) 1953 { 1954 dsl_dataset_t *to_ds = dspp->to_ds; 1955 dsl_pool_t *dp = dspp->dp; 1956 1957 if (dmu_objset_type(os) == DMU_OST_ZFS) { 1958 uint64_t version; 1959 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) 1960 return (SET_ERROR(EINVAL)); 1961 1962 if (version >= ZPL_VERSION_SA) 1963 *featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; 1964 } 1965 1966 /* raw sends imply large_block_ok */ 1967 if ((dspp->rawok || dspp->large_block_ok) && 1968 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) { 1969 *featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS; 1970 } 1971 1972 /* encrypted datasets will not have embedded blocks */ 1973 if ((dspp->embedok || dspp->rawok) && !os->os_encrypted && 1974 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) { 1975 *featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA; 1976 } 1977 1978 /* raw send implies compressok */ 1979 if (dspp->compressok || dspp->rawok) 1980 *featureflags |= DMU_BACKUP_FEATURE_COMPRESSED; 1981 1982 if (dspp->rawok && os->os_encrypted) 1983 *featureflags |= DMU_BACKUP_FEATURE_RAW; 1984 1985 if ((*featureflags & 1986 (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED | 1987 DMU_BACKUP_FEATURE_RAW)) != 0 && 1988 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) { 1989 *featureflags |= DMU_BACKUP_FEATURE_LZ4; 1990 } 1991 1992 /* 1993 * We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to 1994 * allow sending ZSTD compressed datasets to a receiver that does not 1995 * support ZSTD 1996 */ 1997 if ((*featureflags & 1998 (DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 && 1999 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) { 2000 *featureflags |= DMU_BACKUP_FEATURE_ZSTD; 2001 } 2002 2003 if (dspp->resumeobj != 0 || dspp->resumeoff != 0) { 2004 *featureflags |= DMU_BACKUP_FEATURE_RESUMING; 2005 } 2006 2007 if (dspp->redactbook != NULL) { 2008 *featureflags |= DMU_BACKUP_FEATURE_REDACTED; 2009 } 2010 2011 if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) { 2012 *featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE; 2013 } 2014 return (0); 2015 } 2016 2017 static dmu_replay_record_t * 2018 create_begin_record(struct dmu_send_params *dspp, objset_t *os, 2019 uint64_t featureflags) 2020 { 2021 dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t), 2022 KM_SLEEP); 2023 drr->drr_type = DRR_BEGIN; 2024 2025 struct drr_begin *drrb = &drr->drr_u.drr_begin; 2026 dsl_dataset_t *to_ds = dspp->to_ds; 2027 2028 drrb->drr_magic = DMU_BACKUP_MAGIC; 2029 drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time; 2030 drrb->drr_type = 
dmu_objset_type(os); 2031 drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid; 2032 drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid; 2033 2034 DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM); 2035 DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags); 2036 2037 if (dspp->is_clone) 2038 drrb->drr_flags |= DRR_FLAG_CLONE; 2039 if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET) 2040 drrb->drr_flags |= DRR_FLAG_CI_DATA; 2041 if (zfs_send_set_freerecords_bit) 2042 drrb->drr_flags |= DRR_FLAG_FREERECORDS; 2043 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK; 2044 2045 if (dspp->savedok) { 2046 drrb->drr_toguid = dspp->saved_guid; 2047 strlcpy(drrb->drr_toname, dspp->saved_toname, 2048 sizeof (drrb->drr_toname)); 2049 } else { 2050 dsl_dataset_name(to_ds, drrb->drr_toname); 2051 if (!to_ds->ds_is_snapshot) { 2052 (void) strlcat(drrb->drr_toname, "@--head--", 2053 sizeof (drrb->drr_toname)); 2054 } 2055 } 2056 return (drr); 2057 } 2058 2059 static void 2060 setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os, 2061 dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok) 2062 { 2063 VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff, 2064 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2065 offsetof(struct send_range, ln))); 2066 to_arg->error_code = 0; 2067 to_arg->cancel = B_FALSE; 2068 to_arg->os = to_os; 2069 to_arg->fromtxg = fromtxg; 2070 to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA; 2071 if (rawok) 2072 to_arg->flags |= TRAVERSE_NO_DECRYPT; 2073 if (zfs_send_corrupt_data) 2074 to_arg->flags |= TRAVERSE_HARD; 2075 to_arg->num_blocks_visited = &dssp->dss_blocks; 2076 (void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0, 2077 curproc, TS_RUN, minclsyspri); 2078 } 2079 2080 static void 2081 setup_from_thread(struct redact_list_thread_arg *from_arg, 2082 redaction_list_t *from_rl, dmu_sendstatus_t *dssp) 2083 { 2084 VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff, 2085 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2086 offsetof(struct send_range, ln))); 2087 from_arg->error_code = 0; 2088 from_arg->cancel = B_FALSE; 2089 from_arg->rl = from_rl; 2090 from_arg->mark_redact = B_FALSE; 2091 from_arg->num_blocks_visited = &dssp->dss_blocks; 2092 /* 2093 * If from_rl is null, redact_list_thread just returns success and 2094 * enqueues an eos marker.
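 * This lets sends that are not from a redaction bookmark share the
 * merge logic: the from queue simply yields an EOS marker immediately.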
2095 */ 2096 (void) thread_create(NULL, 0, redact_list_thread, from_arg, 0, 2097 curproc, TS_RUN, minclsyspri); 2098 } 2099 2100 static void 2101 setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg, 2102 struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp) 2103 { 2104 if (dspp->redactbook == NULL) 2105 return; 2106 2107 rlt_arg->cancel = B_FALSE; 2108 VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff, 2109 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2110 offsetof(struct send_range, ln))); 2111 rlt_arg->error_code = 0; 2112 rlt_arg->mark_redact = B_TRUE; 2113 rlt_arg->rl = rl; 2114 rlt_arg->num_blocks_visited = &dssp->dss_blocks; 2115 2116 (void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0, 2117 curproc, TS_RUN, minclsyspri); 2118 } 2119 2120 static void 2121 setup_merge_thread(struct send_merge_thread_arg *smt_arg, 2122 struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg, 2123 struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg, 2124 objset_t *os) 2125 { 2126 VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff, 2127 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2128 offsetof(struct send_range, ln))); 2129 smt_arg->cancel = B_FALSE; 2130 smt_arg->error = 0; 2131 smt_arg->from_arg = from_arg; 2132 smt_arg->to_arg = to_arg; 2133 if (dspp->redactbook != NULL) 2134 smt_arg->redact_arg = rlt_arg; 2135 2136 smt_arg->os = os; 2137 (void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc, 2138 TS_RUN, minclsyspri); 2139 } 2140 2141 static void 2142 setup_reader_thread(struct send_reader_thread_arg *srt_arg, 2143 struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg, 2144 uint64_t featureflags) 2145 { 2146 VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff, 2147 MAX(zfs_send_queue_length, 2 * zfs_max_recordsize), 2148 offsetof(struct send_range, ln))); 2149 srt_arg->smta = smt_arg; 2150 srt_arg->issue_reads = !dspp->dso->dso_dryrun; 2151 srt_arg->featureflags = featureflags; 2152 (void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0, 2153 curproc, TS_RUN, minclsyspri); 2154 } 2155 2156 static int 2157 setup_resume_points(struct dmu_send_params *dspp, 2158 struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg, 2159 struct redact_list_thread_arg *rlt_arg, 2160 struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os, 2161 redaction_list_t *redact_rl, nvlist_t *nvl) 2162 { 2163 (void) smt_arg; 2164 dsl_dataset_t *to_ds = dspp->to_ds; 2165 int err = 0; 2166 2167 uint64_t obj = 0; 2168 uint64_t blkid = 0; 2169 if (resuming) { 2170 obj = dspp->resumeobj; 2171 dmu_object_info_t to_doi; 2172 err = dmu_object_info(os, obj, &to_doi); 2173 if (err != 0) 2174 return (err); 2175 2176 blkid = dspp->resumeoff / to_doi.doi_data_block_size; 2177 } 2178 /* 2179 * If we're resuming a redacted send, we can skip to the appropriate 2180 * point in the redaction bookmark by binary searching through it. 2181 */ 2182 if (redact_rl != NULL) { 2183 SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid); 2184 } 2185 2186 SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid); 2187 if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) { 2188 uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj; 2189 /* 2190 * Note: If the resume point is in an object whose 2191 * blocksize is different in the from vs to snapshots, 2192 * we will have divided by the "wrong" blocksize. 
2193 * However, in this case fromsnap's send_cb() will 2194 * detect that the blocksize has changed and therefore 2195 * ignore this object. 2196 * 2197 * If we're resuming a send from a redaction bookmark, 2198 * we still cannot accidentally suggest blocks behind 2199 * the to_ds. In addition, we know that any blocks in 2200 * the object in the to_ds will have to be sent, since 2201 * the size changed. Therefore, we can't cause any harm 2202 * this way either. 2203 */ 2204 SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid); 2205 } 2206 if (resuming) { 2207 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj); 2208 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff); 2209 } 2210 return (0); 2211 } 2212 2213 static dmu_sendstatus_t * 2214 setup_send_progress(struct dmu_send_params *dspp) 2215 { 2216 dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP); 2217 dssp->dss_outfd = dspp->outfd; 2218 dssp->dss_off = dspp->off; 2219 dssp->dss_proc = curproc; 2220 mutex_enter(&dspp->to_ds->ds_sendstream_lock); 2221 list_insert_head(&dspp->to_ds->ds_sendstreams, dssp); 2222 mutex_exit(&dspp->to_ds->ds_sendstream_lock); 2223 return (dssp); 2224 } 2225 2226 /* 2227 * Actually do the bulk of the work in a zfs send. 2228 * 2229 * The idea is that we want to do a send from ancestor_zb to to_ds. We also 2230 * want to avoid sending any data that has been modified by all the datasets in 2231 * redactsnaparr, storing the list of blocks that are redacted in this way in 2232 * a bookmark named redactbook, created on the to_ds. We do this by creating 2233 * several worker threads, whose function is described below. 2234 * 2235 * There are three cases. 2236 * The first case is a redacted zfs send. In this case there are 5 threads. 2237 * The first thread is the to_ds traversal thread: it calls traverse_dataset on 2238 * the to_ds and finds all the blocks that have changed since ancestor_zb (if 2239 * it's a full send, that's all blocks in the dataset). It then sends those 2240 * blocks on to the send merge thread. The redact list thread takes the data 2241 * from the redaction bookmark and sends those blocks on to the send merge 2242 * thread. The send merge thread takes the data from the to_ds traversal 2243 * thread, and combines it with the redaction records from the redact list 2244 * thread. If a block appears in both the to_ds's data and the redaction data, 2245 * the send merge thread will mark it as redacted and send it on to the prefetch 2246 * thread. Otherwise, the send merge thread will send the block on to the 2247 * prefetch thread unchanged. The prefetch thread will issue prefetch reads for 2248 * any data that isn't redacted, and then send the data on to the main thread. 2249 * The main thread behaves the same as in a normal send case, issuing demand 2250 * reads for data blocks and sending out records over the network. 2251 * 2252 * The graphic below diagrams the flow of data in the case of a redacted zfs 2253 * send. Each box represents a thread, and each line represents the flow of 2254 * data.
2255 * 2256 * Records from the | 2257 * redaction bookmark | 2258 * +--------------------+ | +---------------------------+ 2259 * | | v | Send Merge Thread | 2260 * | Redact List Thread +----------> Apply redaction marks to | 2261 * | | | records as specified by | 2262 * +--------------------+ | redaction ranges | 2263 * +----^---------------+------+ 2264 * | | Merged data 2265 * | | 2266 * | +------------v--------+ 2267 * | | Prefetch Thread | 2268 * +--------------------+ | | Issues prefetch | 2269 * | to_ds Traversal | | | reads of data blocks| 2270 * | Thread (finds +---------------+ +------------+--------+ 2271 * | candidate blocks) | Blocks modified | Prefetched data 2272 * +--------------------+ by to_ds since | 2273 * ancestor_zb +------------v----+ 2274 * | Main Thread | File Descriptor 2275 * | Sends data over +->(to zfs receive) 2276 * | wire | 2277 * +-----------------+ 2278 * 2279 * The second case is an incremental send from a redaction bookmark. The to_ds 2280 * traversal thread and the main thread behave the same as in the redacted 2281 * send case. The new thread is the from bookmark traversal thread. It 2282 * iterates over the redaction list in the redaction bookmark, and enqueues 2283 * records for each block that was redacted in the original send. The send 2284 * merge thread now has to merge the data from the two threads. For details 2285 * about that process, see the header comment of send_merge_thread(). Any data 2286 * it decides to send on will be prefetched by the prefetch thread. Note that 2287 * you can perform a redacted send from a redaction bookmark; in that case, 2288 * the data flow behaves very similarly to the flow in the redacted send case, 2289 * except with the addition of the bookmark traversal thread iterating over the 2290 * redaction bookmark. The send_merge_thread also has to take on the 2291 * responsibility of merging the redact list thread's records, the bookmark 2292 * traversal thread's records, and the to_ds records. 2293 * 2294 * +---------------------+ 2295 * | | 2296 * | Redact List Thread +--------------+ 2297 * | | | 2298 * +---------------------+ | 2299 * Blocks in redaction list | Ranges modified by every redaction snap 2300 * of from bookmark | (or EOS if not redacted) 2301 * | 2302 * +---------------------+ | +----v----------------------+ 2303 * | Bookmark Traversal | v | Send Merge Thread | 2304 * | Thread (finds +---------> Merges bookmark, rlt, and | 2305 * | candidate blocks) | | to_ds send records | 2306 * +---------------------+ +----^---------------+------+ 2307 * | | Merged data 2308 * | +------------v--------+ 2309 * | | Prefetch Thread | 2310 * +--------------------+ | | Issues prefetch | 2311 * | to_ds Traversal | | | reads of data blocks| 2312 * | Thread (finds +---------------+ +------------+--------+ 2313 * | candidate blocks) | Blocks modified | Prefetched data 2314 * +--------------------+ by to_ds since +------------v----+ 2315 * ancestor_zb | Main Thread | File Descriptor 2316 * | Sends data over +->(to zfs receive) 2317 * | wire | 2318 * +-----------------+ 2319 * 2320 * The final case is a simple zfs full or incremental send. The to_ds traversal 2321 * thread behaves the same as always. The redact list thread is never started. 2322 * The send merge thread passes the blocks from the to_ds traversal thread on 2323 * to the prefetch thread, which prefetches the data and sends it on to the 2324 * main thread. The main thread sends the data over the wire.
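 * Note that the from thread still runs in this case; with no redaction
 * list it immediately enqueues an EOS marker (see setup_from_thread()),
 * so the merge logic is shared by all three cases.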
2325 * 2326 * To keep performance acceptable, we want to prefetch the data in the worker 2327 * threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH 2328 * feature built into traverse_dataset, the combining and deletion of records 2329 * due to redaction and sends from redaction bookmarks mean that we could 2330 * issue many unnecessary prefetches. As a result, we only prefetch data 2331 * after we've determined that the record is not going to be redacted. To 2332 * prevent the prefetching from getting too far ahead of the main thread, the 2333 * blocking queues that are used for communication are capped not by the 2334 * number of entries in the queue, but by the sum of the size of the 2335 * prefetches associated with them. The limit on the amount of data that the 2336 * thread can prefetch beyond what the main thread has reached is controlled 2337 * by the global variable zfs_send_queue_length. In addition, to prevent poor 2338 * performance in the beginning of a send, we also limit how far ahead the 2339 * traversal threads can get. That distance is controlled by the 2340 * zfs_send_no_prefetch_queue_length tunable. 2341 * 2342 * Note: Releases dp using the specified tag. 2343 */ 2344 static int 2345 dmu_send_impl(struct dmu_send_params *dspp) 2346 { 2347 objset_t *os; 2348 dmu_replay_record_t *drr; 2349 dmu_sendstatus_t *dssp; 2350 dmu_send_cookie_t dsc = {0}; 2351 int err; 2352 uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg; 2353 uint64_t featureflags = 0; 2354 struct redact_list_thread_arg *from_arg; 2355 struct send_thread_arg *to_arg; 2356 struct redact_list_thread_arg *rlt_arg; 2357 struct send_merge_thread_arg *smt_arg; 2358 struct send_reader_thread_arg *srt_arg; 2359 struct send_range *range; 2360 redaction_list_t *from_rl = NULL; 2361 redaction_list_t *redact_rl = NULL; 2362 boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0); 2363 boolean_t book_resuming = resuming; 2364 2365 dsl_dataset_t *to_ds = dspp->to_ds; 2366 zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb; 2367 dsl_pool_t *dp = dspp->dp; 2368 const void *tag = dspp->tag; 2369 2370 err = dmu_objset_from_ds(to_ds, &os); 2371 if (err != 0) { 2372 dsl_pool_rele(dp, tag); 2373 return (err); 2374 } 2375 2376 /* 2377 * If this is a non-raw send of an encrypted ds, we can ensure that 2378 * the objset_phys_t is authenticated. This is safe because this is 2379 * either a snapshot or we have owned the dataset, ensuring that 2380 * it can't be modified. 2381 */ 2382 if (!dspp->rawok && os->os_encrypted && 2383 arc_is_unauthenticated(os->os_phys_buf)) { 2384 zbookmark_phys_t zb; 2385 2386 SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT, 2387 ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 2388 err = arc_untransform(os->os_phys_buf, os->os_spa, 2389 &zb, B_FALSE); 2390 if (err != 0) { 2391 dsl_pool_rele(dp, tag); 2392 return (err); 2393 } 2394 2395 ASSERT0(arc_is_unauthenticated(os->os_phys_buf)); 2396 } 2397 2398 if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) { 2399 dsl_pool_rele(dp, tag); 2400 return (err); 2401 } 2402 2403 /* 2404 * If we're doing a redacted send, hold the bookmark's redaction list.
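 * The long hold taken below keeps the redaction list from being
 * destroyed while the send is in progress.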
2405 */ 2406 if (dspp->redactbook != NULL) { 2407 err = dsl_redaction_list_hold_obj(dp, 2408 dspp->redactbook->zbm_redaction_obj, FTAG, 2409 &redact_rl); 2410 if (err != 0) { 2411 dsl_pool_rele(dp, tag); 2412 return (SET_ERROR(EINVAL)); 2413 } 2414 dsl_redaction_list_long_hold(dp, redact_rl, FTAG); 2415 } 2416 2417 /* 2418 * If we're sending from a redaction bookmark, hold the redaction list 2419 * so that we can consider sending the redacted blocks. 2420 */ 2421 if (ancestor_zb->zbm_redaction_obj != 0) { 2422 err = dsl_redaction_list_hold_obj(dp, 2423 ancestor_zb->zbm_redaction_obj, FTAG, &from_rl); 2424 if (err != 0) { 2425 if (redact_rl != NULL) { 2426 dsl_redaction_list_long_rele(redact_rl, FTAG); 2427 dsl_redaction_list_rele(redact_rl, FTAG); 2428 } 2429 dsl_pool_rele(dp, tag); 2430 return (SET_ERROR(EINVAL)); 2431 } 2432 dsl_redaction_list_long_hold(dp, from_rl, FTAG); 2433 } 2434 2435 dsl_dataset_long_hold(to_ds, FTAG); 2436 2437 from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP); 2438 to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP); 2439 rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP); 2440 smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP); 2441 srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP); 2442 2443 drr = create_begin_record(dspp, os, featureflags); 2444 dssp = setup_send_progress(dspp); 2445 2446 dsc.dsc_drr = drr; 2447 dsc.dsc_dso = dspp->dso; 2448 dsc.dsc_os = os; 2449 dsc.dsc_off = dspp->off; 2450 dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid; 2451 dsc.dsc_fromtxg = fromtxg; 2452 dsc.dsc_pending_op = PENDING_NONE; 2453 dsc.dsc_featureflags = featureflags; 2454 dsc.dsc_resume_object = dspp->resumeobj; 2455 dsc.dsc_resume_offset = dspp->resumeoff; 2456 2457 dsl_pool_rele(dp, tag); 2458 2459 void *payload = NULL; 2460 size_t payload_len = 0; 2461 nvlist_t *nvl = fnvlist_alloc(); 2462 2463 /* 2464 * If we're doing a redacted send, we include the snapshots we're 2465 * redacted with respect to so that the target system knows what send 2466 * streams can be correctly received on top of this dataset. If we're 2467 * instead sending a redacted dataset, we include the snapshots that the 2468 * dataset was created with respect to. 2469 */ 2470 if (dspp->redactbook != NULL) { 2471 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, 2472 redact_rl->rl_phys->rlp_snaps, 2473 redact_rl->rl_phys->rlp_num_snaps); 2474 } else if (dsl_dataset_feature_is_active(to_ds, 2475 SPA_FEATURE_REDACTED_DATASETS)) { 2476 uint64_t *tods_guids; 2477 uint64_t length; 2478 VERIFY(dsl_dataset_get_uint64_array_feature(to_ds, 2479 SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids)); 2480 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids, 2481 length); 2482 } 2483 2484 /* 2485 * If we're sending from a redaction bookmark, then we should retrieve 2486 * the guids of that bookmark so we can send them over the wire. 2487 */ 2488 if (from_rl != NULL) { 2489 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, 2490 from_rl->rl_phys->rlp_snaps, 2491 from_rl->rl_phys->rlp_num_snaps); 2492 } 2493 2494 /* 2495 * If the snapshot we're sending from is redacted, include the redaction 2496 * list in the stream. 
2497 */ 2498 if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) { 2499 ASSERT3P(from_rl, ==, NULL); 2500 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, 2501 dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps); 2502 if (dspp->numfromredactsnaps > 0) { 2503 kmem_free(dspp->fromredactsnaps, 2504 dspp->numfromredactsnaps * sizeof (uint64_t)); 2505 dspp->fromredactsnaps = NULL; 2506 } 2507 } 2508 2509 if (resuming || book_resuming) { 2510 err = setup_resume_points(dspp, to_arg, from_arg, 2511 rlt_arg, smt_arg, resuming, os, redact_rl, nvl); 2512 if (err != 0) 2513 goto out; 2514 } 2515 2516 if (featureflags & DMU_BACKUP_FEATURE_RAW) { 2517 uint64_t ivset_guid = ancestor_zb->zbm_ivset_guid; 2518 nvlist_t *keynvl = NULL; 2519 ASSERT(os->os_encrypted); 2520 2521 err = dsl_crypto_populate_key_nvlist(os, ivset_guid, 2522 &keynvl); 2523 if (err != 0) { 2524 fnvlist_free(nvl); 2525 goto out; 2526 } 2527 2528 fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl); 2529 fnvlist_free(keynvl); 2530 } 2531 2532 if (!nvlist_empty(nvl)) { 2533 payload = fnvlist_pack(nvl, &payload_len); 2534 drr->drr_payloadlen = payload_len; 2535 } 2536 2537 fnvlist_free(nvl); 2538 err = dump_record(&dsc, payload, payload_len); 2539 fnvlist_pack_free(payload, payload_len); 2540 if (err != 0) { 2541 err = dsc.dsc_err; 2542 goto out; 2543 } 2544 2545 setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok); 2546 setup_from_thread(from_arg, from_rl, dssp); 2547 setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp); 2548 setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os); 2549 setup_reader_thread(srt_arg, dspp, smt_arg, featureflags); 2550 2551 range = bqueue_dequeue(&srt_arg->q); 2552 while (err == 0 && !range->eos_marker) { 2553 err = do_dump(&dsc, range); 2554 range = get_next_range(&srt_arg->q, range); 2555 if (issig()) 2556 err = SET_ERROR(EINTR); 2557 } 2558 2559 /* 2560 * If we hit an error or are interrupted, cancel our worker threads and 2561 * clear the queue of any pending records. The threads will pass the 2562 * cancel up the tree of worker threads, and each one will clean up any 2563 * pending records before exiting. 2564 */ 2565 if (err != 0) { 2566 srt_arg->cancel = B_TRUE; 2567 while (!range->eos_marker) { 2568 range = get_next_range(&srt_arg->q, range); 2569 } 2570 } 2571 range_free(range); 2572 2573 bqueue_destroy(&srt_arg->q); 2574 bqueue_destroy(&smt_arg->q); 2575 if (dspp->redactbook != NULL) 2576 bqueue_destroy(&rlt_arg->q); 2577 bqueue_destroy(&to_arg->q); 2578 bqueue_destroy(&from_arg->q); 2579 2580 if (err == 0 && srt_arg->error != 0) 2581 err = srt_arg->error; 2582 2583 if (err != 0) 2584 goto out; 2585 2586 if (dsc.dsc_pending_op != PENDING_NONE) 2587 if (dump_record(&dsc, NULL, 0) != 0) 2588 err = SET_ERROR(EINTR); 2589 2590 if (err != 0) { 2591 if (err == EINTR && dsc.dsc_err != 0) 2592 err = dsc.dsc_err; 2593 goto out; 2594 } 2595 2596 /* 2597 * Send the DRR_END record if this is not a saved stream. 2598 * Otherwise, the omitted DRR_END record will signal to 2599 * the receive side that the stream is incomplete. 
2600 */ 2601 if (!dspp->savedok) { 2602 memset(drr, 0, sizeof (dmu_replay_record_t)); 2603 drr->drr_type = DRR_END; 2604 drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc; 2605 drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid; 2606 2607 if (dump_record(&dsc, NULL, 0) != 0) 2608 err = dsc.dsc_err; 2609 } 2610 out: 2611 mutex_enter(&to_ds->ds_sendstream_lock); 2612 list_remove(&to_ds->ds_sendstreams, dssp); 2613 mutex_exit(&to_ds->ds_sendstream_lock); 2614 2615 VERIFY(err != 0 || (dsc.dsc_sent_begin && 2616 (dsc.dsc_sent_end || dspp->savedok))); 2617 2618 kmem_free(drr, sizeof (dmu_replay_record_t)); 2619 kmem_free(dssp, sizeof (dmu_sendstatus_t)); 2620 kmem_free(from_arg, sizeof (*from_arg)); 2621 kmem_free(to_arg, sizeof (*to_arg)); 2622 kmem_free(rlt_arg, sizeof (*rlt_arg)); 2623 kmem_free(smt_arg, sizeof (*smt_arg)); 2624 kmem_free(srt_arg, sizeof (*srt_arg)); 2625 2626 dsl_dataset_long_rele(to_ds, FTAG); 2627 if (from_rl != NULL) { 2628 dsl_redaction_list_long_rele(from_rl, FTAG); 2629 dsl_redaction_list_rele(from_rl, FTAG); 2630 } 2631 if (redact_rl != NULL) { 2632 dsl_redaction_list_long_rele(redact_rl, FTAG); 2633 dsl_redaction_list_rele(redact_rl, FTAG); 2634 } 2635 2636 return (err); 2637 } 2638 2639 int 2640 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, 2641 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok, 2642 boolean_t rawok, boolean_t savedok, int outfd, offset_t *off, 2643 dmu_send_outparams_t *dsop) 2644 { 2645 int err; 2646 dsl_dataset_t *fromds; 2647 ds_hold_flags_t dsflags; 2648 struct dmu_send_params dspp = {0}; 2649 dspp.embedok = embedok; 2650 dspp.large_block_ok = large_block_ok; 2651 dspp.compressok = compressok; 2652 dspp.outfd = outfd; 2653 dspp.off = off; 2654 dspp.dso = dsop; 2655 dspp.tag = FTAG; 2656 dspp.rawok = rawok; 2657 dspp.savedok = savedok; 2658 2659 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; 2660 err = dsl_pool_hold(pool, FTAG, &dspp.dp); 2661 if (err != 0) 2662 return (err); 2663 2664 err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG, 2665 &dspp.to_ds); 2666 if (err != 0) { 2667 dsl_pool_rele(dspp.dp, FTAG); 2668 return (err); 2669 } 2670 2671 if (fromsnap != 0) { 2672 err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags, 2673 FTAG, &fromds); 2674 if (err != 0) { 2675 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2676 dsl_pool_rele(dspp.dp, FTAG); 2677 return (err); 2678 } 2679 dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; 2680 dspp.ancestor_zb.zbm_creation_txg = 2681 dsl_dataset_phys(fromds)->ds_creation_txg; 2682 dspp.ancestor_zb.zbm_creation_time = 2683 dsl_dataset_phys(fromds)->ds_creation_time; 2684 2685 if (dsl_dataset_is_zapified(fromds)) { 2686 (void) zap_lookup(dspp.dp->dp_meta_objset, 2687 fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1, 2688 &dspp.ancestor_zb.zbm_ivset_guid); 2689 } 2690 2691 /* See dmu_send for the reasons behind this. 
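 * (In short: the array returned by dsl_dataset_get_uint64_array_feature()
 * is freed when fromds is evicted, so it must be deep-copied.)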
*/ 2692 uint64_t *fromredact; 2693 2694 if (!dsl_dataset_get_uint64_array_feature(fromds, 2695 SPA_FEATURE_REDACTED_DATASETS, 2696 &dspp.numfromredactsnaps, 2697 &fromredact)) { 2698 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2699 } else if (dspp.numfromredactsnaps > 0) { 2700 uint64_t size = dspp.numfromredactsnaps * 2701 sizeof (uint64_t); 2702 dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP); 2703 memcpy(dspp.fromredactsnaps, fromredact, size); 2704 } 2705 2706 boolean_t is_before = 2707 dsl_dataset_is_before(dspp.to_ds, fromds, 0); 2708 dspp.is_clone = (dspp.to_ds->ds_dir != 2709 fromds->ds_dir); 2710 dsl_dataset_rele(fromds, FTAG); 2711 if (!is_before) { 2712 dsl_pool_rele(dspp.dp, FTAG); 2713 err = SET_ERROR(EXDEV); 2714 } else { 2715 err = dmu_send_impl(&dspp); 2716 } 2717 } else { 2718 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2719 err = dmu_send_impl(&dspp); 2720 } 2721 if (dspp.fromredactsnaps) 2722 kmem_free(dspp.fromredactsnaps, 2723 dspp.numfromredactsnaps * sizeof (uint64_t)); 2724 2725 dsl_dataset_rele(dspp.to_ds, FTAG); 2726 return (err); 2727 } 2728 2729 int 2730 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok, 2731 boolean_t large_block_ok, boolean_t compressok, boolean_t rawok, 2732 boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff, 2733 const char *redactbook, int outfd, offset_t *off, 2734 dmu_send_outparams_t *dsop) 2735 { 2736 int err = 0; 2737 ds_hold_flags_t dsflags; 2738 boolean_t owned = B_FALSE; 2739 dsl_dataset_t *fromds = NULL; 2740 zfs_bookmark_phys_t book = {0}; 2741 struct dmu_send_params dspp = {0}; 2742 2743 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; 2744 dspp.tosnap = tosnap; 2745 dspp.embedok = embedok; 2746 dspp.large_block_ok = large_block_ok; 2747 dspp.compressok = compressok; 2748 dspp.outfd = outfd; 2749 dspp.off = off; 2750 dspp.dso = dsop; 2751 dspp.tag = FTAG; 2752 dspp.resumeobj = resumeobj; 2753 dspp.resumeoff = resumeoff; 2754 dspp.rawok = rawok; 2755 dspp.savedok = savedok; 2756 2757 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL) 2758 return (SET_ERROR(EINVAL)); 2759 2760 err = dsl_pool_hold(tosnap, FTAG, &dspp.dp); 2761 if (err != 0) 2762 return (err); 2763 2764 if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) { 2765 /* 2766 * We are sending a filesystem or volume. Ensure 2767 * that it doesn't change by owning the dataset. 2768 */ 2769 2770 if (savedok) { 2771 /* 2772 * We are looking for the dataset that represents the 2773 * partially received send stream. If this stream was 2774 * received as a new snapshot of an existing dataset, 2775 * this will be saved in a hidden clone named 2776 * "<pool>/<dataset>/%recv". Otherwise, the stream 2777 * will be saved in the live dataset itself. In 2778 * either case we need to use dsl_dataset_own_force() 2779 * because the stream is marked as inconsistent, 2780 * which would normally make it unavailable to be 2781 * owned. 
2782 */ 2783 char *name = kmem_asprintf("%s/%s", tosnap, 2784 recv_clone_name); 2785 err = dsl_dataset_own_force(dspp.dp, name, dsflags, 2786 FTAG, &dspp.to_ds); 2787 if (err == ENOENT) { 2788 err = dsl_dataset_own_force(dspp.dp, tosnap, 2789 dsflags, FTAG, &dspp.to_ds); 2790 } 2791 2792 if (err == 0) { 2793 owned = B_TRUE; 2794 err = zap_lookup(dspp.dp->dp_meta_objset, 2795 dspp.to_ds->ds_object, 2796 DS_FIELD_RESUME_TOGUID, 8, 1, 2797 &dspp.saved_guid); 2798 } 2799 2800 if (err == 0) { 2801 err = zap_lookup(dspp.dp->dp_meta_objset, 2802 dspp.to_ds->ds_object, 2803 DS_FIELD_RESUME_TONAME, 1, 2804 sizeof (dspp.saved_toname), 2805 dspp.saved_toname); 2806 } 2807 /* Only disown if there was an error in the lookups */ 2808 if (owned && (err != 0)) 2809 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2810 2811 kmem_strfree(name); 2812 } else { 2813 err = dsl_dataset_own(dspp.dp, tosnap, dsflags, 2814 FTAG, &dspp.to_ds); 2815 if (err == 0) 2816 owned = B_TRUE; 2817 } 2818 } else { 2819 err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG, 2820 &dspp.to_ds); 2821 } 2822 2823 if (err != 0) { 2824 /* Note: dsl dataset is not owned at this point */ 2825 dsl_pool_rele(dspp.dp, FTAG); 2826 return (err); 2827 } 2828 2829 if (redactbook != NULL) { 2830 char path[ZFS_MAX_DATASET_NAME_LEN]; 2831 (void) strlcpy(path, tosnap, sizeof (path)); 2832 char *at = strchr(path, '@'); 2833 if (at == NULL) { 2834 err = EINVAL; 2835 } else { 2836 (void) snprintf(at, sizeof (path) - (at - path), "#%s", 2837 redactbook); 2838 err = dsl_bookmark_lookup(dspp.dp, path, 2839 NULL, &book); 2840 dspp.redactbook = &book; 2841 } 2842 } 2843 2844 if (err != 0) { 2845 dsl_pool_rele(dspp.dp, FTAG); 2846 if (owned) 2847 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2848 else 2849 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2850 return (err); 2851 } 2852 2853 if (fromsnap != NULL) { 2854 zfs_bookmark_phys_t *zb = &dspp.ancestor_zb; 2855 int fsnamelen; 2856 if (strpbrk(tosnap, "@#") != NULL) 2857 fsnamelen = strpbrk(tosnap, "@#") - tosnap; 2858 else 2859 fsnamelen = strlen(tosnap); 2860 2861 /* 2862 * If the fromsnap is in a different filesystem, then 2863 * mark the send stream as a clone. 2864 */ 2865 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 || 2866 (fromsnap[fsnamelen] != '@' && 2867 fromsnap[fsnamelen] != '#')) { 2868 dspp.is_clone = B_TRUE; 2869 } 2870 2871 if (strchr(fromsnap, '@') != NULL) { 2872 err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG, 2873 &fromds); 2874 2875 if (err != 0) { 2876 ASSERT3P(fromds, ==, NULL); 2877 } else { 2878 /* 2879 * We need to make a deep copy of the redact 2880 * snapshots of the from snapshot, because the 2881 * array will be freed when we evict from_ds. 
2882 */ 2883 uint64_t *fromredact; 2884 if (!dsl_dataset_get_uint64_array_feature( 2885 fromds, SPA_FEATURE_REDACTED_DATASETS, 2886 &dspp.numfromredactsnaps, 2887 &fromredact)) { 2888 dspp.numfromredactsnaps = 2889 NUM_SNAPS_NOT_REDACTED; 2890 } else if (dspp.numfromredactsnaps > 0) { 2891 uint64_t size = 2892 dspp.numfromredactsnaps * 2893 sizeof (uint64_t); 2894 dspp.fromredactsnaps = kmem_zalloc(size, 2895 KM_SLEEP); 2896 memcpy(dspp.fromredactsnaps, fromredact, 2897 size); 2898 } 2899 if (!dsl_dataset_is_before(dspp.to_ds, fromds, 2900 0)) { 2901 err = SET_ERROR(EXDEV); 2902 } else { 2903 zb->zbm_creation_txg = 2904 dsl_dataset_phys(fromds)-> 2905 ds_creation_txg; 2906 zb->zbm_creation_time = 2907 dsl_dataset_phys(fromds)-> 2908 ds_creation_time; 2909 zb->zbm_guid = 2910 dsl_dataset_phys(fromds)->ds_guid; 2911 zb->zbm_redaction_obj = 0; 2912 2913 if (dsl_dataset_is_zapified(fromds)) { 2914 (void) zap_lookup( 2915 dspp.dp->dp_meta_objset, 2916 fromds->ds_object, 2917 DS_FIELD_IVSET_GUID, 8, 1, 2918 &zb->zbm_ivset_guid); 2919 } 2920 } 2921 dsl_dataset_rele(fromds, FTAG); 2922 } 2923 } else { 2924 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2925 err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds, 2926 zb); 2927 if (err == EXDEV && zb->zbm_redaction_obj != 0 && 2928 zb->zbm_guid == 2929 dsl_dataset_phys(dspp.to_ds)->ds_guid) 2930 err = 0; 2931 } 2932 2933 if (err == 0) { 2934 /* dmu_send_impl will call dsl_pool_rele for us. */ 2935 err = dmu_send_impl(&dspp); 2936 } else { 2937 if (dspp.fromredactsnaps) 2938 kmem_free(dspp.fromredactsnaps, 2939 dspp.numfromredactsnaps * 2940 sizeof (uint64_t)); 2941 dsl_pool_rele(dspp.dp, FTAG); 2942 } 2943 } else { 2944 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2945 err = dmu_send_impl(&dspp); 2946 } 2947 if (owned) 2948 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2949 else 2950 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2951 return (err); 2952 } 2953 2954 static int 2955 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed, 2956 uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep) 2957 { 2958 int err = 0; 2959 uint64_t size; 2960 /* 2961 * Assume that space (both on-disk and in-stream) is dominated by 2962 * data. We will adjust for indirect blocks and the copies property, 2963 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records). 2964 */ 2965 2966 uint64_t recordsize; 2967 uint64_t record_count; 2968 objset_t *os; 2969 VERIFY0(dmu_objset_from_ds(ds, &os)); 2970 2971 /* Assume all (uncompressed) blocks are recordsize. */ 2972 if (zfs_override_estimate_recordsize != 0) { 2973 recordsize = zfs_override_estimate_recordsize; 2974 } else if (os->os_phys->os_type == DMU_OST_ZVOL) { 2975 err = dsl_prop_get_int_ds(ds, 2976 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize); 2977 } else { 2978 err = dsl_prop_get_int_ds(ds, 2979 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize); 2980 } 2981 if (err != 0) 2982 return (err); 2983 record_count = uncompressed / recordsize; 2984 2985 /* 2986 * If we're estimating a send size for a compressed stream, use the 2987 * compressed data size to estimate the stream size. Otherwise, use the 2988 * uncompressed data size. 2989 */ 2990 size = stream_compressed ? compressed : uncompressed; 2991 2992 /* 2993 * Subtract out approximate space used by indirect blocks. 2994 * Assume most space is used by data blocks (non-indirect, non-dnode). 2995 * Assume no ditto blocks or internal fragmentation. 
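 * Under these assumptions, each data block is referenced by exactly one
 * block pointer in its parent indirect block.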
2996 * 2997 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per 2998 * block. 2999 */ 3000 size -= record_count * sizeof (blkptr_t); 3001 3002 /* Add in the space for the record associated with each block. */ 3003 size += record_count * sizeof (dmu_replay_record_t); 3004 3005 *sizep = size; 3006 3007 return (0); 3008 } 3009 3010 int 3011 dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds, 3012 zfs_bookmark_phys_t *frombook, boolean_t stream_compressed, 3013 boolean_t saved, uint64_t *sizep) 3014 { 3015 int err; 3016 dsl_dataset_t *ds = origds; 3017 uint64_t uncomp, comp; 3018 3019 ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool)); 3020 ASSERT(fromds == NULL || frombook == NULL); 3021 3022 /* 3023 * If this is a saved send we may actually be sending 3024 * from the %recv clone used for resuming. 3025 */ 3026 if (saved) { 3027 objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset; 3028 uint64_t guid; 3029 char dsname[ZFS_MAX_DATASET_NAME_LEN + 6]; 3030 3031 dsl_dataset_name(origds, dsname); 3032 (void) strcat(dsname, "/"); 3033 (void) strlcat(dsname, recv_clone_name, sizeof (dsname)); 3034 3035 err = dsl_dataset_hold(origds->ds_dir->dd_pool, 3036 dsname, FTAG, &ds); 3037 if (err != ENOENT && err != 0) { 3038 return (err); 3039 } else if (err == ENOENT) { 3040 ds = origds; 3041 } 3042 3043 /* check that this dataset has partially received data */ 3044 err = zap_lookup(mos, ds->ds_object, 3045 DS_FIELD_RESUME_TOGUID, 8, 1, &guid); 3046 if (err != 0) { 3047 err = SET_ERROR(err == ENOENT ? EINVAL : err); 3048 goto out; 3049 } 3050 3051 err = zap_lookup(mos, ds->ds_object, 3052 DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname); 3053 if (err != 0) { 3054 err = SET_ERROR(err == ENOENT ? EINVAL : err); 3055 goto out; 3056 } 3057 } 3058 3059 /* tosnap must be a snapshot or the target of a saved send */ 3060 if (!ds->ds_is_snapshot && ds == origds) 3061 return (SET_ERROR(EINVAL)); 3062 3063 if (fromds != NULL) { 3064 uint64_t used; 3065 if (!fromds->ds_is_snapshot) { 3066 err = SET_ERROR(EINVAL); 3067 goto out; 3068 } 3069 3070 if (!dsl_dataset_is_before(ds, fromds, 0)) { 3071 err = SET_ERROR(EXDEV); 3072 goto out; 3073 } 3074 3075 err = dsl_dataset_space_written(fromds, ds, &used, &comp, 3076 &uncomp); 3077 if (err != 0) 3078 goto out; 3079 } else if (frombook != NULL) { 3080 uint64_t used; 3081 err = dsl_dataset_space_written_bookmark(frombook, ds, &used, 3082 &comp, &uncomp); 3083 if (err != 0) 3084 goto out; 3085 } else { 3086 uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes; 3087 comp = dsl_dataset_phys(ds)->ds_compressed_bytes; 3088 } 3089 3090 err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp, 3091 stream_compressed, sizep); 3092 /* 3093 * Add the size of the BEGIN and END records to the estimate. 
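 * For example (illustrative numbers only): a snapshot holding 1 GiB of
 * 128 KiB records is treated as 8192 records by
 * dmu_adjust_send_estimate_for_indirects(), so the two records added here
 * are a small constant next to the per-record adjustments made there.
 * (A saved stream omits DRR_END, but the estimate need not be exact.)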
3094 */ 3095 *sizep += 2 * sizeof (dmu_replay_record_t); 3096 3097 out: 3098 if (ds != origds) 3099 dsl_dataset_rele(ds, FTAG); 3100 return (err); 3101 } 3102 3103 ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW, 3104 "Allow sending corrupt data"); 3105 3106 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW, 3107 "Maximum send queue length"); 3108 3109 ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW, 3110 "Send unmodified spill blocks"); 3111 3112 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW, 3113 "Maximum send queue length for non-prefetch queues"); 3114 3115 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW, 3116 "Send queue fill fraction"); 3117 3118 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW, 3119 "Send queue fill fraction for non-prefetch queues"); 3120 3121 ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW, 3122 "Override block size estimate with fixed size"); 3123
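/*
 * Usage sketch (illustrative only, not part of the module): on Linux
 * builds these parameters appear under /sys/module/zfs/parameters/.
 * A send that is bottlenecked on demand reads might, for example, raise
 * the prefetch budget:
 *
 *	echo 33554432 > /sys/module/zfs/parameters/zfs_send_queue_length
 *
 * which lets the reader thread queue up to 32 MiB of prefetched data
 * ahead of the main thread (see the comment above dmu_send_impl()).
 */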