/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
/*
 * This tunable controls the amount of data (measured in bytes) that will be
 * prefetched by zfs send. If the main thread is blocking on reads that haven't
 * completed, this variable might need to be increased. If instead the main
 * thread is issuing new reads because the prefetches have fallen out of the
 * cache, this may need to be decreased.
 */
int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
 * This tunable controls the length of the queues that zfs send worker threads
 * use to communicate. If the send_main_thread is blocking on these queues,
 * this variable may need to be increased. If there is a significant slowdown
 * at the start of a send as these threads consume all the available IO
 * resources, this variable may need to be decreased.
 */
int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
 * These tunables control the fill fraction of the queues by zfs send. The fill
 * fraction controls the frequency with which threads have to be cv_signaled.
 * If a lot of cpu time is being spent on cv_signal, then these should be tuned
 * down. If the queues empty before the signalled thread can catch up, then
 * these should be tuned up.
 */
int zfs_send_queue_ff = 20;
int zfs_send_no_prefetch_queue_ff = 20;

/*
 * Use this to override the recordsize calculation for fast zfs send estimates.
 */
int zfs_override_estimate_recordsize = 0;

/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
int zfs_send_unmodified_spill_blocks = B_TRUE;

static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
	uint64_t temp = a * b;
	if (b != 0 && temp / b != a)
		return (B_FALSE);
	*c = temp;
	return (B_TRUE);
}

struct send_thread_arg {
	bqueue_t q;
	objset_t *os;		/* Objset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	uint64_t *num_blocks_visited;
};

struct redact_list_thread_arg {
	boolean_t cancel;
	bqueue_t q;
	zbookmark_phys_t resume;
	redaction_list_t *rl;
	boolean_t mark_redact;
	int error_code;
	uint64_t *num_blocks_visited;
};

struct send_merge_thread_arg {
	bqueue_t q;
	objset_t *os;
	struct redact_list_thread_arg *from_arg;
	struct send_thread_arg *to_arg;
	struct redact_list_thread_arg *redact_arg;
	int error;
	boolean_t cancel;
};

struct send_range {
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t object;
	uint64_t start_blkid;
	uint64_t end_blkid;
	bqueue_node_t ln;
	enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
	    PREVIOUSLY_REDACTED} type;
	union {
		struct srd {
			dmu_object_type_t obj_type;
			uint32_t datablksz; // logical size
			uint32_t datasz; // payload size
			blkptr_t bp;
			arc_buf_t *abuf;
			abd_t *abd;
			kmutex_t lock;
			kcondvar_t cv;
			boolean_t io_outstanding;
			int io_err;
		} data;
		struct srh {
			uint32_t datablksz;
		} hole;
		struct sro {
			/*
			 * This is a pointer because embedding it in the
			 * struct causes these structures to be massively larger
			 * for all range types; this makes the code much less
			 * memory efficient.
			 */
			dnode_phys_t *dnp;
			blkptr_t bp;
		} object;
		struct srr {
			uint32_t datablksz;
		} redact;
		struct sror {
			blkptr_t bp;
		} object_range;
	} sru;
};
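
/*
 * Note on send_range usage: every producer in this file creates DATA ranges
 * that cover exactly one block (start_blkid + 1 == end_blkid); do_dump() and
 * enqueue_range() assert this. HOLE and REDACT ranges, by contrast, may cover
 * any number of consecutive blocks.
 */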

/*
 * The list of data whose inclusion in a send stream can be pending from
 * one call to backup_cb to another. Multiple calls to dump_free(),
 * dump_freeobjects(), and dump_redact() can be aggregated into a single
 * DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
 */
typedef enum {
	PENDING_NONE,
	PENDING_FREE,
	PENDING_FREEOBJECTS,
	PENDING_REDACT
} dmu_pendop_t;

typedef struct dmu_send_cookie {
	dmu_replay_record_t *dsc_drr;
	dmu_send_outparams_t *dsc_dso;
	offset_t *dsc_off;
	objset_t *dsc_os;
	zio_cksum_t dsc_zc;
	uint64_t dsc_toguid;
	uint64_t dsc_fromtxg;
	int dsc_err;
	dmu_pendop_t dsc_pending_op;
	uint64_t dsc_featureflags;
	uint64_t dsc_last_data_object;
	uint64_t dsc_last_data_offset;
	uint64_t dsc_resume_object;
	uint64_t dsc_resume_offset;
	boolean_t dsc_sent_begin;
	boolean_t dsc_sent_end;
} dmu_send_cookie_t;

static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);

static void
range_free(struct send_range *range)
{
	if (range->type == OBJECT) {
		size_t size = sizeof (dnode_phys_t) *
		    (range->sru.object.dnp->dn_extra_slots + 1);
		kmem_free(range->sru.object.dnp, size);
	} else if (range->type == DATA) {
		mutex_enter(&range->sru.data.lock);
		while (range->sru.data.io_outstanding)
			cv_wait(&range->sru.data.cv, &range->sru.data.lock);
		if (range->sru.data.abd != NULL)
			abd_free(range->sru.data.abd);
		if (range->sru.data.abuf != NULL) {
			arc_buf_destroy(range->sru.data.abuf,
			    &range->sru.data.abuf);
		}
		mutex_exit(&range->sru.data.lock);

		cv_destroy(&range->sru.data.cv);
		mutex_destroy(&range->sru.data.lock);
	}
	kmem_free(range, sizeof (*range));
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
	dmu_send_outparams_t *dso = dscp->dsc_dso;
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dscp->dsc_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dscp->dsc_zc);
	if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
		dscp->dsc_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
		    drr_checksum.drr_checksum));
		dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
	}
	if (dscp->dsc_drr->drr_type == DRR_END) {
		dscp->dsc_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dscp->dsc_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dscp->dsc_zc);
	*dscp->dsc_off += sizeof (dmu_replay_record_t);
	dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
	    sizeof (dmu_replay_record_t), dso->dso_arg);
	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		*dscp->dsc_off += payload_len;
		/*
		 * payload is null when dso_dryrun == B_TRUE (i.e. when we're
		 * doing a send size calculation)
		 */
		if (payload != NULL) {
			(void) fletcher_4_incremental_native(
			    payload, payload_len, &dscp->dsc_zc);
		}

		/*
		 * The code does not rely on this (len being a multiple of 8).
		 * We keep this assertion because of the corresponding assertion
		 * in receive_read(). Keeping this assertion ensures that we do
		 * not inadvertently break backwards compatibility (causing the
		 * assertion in receive_read() to trigger on old software).
		 *
		 * Raw sends cannot be received on old software, and so can
		 * bypass this assertion.
		 */

		ASSERT((payload_len % 8 == 0) ||
		    (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));

		dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
		    payload_len, dso->dso_arg);
		if (dscp->dsc_err != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}

/*
 * Fill in the drr_free struct, or perform aggregation if the previous record
 * is also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the
 * free and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset. We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREE) {
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			if (offset + length < offset || length == UINT64_MAX)
				drrf->drr_length = UINT64_MAX;
			else
				drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation. Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	if (offset + length < offset)
		drrf->drr_length = DMU_OBJECT_END;
	else
		drrf->drr_length = length;
	drrf->drr_toguid = dscp->dsc_toguid;
	if (length == DMU_OBJECT_END) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dscp->dsc_pending_op = PENDING_FREE;
	}

	return (0);
}

/*
 * Fill in the drr_redact struct, or perform aggregation if the previous record
 * is also a redaction record, and the two are adjacent.
 */
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;

	/*
	 * If there is a pending op, but it's not PENDING_REDACT, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_REDACT records can only be aggregated with
	 * other DRR_REDACT records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_REDACT) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_REDACT) {
		/*
		 * Check to see whether this redacted block can be aggregated
		 * with the pending one.
		 */
		if (drrr->drr_object == object && drrr->drr_offset +
		    drrr->drr_length == offset) {
			drrr->drr_length += length;
			return (0);
		} else {
			/* not a continuation. Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a REDACT record and make it pending */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_REDACT;
	drrr->drr_object = object;
	drrr->drr_offset = offset;
	drrr->drr_length = length;
	drrr->drr_toguid = dscp->dsc_toguid;
	dscp->dsc_pending_op = PENDING_REDACT;

	return (0);
}

static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
    uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
{
	uint64_t payload_size;
	boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));
	dscp->dsc_last_data_object = object;
	dscp->dsc_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed or raw */
	if (raw || lsize != psize) {
		ASSERT(raw || dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3S(psize, >, 0);

		if (raw) {
			ASSERT(BP_IS_PROTECTED(bp));

			/*
			 * This is a raw protected block so we need to pass
			 * along everything the receiving side will need to
			 * interpret this block, including the byteswap, salt,
			 * IV, and MAC.
			 */
			if (BP_SHOULD_BYTESWAP(bp))
				drrw->drr_flags |= DRR_RAW_BYTESWAP;
			zio_crypt_decode_params_bp(bp, drrw->drr_salt,
			    drrw->drr_iv);
			zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
		} else {
			/* this is a compressed block */
			ASSERT(dscp->dsc_featureflags &
			    DMU_BACKUP_FEATURE_COMPRESSED);
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
			ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
			ASSERT3S(lsize, >=, psize);
		}

		/* set fields common to compressed and raw sends */
		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
		/*
		 * There's no pre-computed checksum for partial-block writes,
		 * embedded BP's, or encrypted BP's that are being sent as
		 * plaintext, so (like fletcher4-checksummed blocks) userland
		 * will have to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dscp->dsc_drr->drr_u.drr_write_embedded);

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dscp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    void *data)
{
	struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
	uint64_t blksz = BP_GET_LSIZE(bp);
	uint64_t payload_size = blksz;

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dscp->dsc_toguid;

	/* See comment in dump_dnode() for full details */
	if (zfs_send_unmodified_spill_blocks &&
	    (bp->blk_birth <= dscp->dsc_fromtxg)) {
		drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
	}

	/* handle raw send fields */
	if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		ASSERT(BP_IS_PROTECTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrs->drr_compressed_size = BP_GET_PSIZE(bp);
		zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
		zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
		payload_size = drrs->drr_compressed_size;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing. To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj <= firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated. Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dscp->dsc_toguid;

	dscp->dsc_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
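
/*
 * Illustration of the pending-op aggregation implemented by dump_free(),
 * dump_redact(), and dump_freeobjects() above: two back-to-back calls such as
 *
 *	dump_free(dscp, obj, 0, 4096);
 *	dump_free(dscp, obj, 4096, 4096);
 *
 * produce a single DRR_FREE record covering offsets [0, 8192); the pending
 * record is pushed to the stream once a non-adjacent range or a record of a
 * different type comes along.
 */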

static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
	int bonuslen;

	if (object < dscp->dsc_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from. In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from. We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dscp->dsc_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dscp, object, 1));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dscp->dsc_toguid;

	if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);

	if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
		ASSERT(BP_IS_ENCRYPTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

		/* needed for reconstructing dnp on recv side */
		drro->drr_maxblkid = dnp->dn_maxblkid;
		drro->drr_indblkshift = dnp->dn_indblkshift;
		drro->drr_nlevels = dnp->dn_nlevels;
		drro->drr_nblkptr = dnp->dn_nblkptr;

		/*
		 * Since we encrypt the entire bonus area, the (raw) part
		 * beyond the bonuslen is actually nonzero, so we need
		 * to send it.
		 */
		if (bonuslen != 0) {
			drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
			bonuslen = drro->drr_raw_bonuslen;
		}
	}

	/*
	 * DRR_OBJECT_SPILL is set for every dnode which references a
	 * spill block. This allows the receiving pool to definitively
	 * determine when a spill block should be kept or freed.
	 */
	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		drro->drr_flags |= DRR_OBJECT_SPILL;

	if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
		return (SET_ERROR(EINTR));

	/*
	 * Send DRR_SPILL records for unmodified spill blocks. This is useful
	 * because changing certain attributes of the object (e.g. blocksize)
	 * can cause old versions of ZFS to incorrectly remove a spill block.
	 * Including these records in the stream forces an up-to-date version
	 * to always be written, ensuring they're never lost. Current versions
	 * of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
	 * ignore these unmodified spill blocks.
	 */
	if (zfs_send_unmodified_spill_blocks &&
	    (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
	    (DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
		struct send_range record;
		blkptr_t *bp = DN_SPILL_BLKPTR(dnp);

		bzero(&record, sizeof (struct send_range));
		record.type = DATA;
		record.object = object;
		record.eos_marker = B_FALSE;
		record.start_blkid = DMU_SPILL_BLKID;
		record.end_blkid = record.start_blkid + 1;
		record.sru.data.bp = *bp;
		record.sru.data.obj_type = dnp->dn_type;
		record.sru.data.datablksz = BP_GET_LSIZE(bp);

		if (do_dump(dscp, &record) != 0)
			return (SET_ERROR(EINTR));
	}

	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));

	return (0);
}

static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
    uint64_t firstobj, uint64_t numslots)
{
	struct drr_object_range *drror =
	    &(dscp->dsc_drr->drr_u.drr_object_range);

	/* we only use this record type for raw sends */
	ASSERT(BP_IS_PROTECTED(bp));
	ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
	ASSERT0(BP_GET_LEVEL(bp));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dscp->dsc_toguid;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
	zio_crypt_decode_mac_bp(bp, drror->drr_mac);

	if (dump_record(dscp, NULL, 0) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * If we have not set the ZSTD feature flag, we can't send ZSTD
	 * compressed embedded blocks, as the receiver may not support them.
	 */
	if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
	    !(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
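
/*
 * For example, a BP_EMBEDDED_TYPE_DATA block is only eligible for a
 * DRR_WRITE_EMBEDDED record when DMU_BACKUP_FEATURE_EMBED_DATA is set (and,
 * for non-legacy compression such as lz4 or zstd, when the matching
 * compression feature flag is set as well); otherwise do_dump() falls back to
 * sending the block as an ordinary DRR_WRITE.
 */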

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, and calling the appropriate helper function. In most cases,
 * the data has already been read by send_reader_thread().
 */
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
	int err = 0;
	switch (range->type) {
	case OBJECT:
		err = dump_dnode(dscp, &range->sru.object.bp, range->object,
		    range->sru.object.dnp);
		return (err);
	case OBJECT_RANGE: {
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
			return (0);
		}
		uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
		    DNODE_SHIFT;
		uint64_t firstobj = range->start_blkid * epb;
		err = dump_object_range(dscp, &range->sru.object_range.bp,
		    firstobj, epb);
		break;
	}
	case REDACT: {
		struct srr *srrp = &range->sru.redact;
		err = dump_redact(dscp, range->object, range->start_blkid *
		    srrp->datablksz, (range->end_blkid - range->start_blkid) *
		    srrp->datablksz);
		return (err);
	}
	case DATA: {
		struct srd *srdp = &range->sru.data;
		blkptr_t *bp = &srdp->bp;
		spa_t *spa =
		    dmu_objset_spa(dscp->dsc_os);

		ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (BP_GET_TYPE(bp) == DMU_OT_SA) {
			arc_flags_t aflags = ARC_FLAG_WAIT;
			enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

			if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
				ASSERT(BP_IS_PROTECTED(bp));
				zioflags |= ZIO_FLAG_RAW;
			}

			zbookmark_phys_t zb;
			ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
			zb.zb_objset = dmu_objset_id(dscp->dsc_os);
			zb.zb_object = range->object;
			zb.zb_level = 0;
			zb.zb_blkid = range->start_blkid;

			arc_buf_t *abuf = NULL;
			if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
			    bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
			    zioflags, &aflags, &zb) != 0)
				return (SET_ERROR(EIO));

			err = dump_spill(dscp, bp, zb.zb_object,
			    (abuf == NULL ? NULL : abuf->b_data));
			if (abuf != NULL)
				arc_buf_destroy(abuf, &abuf);
			return (err);
		}
		if (send_do_embed(bp, dscp->dsc_featureflags)) {
			err = dump_write_embedded(dscp, range->object,
			    range->start_blkid * srdp->datablksz,
			    srdp->datablksz, bp);
			return (err);
		}
		ASSERT(range->object > dscp->dsc_resume_object ||
		    (range->object == dscp->dsc_resume_object &&
		    range->start_blkid * srdp->datablksz >=
		    dscp->dsc_resume_offset));
		/* it's a level-0 block of a regular object */

		mutex_enter(&srdp->lock);
		while (srdp->io_outstanding)
			cv_wait(&srdp->cv, &srdp->lock);
		err = srdp->io_err;
		mutex_exit(&srdp->lock);

		if (err != 0) {
			if (zfs_send_corrupt_data &&
			    !dscp->dsc_dso->dso_dryrun) {
				/*
				 * Send a block filled with 0x"zfs badd bloc"
				 */
				srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
				    ARC_BUFC_DATA, srdp->datablksz);
				uint64_t *ptr;
				for (ptr = srdp->abuf->b_data;
				    (char *)ptr < (char *)srdp->abuf->b_data +
				    srdp->datablksz; ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		ASSERT(dscp->dsc_dso->dso_dryrun ||
		    srdp->abuf != NULL || srdp->abd != NULL);

		uint64_t offset = range->start_blkid * srdp->datablksz;

		char *data = NULL;
		if (srdp->abd != NULL) {
			data = abd_to_buf(srdp->abd);
			ASSERT3P(srdp->abuf, ==, NULL);
		} else if (srdp->abuf != NULL) {
			data = srdp->abuf->b_data;
		}

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
			while (srdp->datablksz > 0 && err == 0) {
				int n = MIN(srdp->datablksz,
				    SPA_OLD_MAXBLOCKSIZE);
				err = dmu_dump_write(dscp, srdp->obj_type,
				    range->object, offset, n, n, NULL, data);
				offset += n;
				/*
				 * When doing dry run, data==NULL is used as a
				 * sentinel value by
				 * dmu_dump_write()->dump_record().
				 */
				if (data != NULL)
					data += n;
				srdp->datablksz -= n;
			}
		} else {
			err = dmu_dump_write(dscp, srdp->obj_type,
			    range->object, offset,
			    srdp->datablksz, srdp->datasz, bp, data);
		}
		return (err);
	}
	case HOLE: {
		struct srh *srhp = &range->sru.hole;
		if (range->object == DMU_META_DNODE_OBJECT) {
			uint32_t span = srhp->datablksz >> DNODE_SHIFT;
			uint64_t first_obj = range->start_blkid * span;
			uint64_t numobj = range->end_blkid * span - first_obj;
			return (dump_freeobjects(dscp, first_obj, numobj));
		}
		uint64_t offset = 0;

		/*
		 * If this multiply overflows, we don't need to send this block.
		 * Even if it has a birth time, it can never not be a hole, so
		 * we don't need to send records for it.
		 */
		if (!overflow_multiply(range->start_blkid, srhp->datablksz,
		    &offset)) {
			return (0);
		}
		uint64_t len = 0;

		if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
			len = UINT64_MAX;
		len = len - offset;
		return (dump_free(dscp, range->object, offset, len));
	}
	default:
		panic("Invalid range type in do_dump: %d", range->type);
	}
	return (err);
}

static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
    uint64_t end_blkid, boolean_t eos)
{
	struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
	range->type = type;
	range->object = object;
	range->start_blkid = start_blkid;
	range->end_blkid = end_blkid;
	range->eos_marker = eos;
	if (type == DATA) {
		range->sru.data.abd = NULL;
		range->sru.data.abuf = NULL;
		mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
		range->sru.data.io_outstanding = 0;
		range->sru.data.io_err = 0;
	}
	return (range);
}

/*
 * This is the callback function to traverse_dataset that acts as a worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_range *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (sta->os->os_encrypted &&
	    !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
		spa_log_error(spa, zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(sta->os));
		return (SET_ERROR(EIO));
	}

	if (sta->cancel)
		return (SET_ERROR(EINTR));
	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object))
		return (0);
	atomic_inc_64(sta->num_blocks_visited);

	if (zb->zb_level == ZB_DNODE_LEVEL) {
		if (zb->zb_object == DMU_META_DNODE_OBJECT)
			return (0);
		record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
		record->sru.object.bp = *bp;
		size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
		record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
		bcopy(dnp, record->sru.object.dnp, size);
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
	    !BP_IS_HOLE(bp)) {
		record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
		    zb->zb_blkid + 1, B_FALSE);
		record->sru.object_range.bp = *bp;
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
		return (0);
	if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
		return (0);

	uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
	uint64_t start;

	/*
	 * If this multiply overflows, we don't need to send this block.
	 * Even if it has a birth time, it can never not be a hole, so
	 * we don't need to send records for it.
	 */
	if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
	    DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
	    span * zb->zb_blkid > dnp->dn_maxblkid)) {
		ASSERT(BP_IS_HOLE(bp));
		return (0);
	}

	if (zb->zb_blkid == DMU_SPILL_BLKID)
		ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);

	enum type record_type = DATA;
	if (BP_IS_HOLE(bp))
		record_type = HOLE;
	else if (BP_IS_REDACTED(bp))
		record_type = REDACT;
	else
		record_type = DATA;

	record = range_alloc(record_type, zb->zb_object, start,
	    (start + span < start ? 0 : start + span), B_FALSE);

	uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
	    BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (BP_IS_HOLE(bp)) {
		record->sru.hole.datablksz = datablksz;
	} else if (BP_IS_REDACTED(bp)) {
		record->sru.redact.datablksz = datablksz;
	} else {
		record->sru.data.datablksz = datablksz;
		record->sru.data.obj_type = dnp->dn_type;
		record->sru.data.bp = *bp;
	}

	bqueue_enqueue(&sta->q, record, sizeof (*record));
	return (0);
}

struct redact_list_cb_arg {
	uint64_t *num_blocks_visited;
	bqueue_t *q;
	boolean_t *cancel;
	boolean_t mark_redact;
};

static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
	struct redact_list_cb_arg *rlcap = arg;

	atomic_inc_64(rlcap->num_blocks_visited);
	if (*rlcap->cancel)
		return (-1);

	struct send_range *data = range_alloc(REDACT, rb->rbp_object,
	    rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
	ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
	if (rlcap->mark_redact) {
		data->type = REDACT;
		data->sru.redact.datablksz = redact_block_get_size(rb);
	} else {
		data->type = PREVIOUSLY_REDACTED;
	}
	bqueue_enqueue(rlcap->q, data, sizeof (*data));

	return (0);
}

/*
 * This function kicks off the traverse_dataset. It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err = 0;
	struct send_range *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
	    st_arg->fromtxg, &st_arg->resume,
	    st_arg->flags, send_cb, st_arg);

	if (err != EINTR)
		st_arg->error_code = err;
	data = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
	spl_fstrans_unmark(cookie);
	thread_exit();
}
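
/*
 * Ordering example for the comparison helpers below: an OBJECT_RANGE record
 * sorts before the OBJECT records for the dnodes it covers, an OBJECT record
 * sorts before any DATA, HOLE, or REDACT range inside that object, and ranges
 * in the meta dnode (object == 0) are compared by the span of object IDs
 * they cover.
 */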

/*
 * Utility function that causes End of Stream records to compare after all
 * others, so that other threads' comparison logic can stay simple.
 */
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
	if (from->eos_marker == B_TRUE)
		return (1);
	if (to->eos_marker == B_TRUE)
		return (-1);

	uint64_t from_obj = from->object;
	uint64_t from_end_obj = from->object + 1;
	uint64_t to_obj = to->object;
	uint64_t to_end_obj = to->object + 1;
	if (from_obj == 0) {
		ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
		from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
		from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}
	if (to_obj == 0) {
		ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
		to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
		to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}

	if (from_end_obj <= to_obj)
		return (-1);
	if (from_obj >= to_end_obj)
		return (1);
	int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
	    OBJECT_RANGE);
	if (unlikely(cmp))
		return (cmp);
	cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
	if (unlikely(cmp))
		return (cmp);
	if (from->end_blkid <= to->start_blkid)
		return (-1);
	if (from->start_blkid >= to->end_blkid)
		return (1);
	return (0);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, but do not free the old data. This is used so that the
 * records can be sent on to the main thread without copying the data.
 */
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = bqueue_dequeue(bq);
	ASSERT3S(send_range_after(prev, next), ==, -1);
	return (next);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, and free the old data.
 */
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = get_next_range_nofree(bq, prev);
	range_free(prev);
	return (next);
}

static void
redact_list_thread(void *arg)
{
	struct redact_list_thread_arg *rlt_arg = arg;
	struct send_range *record;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	if (rlt_arg->rl != NULL) {
		struct redact_list_cb_arg rlcba = {0};
		rlcba.cancel = &rlt_arg->cancel;
		rlcba.q = &rlt_arg->q;
		rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
		rlcba.mark_redact = rlt_arg->mark_redact;
		int err = dsl_redaction_list_traverse(rlt_arg->rl,
		    &rlt_arg->resume, redact_list_cb, &rlcba);
		if (err != EINTR)
			rlt_arg->error_code = err;
	}
	record = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
	spl_fstrans_unmark(cookie);

	thread_exit();
}

/*
 * Compare the start point of the two provided ranges. End of stream ranges
 * compare last, objects compare before any data or hole inside that object,
 * and before multi-object holes that start at the same object.
 */
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
	uint64_t r1_objequiv = r1->object;
	uint64_t r1_l0equiv = r1->start_blkid;
	uint64_t r2_objequiv = r2->object;
	uint64_t r2_l0equiv = r2->start_blkid;
	int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
	if (unlikely(cmp))
		return (cmp);
	if (r1->object == 0) {
		r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
		r1_l0equiv = 0;
	}
	if (r2->object == 0) {
		r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
		r2_l0equiv = 0;
	}

	cmp = TREE_CMP(r1_objequiv, r2_objequiv);
	if (likely(cmp))
		return (cmp);
	cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
	if (unlikely(cmp))
		return (cmp);
	cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
	if (unlikely(cmp))
		return (cmp);

	return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}

enum q_idx {
	REDACT_IDX = 0,
	TO_IDX,
	FROM_IDX,
	NUM_THREADS
};

/*
 * This function returns the next range the send_merge_thread should operate
 * on. The inputs are two arrays; the first one stores the range at the front
 * of the queues stored in the second one. The ranges are sorted in descending
 * priority order; the metadata from earlier ranges overrules metadata from
 * later ranges. out_mask is used to return which threads the ranges came from;
 * bit i is set if ranges[i] started at the same place as the returned range.
 *
 * This code is not hardcoded to compare a specific number of threads; it could
 * be used with any number, just by changing the q_idx enum.
 *
 * The "next range" is the one with the earliest start; if two starts are
 * equal, the highest-priority range is the next to operate on. If a
 * higher-priority range starts in the middle of the first range, then the
 * first range will be truncated to end where the higher-priority range starts,
 * and we will operate on that one next time. In this way, we make sure that
 * each block covered by some range gets covered by a returned range, and each
 * block covered is returned using the metadata of the highest-priority range
 * it appears in.
 *
 * For example, if the three ranges at the front of the queues were [2,4),
 * [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
 * from the third range, [2,4) with the metadata from the first range, and then
 * [4,5) with the metadata from the second.
 */
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
	int idx = 0; // index of the range with the earliest start
	int i;
	uint64_t bmask = 0;
	for (i = 1; i < NUM_THREADS; i++) {
		if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
			idx = i;
	}
	if (ranges[idx]->eos_marker) {
		struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
		*out_mask = 0;
		return (ret);
	}
	/*
	 * Find all the ranges that start at that same point.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
			bmask |= 1 << i;
	}
	*out_mask = bmask;
	/*
	 * OBJECT_RANGE records only come from the TO thread, and should always
	 * be treated as overlapping with nothing and sent on immediately.
	 * They are only used in raw sends, and are never redacted.
	 */
	if (ranges[idx]->type == OBJECT_RANGE) {
		ASSERT3U(idx, ==, TO_IDX);
		ASSERT3U(*out_mask, ==, 1 << TO_IDX);
		struct send_range *ret = ranges[idx];
		ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
		return (ret);
	}
	/*
	 * Find the first start or end point after the start of the first range.
	 */
	uint64_t first_change = ranges[idx]->end_blkid;
	for (i = 0; i < NUM_THREADS; i++) {
		if (i == idx || ranges[i]->eos_marker ||
		    ranges[i]->object > ranges[idx]->object ||
		    ranges[i]->object == DMU_META_DNODE_OBJECT)
			continue;
		ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
		if (first_change > ranges[i]->start_blkid &&
		    (bmask & (1 << i)) == 0)
			first_change = ranges[i]->start_blkid;
		else if (first_change > ranges[i]->end_blkid)
			first_change = ranges[i]->end_blkid;
	}
	/*
	 * Update all ranges to no longer overlap with the range we're
	 * returning. All such ranges must start at the same place as the range
	 * being returned, and end at or after first_change. Thus we update
	 * their start to first_change. If that makes them size 0, then free
	 * them and pull a new range from that thread.
	 */
	for (i = 0; i < NUM_THREADS; i++) {
		if (i == idx || (bmask & (1 << i)) == 0)
			continue;
		ASSERT3U(first_change, >, ranges[i]->start_blkid);
		ranges[i]->start_blkid = first_change;
		ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
		if (ranges[i]->start_blkid == ranges[i]->end_blkid)
			ranges[i] = get_next_range(qs[i], ranges[i]);
	}
	/*
	 * Short-circuit the simple case; if the range doesn't overlap with
	 * anything else, or it only overlaps with things that start at the
	 * same place and are longer, send it on.
	 */
	if (first_change == ranges[idx]->end_blkid) {
		struct send_range *ret = ranges[idx];
		ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
		return (ret);
	}

	/*
	 * Otherwise, return a truncated copy of ranges[idx] and move the start
	 * of ranges[idx] back to first_change.
	 */
	struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
	*ret = *ranges[idx];
	ret->end_blkid = first_change;
	ranges[idx]->start_blkid = first_change;
	return (ret);
}

#define	FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
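
/*
 * A range whose start was found in both the FROM and REDACT queues has all of
 * FROM_AND_REDACT_BITS set in the mask returned by find_next_range();
 * send_merge_thread() below drops such ranges, since the corresponding blocks
 * are already redacted on the receiving system.
 */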

/*
 * Merge the results from the from thread and the to thread, and then hand the
 * records off to send_reader_thread to prefetch them. If this is not a
 * send from a redaction bookmark, the from thread will push an end of stream
 * record and stop, and we'll just send everything that was changed in the
 * to_ds since the ancestor's creation txg. If it is, then since
 * traverse_dataset has a canonical order, we can compare each change as
 * they're pulled off the queues. That will give us a stream that is
 * appropriately sorted, and covers all records. In addition, we pull the
 * data from the redact_list_thread and use that to determine which blocks
 * should be redacted.
 */
static void
send_merge_thread(void *arg)
{
	struct send_merge_thread_arg *smt_arg = arg;
	struct send_range *front_ranges[NUM_THREADS];
	bqueue_t *queues[NUM_THREADS];
	int err = 0;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	if (smt_arg->redact_arg == NULL) {
		front_ranges[REDACT_IDX] =
		    kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
		front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
		front_ranges[REDACT_IDX]->type = REDACT;
		queues[REDACT_IDX] = NULL;
	} else {
		front_ranges[REDACT_IDX] =
		    bqueue_dequeue(&smt_arg->redact_arg->q);
		queues[REDACT_IDX] = &smt_arg->redact_arg->q;
	}
	front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
	queues[TO_IDX] = &smt_arg->to_arg->q;
	front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
	queues[FROM_IDX] = &smt_arg->from_arg->q;
	uint64_t mask = 0;
	struct send_range *range;
	for (range = find_next_range(front_ranges, queues, &mask);
	    !range->eos_marker && err == 0 && !smt_arg->cancel;
	    range = find_next_range(front_ranges, queues, &mask)) {
		/*
		 * If the range in question was in both the from redact bookmark
		 * and the bookmark we're using to redact, then don't send it.
		 * It's already redacted on the receiving system, so a redaction
		 * record would be redundant.
		 */
		if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
			ASSERT3U(range->type, ==, REDACT);
			range_free(range);
			continue;
		}
		bqueue_enqueue(&smt_arg->q, range, sizeof (*range));

		if (smt_arg->to_arg->error_code != 0) {
			err = smt_arg->to_arg->error_code;
		} else if (smt_arg->from_arg->error_code != 0) {
			err = smt_arg->from_arg->error_code;
		} else if (smt_arg->redact_arg != NULL &&
		    smt_arg->redact_arg->error_code != 0) {
			err = smt_arg->redact_arg->error_code;
		}
	}
	if (smt_arg->cancel && err == 0)
		err = SET_ERROR(EINTR);
	smt_arg->error = err;
	if (smt_arg->error != 0) {
		smt_arg->to_arg->cancel = B_TRUE;
		smt_arg->from_arg->cancel = B_TRUE;
		if (smt_arg->redact_arg != NULL)
			smt_arg->redact_arg->cancel = B_TRUE;
	}
	for (int i = 0; i < NUM_THREADS; i++) {
		while (!front_ranges[i]->eos_marker) {
			front_ranges[i] = get_next_range(queues[i],
			    front_ranges[i]);
		}
		range_free(front_ranges[i]);
	}
	if (range == NULL)
		range = kmem_zalloc(sizeof (*range), KM_SLEEP);
	range->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&smt_arg->q, range, 1);
	spl_fstrans_unmark(cookie);
	thread_exit();
}

struct send_reader_thread_arg {
	struct send_merge_thread_arg *smta;
	bqueue_t q;
	boolean_t cancel;
	boolean_t issue_reads;
	uint64_t featureflags;
	int error;
};

static void
dmu_send_read_done(zio_t *zio)
{
	struct send_range *range = zio->io_private;

	mutex_enter(&range->sru.data.lock);
	if (zio->io_error != 0) {
		abd_free(range->sru.data.abd);
		range->sru.data.abd = NULL;
		range->sru.data.io_err = zio->io_error;
	}

	ASSERT(range->sru.data.io_outstanding);
	range->sru.data.io_outstanding = B_FALSE;
	cv_broadcast(&range->sru.data.cv);
	mutex_exit(&range->sru.data.lock);
}
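
/*
 * dmu_send_read_done() runs in zio completion context: it records any error
 * and clears io_outstanding under sru.data.lock, then broadcasts the cv so
 * that do_dump() and range_free(), which wait for io_outstanding to clear,
 * can safely use or release the buffer.
 */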

static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
	struct srd *srdp = &range->sru.data;
	blkptr_t *bp = &srdp->bp;
	objset_t *os = srta->smta->os;

	ASSERT3U(range->type, ==, DATA);
	ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
	/*
	 * If we have large blocks stored on disk but
	 * the send flags don't allow us to send large
	 * blocks, we split the data from the arc buf
	 * into chunks.
	 */
	boolean_t split_large_blocks =
	    srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
	    !(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
	/*
	 * We should only request compressed data from the ARC if all
	 * the following are true:
	 *  - stream compression was requested
	 *  - we aren't splitting large blocks into smaller chunks
	 *  - the data won't need to be byteswapped before sending
	 *  - this isn't an embedded block
	 *  - this isn't metadata (if receiving on a different endian
	 *    system it can be byteswapped more easily)
	 */
	boolean_t request_compressed =
	    (srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
	    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
	    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

	enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

	if (srta->featureflags & DMU_BACKUP_FEATURE_RAW)
		zioflags |= ZIO_FLAG_RAW;
	else if (request_compressed)
		zioflags |= ZIO_FLAG_RAW_COMPRESS;

	srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
	    BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);

	if (!srta->issue_reads)
		return;
	if (BP_IS_REDACTED(bp))
		return;
	if (send_do_embed(bp, srta->featureflags))
		return;

	zbookmark_phys_t zb = {
		.zb_objset = dmu_objset_id(os),
		.zb_object = range->object,
		.zb_level = 0,
		.zb_blkid = range->start_blkid,
	};

	arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;

	int arc_err = arc_read(NULL, os->os_spa, bp,
	    arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
	    zioflags, &aflags, &zb);
	/*
	 * If the data is not already cached in the ARC, we read directly
	 * from zio. This avoids the performance overhead of adding a new
	 * entry to the ARC, and we also avoid polluting the ARC cache with
	 * data that is not likely to be used in the future.
	 */
	if (arc_err != 0) {
		srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
		srdp->io_outstanding = B_TRUE;
		zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
		    srdp->datasz, dmu_send_read_done, range,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
	}
}

/*
 * Create a new record with the given values.
 */
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
    uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
	enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
REDACT : DATA)); 1700 1701 struct send_range *range = range_alloc(range_type, dn->dn_object, 1702 blkid, blkid + count, B_FALSE); 1703 1704 if (blkid == DMU_SPILL_BLKID) 1705 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA); 1706 1707 switch (range_type) { 1708 case HOLE: 1709 range->sru.hole.datablksz = datablksz; 1710 break; 1711 case DATA: 1712 ASSERT3U(count, ==, 1); 1713 range->sru.data.datablksz = datablksz; 1714 range->sru.data.obj_type = dn->dn_type; 1715 range->sru.data.bp = *bp; 1716 issue_data_read(srta, range); 1717 break; 1718 case REDACT: 1719 range->sru.redact.datablksz = datablksz; 1720 break; 1721 default: 1722 break; 1723 } 1724 bqueue_enqueue(q, range, datablksz); 1725 } 1726 1727 /* 1728 * This thread is responsible for two things: First, it retrieves the correct 1729 * blkptr in the to ds if we need to send the data because of something from 1730 * the from thread. As a result of this, we're the first ones to discover that 1731 * some indirect blocks can be discarded because they're not holes. Second, 1732 * it issues prefetches for the data we need to send. 1733 */ 1734 static void 1735 send_reader_thread(void *arg) 1736 { 1737 struct send_reader_thread_arg *srta = arg; 1738 struct send_merge_thread_arg *smta = srta->smta; 1739 bqueue_t *inq = &smta->q; 1740 bqueue_t *outq = &srta->q; 1741 objset_t *os = smta->os; 1742 fstrans_cookie_t cookie = spl_fstrans_mark(); 1743 struct send_range *range = bqueue_dequeue(inq); 1744 int err = 0; 1745 1746 /* 1747 * If the record we're analyzing is from a redaction bookmark from the 1748 * fromds, then we need to know whether or not it exists in the tods so 1749 * we know whether to create records for it or not. If it does, we need 1750 * the datablksz so we can generate an appropriate record for it. 1751 * Finally, if it isn't redacted, we need the blkptr so that we can send 1752 * a WRITE record containing the actual data. 1753 */ 1754 uint64_t last_obj = UINT64_MAX; 1755 uint64_t last_obj_exists = B_TRUE; 1756 while (!range->eos_marker && !srta->cancel && smta->error == 0 && 1757 err == 0) { 1758 switch (range->type) { 1759 case DATA: 1760 issue_data_read(srta, range); 1761 bqueue_enqueue(outq, range, range->sru.data.datablksz); 1762 range = get_next_range_nofree(inq, range); 1763 break; 1764 case HOLE: 1765 case OBJECT: 1766 case OBJECT_RANGE: 1767 case REDACT: // Redacted blocks must exist 1768 bqueue_enqueue(outq, range, sizeof (*range)); 1769 range = get_next_range_nofree(inq, range); 1770 break; 1771 case PREVIOUSLY_REDACTED: { 1772 /* 1773 * This entry came from the "from bookmark" when 1774 * sending from a bookmark that has a redaction 1775 * list. We need to check if this object/blkid 1776 * exists in the target ("to") dataset, and if 1777 * not then we drop this entry. We also need 1778 * to fill in the block pointer so that we know 1779 * what to prefetch. 1780 * 1781 * To accomplish the above, we first cache whether or 1782 * not the last object we examined exists. If it 1783 * doesn't, we can drop this record. If it does, we hold 1784 * the dnode and use it to call dbuf_dnode_findbp. We do 1785 * this instead of dbuf_bookmark_findbp because we will 1786 * often operate on large ranges, and holding the dnode 1787 * once is more efficient. 1788 */ 1789 boolean_t object_exists = B_TRUE; 1790 /* 1791 * If the data is redacted, we only care if it exists, 1792 * so that we don't send records for objects that have 1793 * been deleted. 
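 * A range whose object does still exist is turned into ordinary DATA or
 * HOLE records below, which is how previously redacted blocks get filled
 * in ("unredacted") on the receiving side.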
1794 */ 1795 dnode_t *dn; 1796 if (range->object == last_obj && !last_obj_exists) { 1797 /* 1798 * If we're still examining the same object as 1799 * previously, and it doesn't exist, we don't 1800 * need to call dbuf_bookmark_findbp. 1801 */ 1802 object_exists = B_FALSE; 1803 } else { 1804 err = dnode_hold(os, range->object, FTAG, &dn); 1805 if (err == ENOENT) { 1806 object_exists = B_FALSE; 1807 err = 0; 1808 } 1809 last_obj = range->object; 1810 last_obj_exists = object_exists; 1811 } 1812 1813 if (err != 0) { 1814 break; 1815 } else if (!object_exists) { 1816 /* 1817 * The block was modified, but doesn't 1818 * exist in the to dataset; if it was 1819 * deleted in the to dataset, then we'll 1820 * visit the hole bp for it at some point. 1821 */ 1822 range = get_next_range(inq, range); 1823 continue; 1824 } 1825 uint64_t file_max = 1826 (dn->dn_maxblkid < range->end_blkid ? 1827 dn->dn_maxblkid : range->end_blkid); 1828 /* 1829 * The object exists, so we need to try to find the 1830 * blkptr for each block in the range we're processing. 1831 */ 1832 rw_enter(&dn->dn_struct_rwlock, RW_READER); 1833 for (uint64_t blkid = range->start_blkid; 1834 blkid < file_max; blkid++) { 1835 blkptr_t bp; 1836 uint32_t datablksz = 1837 dn->dn_phys->dn_datablkszsec << 1838 SPA_MINBLOCKSHIFT; 1839 uint64_t offset = blkid * datablksz; 1840 /* 1841 * This call finds the next non-hole block in 1842 * the object. This is to prevent a 1843 * performance problem where we're unredacting 1844 * a large hole. Using dnode_next_offset to 1845 * skip over the large hole avoids iterating 1846 * over every block in it. 1847 */ 1848 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK, 1849 &offset, 1, 1, 0); 1850 if (err == ESRCH) { 1851 offset = UINT64_MAX; 1852 err = 0; 1853 } else if (err != 0) { 1854 break; 1855 } 1856 if (offset != blkid * datablksz) { 1857 /* 1858 * if there is a hole from here 1859 * (blkid) to offset 1860 */ 1861 offset = MIN(offset, file_max * 1862 datablksz); 1863 uint64_t nblks = (offset / datablksz) - 1864 blkid; 1865 enqueue_range(srta, outq, dn, blkid, 1866 nblks, NULL, datablksz); 1867 blkid += nblks; 1868 } 1869 if (blkid >= file_max) 1870 break; 1871 err = dbuf_dnode_findbp(dn, 0, blkid, &bp, 1872 NULL, NULL); 1873 if (err != 0) 1874 break; 1875 ASSERT(!BP_IS_HOLE(&bp)); 1876 enqueue_range(srta, outq, dn, blkid, 1, &bp, 1877 datablksz); 1878 } 1879 rw_exit(&dn->dn_struct_rwlock); 1880 dnode_rele(dn, FTAG); 1881 range = get_next_range(inq, range); 1882 } 1883 } 1884 } 1885 if (srta->cancel || err != 0) { 1886 smta->cancel = B_TRUE; 1887 srta->error = err; 1888 } else if (smta->error != 0) { 1889 srta->error = smta->error; 1890 } 1891 while (!range->eos_marker) 1892 range = get_next_range(inq, range); 1893 1894 bqueue_enqueue_flush(outq, range, 1); 1895 spl_fstrans_unmark(cookie); 1896 thread_exit(); 1897 } 1898 1899 #define NUM_SNAPS_NOT_REDACTED UINT64_MAX 1900 1901 struct dmu_send_params { 1902 /* Pool args */ 1903 void *tag; // Tag that dp was held with, will be used to release dp. 
1904 dsl_pool_t *dp; 1905 /* To snapshot args */ 1906 const char *tosnap; 1907 dsl_dataset_t *to_ds; 1908 /* From snapshot args */ 1909 zfs_bookmark_phys_t ancestor_zb; 1910 uint64_t *fromredactsnaps; 1911 /* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */ 1912 uint64_t numfromredactsnaps; 1913 /* Stream params */ 1914 boolean_t is_clone; 1915 boolean_t embedok; 1916 boolean_t large_block_ok; 1917 boolean_t compressok; 1918 boolean_t rawok; 1919 boolean_t savedok; 1920 uint64_t resumeobj; 1921 uint64_t resumeoff; 1922 uint64_t saved_guid; 1923 zfs_bookmark_phys_t *redactbook; 1924 /* Stream output params */ 1925 dmu_send_outparams_t *dso; 1926 1927 /* Stream progress params */ 1928 offset_t *off; 1929 int outfd; 1930 char saved_toname[MAXNAMELEN]; 1931 }; 1932 1933 static int 1934 setup_featureflags(struct dmu_send_params *dspp, objset_t *os, 1935 uint64_t *featureflags) 1936 { 1937 dsl_dataset_t *to_ds = dspp->to_ds; 1938 dsl_pool_t *dp = dspp->dp; 1939 #ifdef _KERNEL 1940 if (dmu_objset_type(os) == DMU_OST_ZFS) { 1941 uint64_t version; 1942 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) 1943 return (SET_ERROR(EINVAL)); 1944 1945 if (version >= ZPL_VERSION_SA) 1946 *featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; 1947 } 1948 #endif 1949 1950 /* raw sends imply large_block_ok */ 1951 if ((dspp->rawok || dspp->large_block_ok) && 1952 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) { 1953 *featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS; 1954 } 1955 1956 /* encrypted datasets will not have embedded blocks */ 1957 if ((dspp->embedok || dspp->rawok) && !os->os_encrypted && 1958 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) { 1959 *featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA; 1960 } 1961 1962 /* raw send implies compressok */ 1963 if (dspp->compressok || dspp->rawok) 1964 *featureflags |= DMU_BACKUP_FEATURE_COMPRESSED; 1965 1966 if (dspp->rawok && os->os_encrypted) 1967 *featureflags |= DMU_BACKUP_FEATURE_RAW; 1968 1969 if ((*featureflags & 1970 (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED | 1971 DMU_BACKUP_FEATURE_RAW)) != 0 && 1972 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) { 1973 *featureflags |= DMU_BACKUP_FEATURE_LZ4; 1974 } 1975 1976 /* 1977 * We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to 1978 * allow sending ZSTD compressed datasets to a receiver that does not 1979 * support ZSTD 1980 */ 1981 if ((*featureflags & 1982 (DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 && 1983 dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) { 1984 *featureflags |= DMU_BACKUP_FEATURE_ZSTD; 1985 } 1986 1987 if (dspp->resumeobj != 0 || dspp->resumeoff != 0) { 1988 *featureflags |= DMU_BACKUP_FEATURE_RESUMING; 1989 } 1990 1991 if (dspp->redactbook != NULL) { 1992 *featureflags |= DMU_BACKUP_FEATURE_REDACTED; 1993 } 1994 1995 if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) { 1996 *featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE; 1997 } 1998 return (0); 1999 } 2000 2001 static dmu_replay_record_t * 2002 create_begin_record(struct dmu_send_params *dspp, objset_t *os, 2003 uint64_t featureflags) 2004 { 2005 dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t), 2006 KM_SLEEP); 2007 drr->drr_type = DRR_BEGIN; 2008 2009 struct drr_begin *drrb = &drr->drr_u.drr_begin; 2010 dsl_dataset_t *to_ds = dspp->to_ds; 2011 2012 drrb->drr_magic = DMU_BACKUP_MAGIC; 2013 drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time; 2014 
drrb->drr_type = dmu_objset_type(os); 2015 drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid; 2016 drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid; 2017 2018 DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM); 2019 DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags); 2020 2021 if (dspp->is_clone) 2022 drrb->drr_flags |= DRR_FLAG_CLONE; 2023 if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET) 2024 drrb->drr_flags |= DRR_FLAG_CI_DATA; 2025 if (zfs_send_set_freerecords_bit) 2026 drrb->drr_flags |= DRR_FLAG_FREERECORDS; 2027 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK; 2028 2029 if (dspp->savedok) { 2030 drrb->drr_toguid = dspp->saved_guid; 2031 strlcpy(drrb->drr_toname, dspp->saved_toname, 2032 sizeof (drrb->drr_toname)); 2033 } else { 2034 dsl_dataset_name(to_ds, drrb->drr_toname); 2035 if (!to_ds->ds_is_snapshot) { 2036 (void) strlcat(drrb->drr_toname, "@--head--", 2037 sizeof (drrb->drr_toname)); 2038 } 2039 } 2040 return (drr); 2041 } 2042 2043 static void 2044 setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os, 2045 dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok) 2046 { 2047 VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff, 2048 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2049 offsetof(struct send_range, ln))); 2050 to_arg->error_code = 0; 2051 to_arg->cancel = B_FALSE; 2052 to_arg->os = to_os; 2053 to_arg->fromtxg = fromtxg; 2054 to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA; 2055 if (rawok) 2056 to_arg->flags |= TRAVERSE_NO_DECRYPT; 2057 to_arg->num_blocks_visited = &dssp->dss_blocks; 2058 (void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0, 2059 curproc, TS_RUN, minclsyspri); 2060 } 2061 2062 static void 2063 setup_from_thread(struct redact_list_thread_arg *from_arg, 2064 redaction_list_t *from_rl, dmu_sendstatus_t *dssp) 2065 { 2066 VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff, 2067 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2068 offsetof(struct send_range, ln))); 2069 from_arg->error_code = 0; 2070 from_arg->cancel = B_FALSE; 2071 from_arg->rl = from_rl; 2072 from_arg->mark_redact = B_FALSE; 2073 from_arg->num_blocks_visited = &dssp->dss_blocks; 2074 /* 2075 * If from_rl is null, redact_list_thread just enqueues an eos marker 2076 * and exits.
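 * For a full send, or an incremental from an ordinary snapshot, from_rl is
 * NULL, so this thread contributes nothing but that marker.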
2077 */ 2078 (void) thread_create(NULL, 0, redact_list_thread, from_arg, 0, 2079 curproc, TS_RUN, minclsyspri); 2080 } 2081 2082 static void 2083 setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg, 2084 struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp) 2085 { 2086 if (dspp->redactbook == NULL) 2087 return; 2088 2089 rlt_arg->cancel = B_FALSE; 2090 VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff, 2091 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2092 offsetof(struct send_range, ln))); 2093 rlt_arg->error_code = 0; 2094 rlt_arg->mark_redact = B_TRUE; 2095 rlt_arg->rl = rl; 2096 rlt_arg->num_blocks_visited = &dssp->dss_blocks; 2097 2098 (void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0, 2099 curproc, TS_RUN, minclsyspri); 2100 } 2101 2102 static void 2103 setup_merge_thread(struct send_merge_thread_arg *smt_arg, 2104 struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg, 2105 struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg, 2106 objset_t *os) 2107 { 2108 VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff, 2109 MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), 2110 offsetof(struct send_range, ln))); 2111 smt_arg->cancel = B_FALSE; 2112 smt_arg->error = 0; 2113 smt_arg->from_arg = from_arg; 2114 smt_arg->to_arg = to_arg; 2115 if (dspp->redactbook != NULL) 2116 smt_arg->redact_arg = rlt_arg; 2117 2118 smt_arg->os = os; 2119 (void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc, 2120 TS_RUN, minclsyspri); 2121 } 2122 2123 static void 2124 setup_reader_thread(struct send_reader_thread_arg *srt_arg, 2125 struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg, 2126 uint64_t featureflags) 2127 { 2128 VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff, 2129 MAX(zfs_send_queue_length, 2 * zfs_max_recordsize), 2130 offsetof(struct send_range, ln))); 2131 srt_arg->smta = smt_arg; 2132 srt_arg->issue_reads = !dspp->dso->dso_dryrun; 2133 srt_arg->featureflags = featureflags; 2134 (void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0, 2135 curproc, TS_RUN, minclsyspri); 2136 } 2137 2138 static int 2139 setup_resume_points(struct dmu_send_params *dspp, 2140 struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg, 2141 struct redact_list_thread_arg *rlt_arg, 2142 struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os, 2143 redaction_list_t *redact_rl, nvlist_t *nvl) 2144 { 2145 dsl_dataset_t *to_ds = dspp->to_ds; 2146 int err = 0; 2147 2148 uint64_t obj = 0; 2149 uint64_t blkid = 0; 2150 if (resuming) { 2151 obj = dspp->resumeobj; 2152 dmu_object_info_t to_doi; 2153 err = dmu_object_info(os, obj, &to_doi); 2154 if (err != 0) 2155 return (err); 2156 2157 blkid = dspp->resumeoff / to_doi.doi_data_block_size; 2158 } 2159 /* 2160 * If we're resuming a redacted send, we can skip to the appropriate 2161 * point in the redaction bookmark by binary searching through it. 2162 */ 2163 if (redact_rl != NULL) { 2164 SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid); 2165 } 2166 2167 SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid); 2168 if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) { 2169 uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj; 2170 /* 2171 * Note: If the resume point is in an object whose 2172 * blocksize is different in the from vs to snapshots, 2173 * we will have divided by the "wrong" blocksize. 
2174 * However, in this case fromsnap's send_cb() will 2175 * detect that the blocksize has changed and therefore 2176 * ignore this object. 2177 * 2178 * If we're resuming a send from a redaction bookmark, 2179 * we still cannot accidentally suggest blocks behind 2180 * the to_ds. In addition, we know that any blocks in 2181 * the object in the to_ds will have to be sent, since 2182 * the size changed. Therefore, we can't cause any harm 2183 * this way either. 2184 */ 2185 SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid); 2186 } 2187 if (resuming) { 2188 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj); 2189 fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff); 2190 } 2191 return (0); 2192 } 2193 2194 static dmu_sendstatus_t * 2195 setup_send_progress(struct dmu_send_params *dspp) 2196 { 2197 dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP); 2198 dssp->dss_outfd = dspp->outfd; 2199 dssp->dss_off = dspp->off; 2200 dssp->dss_proc = curproc; 2201 mutex_enter(&dspp->to_ds->ds_sendstream_lock); 2202 list_insert_head(&dspp->to_ds->ds_sendstreams, dssp); 2203 mutex_exit(&dspp->to_ds->ds_sendstream_lock); 2204 return (dssp); 2205 } 2206 2207 /* 2208 * Actually do the bulk of the work in a zfs send. 2209 * 2210 * The idea is that we want to do a send from ancestor_zb to to_ds. We also 2211 * want to not send any data that has been modified by all the datasets in 2212 * redactsnaparr, and store the list of blocks that are redacted in this way in 2213 * a bookmark named redactbook, created on the to_ds. We do this by creating 2214 * several worker threads, whose function is described below. 2215 * 2216 * There are three cases. 2217 * The first case is a redacted zfs send. In this case there are 5 threads. 2218 * The first thread is the to_ds traversal thread: it calls dataset_traverse on 2219 * the to_ds and finds all the blocks that have changed since ancestor_zb (if 2220 * it's a full send, that's all blocks in the dataset). It then sends those 2221 * blocks on to the send merge thread. The redact list thread takes the data 2222 * from the redaction bookmark and sends those blocks on to the send merge 2223 * thread. The send merge thread takes the data from the to_ds traversal 2224 * thread, and combines it with the redaction records from the redact list 2225 * thread. If a block appears in both the to_ds's data and the redaction data, 2226 * the send merge thread will mark it as redacted and send it on to the prefetch 2227 * thread. Otherwise, the send merge thread will send the block on to the 2228 * prefetch thread unchanged. The prefetch thread will issue prefetch reads for 2229 * any data that isn't redacted, and then send the data on to the main thread. 2230 * The main thread behaves the same as in a normal send case, issuing demand 2231 * reads for data blocks and sending out records over the network 2232 * 2233 * The graphic below diagrams the flow of data in the case of a redacted zfs 2234 * send. Each box represents a thread, and each line represents the flow of 2235 * data. 
2236 *
2237 *             Records from the |
2238 *           redaction bookmark |
2239 * +--------------------+       |  +---------------------------+
2240 * |                    |       v  | Send Merge Thread         |
2241 * | Redact List Thread +----------> Apply redaction marks to  |
2242 * |                    |          | records as specified by   |
2243 * +--------------------+          | redaction ranges          |
2244 *                                 +----^---------------+------+
2245 *                                      |               | Merged data
2246 *                                      |               |
2247 *                                      |  +------------v--------+
2248 *                                      |  | Prefetch Thread     |
2249 * +--------------------+               |  | Issues prefetch     |
2250 * | to_ds Traversal    |               |  | reads of data blocks|
2251 * | Thread (finds      +---------------+  +------------+--------+
2252 * | candidate blocks)  |  Blocks modified              | Prefetched data
2253 * +--------------------+  by to_ds since                |
2254 *                         ancestor_zb      +------------v----+
2255 *                                          | Main Thread     |  File Descriptor
2256 *                                          | Sends data over +->(to zfs receive)
2257 *                                          | wire            |
2258 *                                          +-----------------+
2259 *
2260 * The second case is an incremental send from a redaction bookmark. The to_ds
2261 * traversal thread and the main thread behave the same as in the redacted
2262 * send case. The new thread is the from bookmark traversal thread. It
2263 * iterates over the redaction list in the redaction bookmark, and enqueues
2264 * records for each block that was redacted in the original send. The send
2265 * merge thread now has to merge the data from the two threads. For details
2266 * about that process, see the header comment of send_merge_thread(). Any data
2267 * it decides to send on will be prefetched by the prefetch thread. Note that
2268 * you can perform a redacted send from a redaction bookmark; in that case,
2269 * the data flow behaves very similarly to the flow in the redacted send case,
2270 * except with the addition of the bookmark traversal thread iterating over the
2271 * redaction bookmark. The send_merge_thread also has to take on the
2272 * responsibility of merging the redact list thread's records, the bookmark
2273 * traversal thread's records, and the to_ds records.
2274 *
2275 * +---------------------+
2276 * |                     |
2277 * | Redact List Thread  +--------------+
2278 * |                     |              |
2279 * +---------------------+              |
2280 *        Blocks in redaction list      | Ranges modified by every secure snap
2281 *                of from bookmark      | (or EOS if not redacted)
2282 *                                      |
2283 * +---------------------+   |     +----v----------------------+
2284 * | bookmark Traversal  |   v     | Send Merge Thread         |
2285 * | Thread (finds       +---------> Merges bookmark, rlt, and |
2286 * | candidate blocks)   |         | to_ds send records        |
2287 * +---------------------+         +----^---------------+------+
2288 *                                      |               | Merged data
2289 *                                      |  +------------v--------+
2290 *                                      |  | Prefetch Thread     |
2291 * +--------------------+               |  | Issues prefetch     |
2292 * | to_ds Traversal    |               |  | reads of data blocks|
2293 * | Thread (finds      +---------------+  +------------+--------+
2294 * | candidate blocks)  |  Blocks modified              | Prefetched data
2295 * +--------------------+  by to_ds since  +------------v----+
2296 *                         ancestor_zb    | Main Thread     |  File Descriptor
2297 *                                        | Sends data over +->(to zfs receive)
2298 *                                        | wire            |
2299 *                                        +-----------------+
2300 *
2301 * The final case is a simple zfs full or incremental send. The to_ds traversal
2302 * thread behaves the same as always. The redact list thread is never started.
2303 * The send merge thread takes all the blocks that the to_ds traversal thread
2304 * sends it, prefetches the data, and sends the blocks on to the main thread.
2305 * The main thread sends the data over the wire.
2306 * 2307 * To keep performance acceptable, we want to prefetch the data in the worker 2308 * threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH 2309 * feature built into traverse_dataset, the combining and deletion of records 2310 * due to redaction and sends from redaction bookmarks mean that we could 2311 * issue many unnecessary prefetches. As a result, we only prefetch data 2312 * after we've determined that the record is not going to be redacted. To 2313 * prevent the prefetching from getting too far ahead of the main thread, the 2314 * blocking queues that are used for communication are capped not by the 2315 * number of entries in the queue, but by the sum of the size of the 2316 * prefetches associated with them. The limit on the amount of data that the 2317 * thread can prefetch beyond what the main thread has reached is controlled 2318 * by the global variable zfs_send_queue_length. In addition, to prevent poor 2319 * performance in the beginning of a send, we also limit the distance ahead 2320 * that the traversal threads can be. That distance is controlled by the 2321 * zfs_send_no_prefetch_queue_length tunable. 2322 * 2323 * Note: Releases dp using the specified tag. 2324 */ 2325 static int 2326 dmu_send_impl(struct dmu_send_params *dspp) 2327 { 2328 objset_t *os; 2329 dmu_replay_record_t *drr; 2330 dmu_sendstatus_t *dssp; 2331 dmu_send_cookie_t dsc = {0}; 2332 int err; 2333 uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg; 2334 uint64_t featureflags = 0; 2335 struct redact_list_thread_arg *from_arg; 2336 struct send_thread_arg *to_arg; 2337 struct redact_list_thread_arg *rlt_arg; 2338 struct send_merge_thread_arg *smt_arg; 2339 struct send_reader_thread_arg *srt_arg; 2340 struct send_range *range; 2341 redaction_list_t *from_rl = NULL; 2342 redaction_list_t *redact_rl = NULL; 2343 boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0); 2344 boolean_t book_resuming = resuming; 2345 2346 dsl_dataset_t *to_ds = dspp->to_ds; 2347 zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb; 2348 dsl_pool_t *dp = dspp->dp; 2349 void *tag = dspp->tag; 2350 2351 err = dmu_objset_from_ds(to_ds, &os); 2352 if (err != 0) { 2353 dsl_pool_rele(dp, tag); 2354 return (err); 2355 } 2356 2357 /* 2358 * If this is a non-raw send of an encrypted ds, we can ensure that 2359 * the objset_phys_t is authenticated. This is safe because this is 2360 * either a snapshot or we have owned the dataset, ensuring that 2361 * it can't be modified. 2362 */ 2363 if (!dspp->rawok && os->os_encrypted && 2364 arc_is_unauthenticated(os->os_phys_buf)) { 2365 zbookmark_phys_t zb; 2366 2367 SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT, 2368 ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 2369 err = arc_untransform(os->os_phys_buf, os->os_spa, 2370 &zb, B_FALSE); 2371 if (err != 0) { 2372 dsl_pool_rele(dp, tag); 2373 return (err); 2374 } 2375 2376 ASSERT0(arc_is_unauthenticated(os->os_phys_buf)); 2377 } 2378 2379 if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) { 2380 dsl_pool_rele(dp, tag); 2381 return (err); 2382 } 2383 2384 /* 2385 * If we're doing a redacted send, hold the bookmark's redaction list. 
2386 */ 2387 if (dspp->redactbook != NULL) { 2388 err = dsl_redaction_list_hold_obj(dp, 2389 dspp->redactbook->zbm_redaction_obj, FTAG, 2390 &redact_rl); 2391 if (err != 0) { 2392 dsl_pool_rele(dp, tag); 2393 return (SET_ERROR(EINVAL)); 2394 } 2395 dsl_redaction_list_long_hold(dp, redact_rl, FTAG); 2396 } 2397 2398 /* 2399 * If we're sending from a redaction bookmark, hold the redaction list 2400 * so that we can consider sending the redacted blocks. 2401 */ 2402 if (ancestor_zb->zbm_redaction_obj != 0) { 2403 err = dsl_redaction_list_hold_obj(dp, 2404 ancestor_zb->zbm_redaction_obj, FTAG, &from_rl); 2405 if (err != 0) { 2406 if (redact_rl != NULL) { 2407 dsl_redaction_list_long_rele(redact_rl, FTAG); 2408 dsl_redaction_list_rele(redact_rl, FTAG); 2409 } 2410 dsl_pool_rele(dp, tag); 2411 return (SET_ERROR(EINVAL)); 2412 } 2413 dsl_redaction_list_long_hold(dp, from_rl, FTAG); 2414 } 2415 2416 dsl_dataset_long_hold(to_ds, FTAG); 2417 2418 from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP); 2419 to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP); 2420 rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP); 2421 smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP); 2422 srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP); 2423 2424 drr = create_begin_record(dspp, os, featureflags); 2425 dssp = setup_send_progress(dspp); 2426 2427 dsc.dsc_drr = drr; 2428 dsc.dsc_dso = dspp->dso; 2429 dsc.dsc_os = os; 2430 dsc.dsc_off = dspp->off; 2431 dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid; 2432 dsc.dsc_fromtxg = fromtxg; 2433 dsc.dsc_pending_op = PENDING_NONE; 2434 dsc.dsc_featureflags = featureflags; 2435 dsc.dsc_resume_object = dspp->resumeobj; 2436 dsc.dsc_resume_offset = dspp->resumeoff; 2437 2438 dsl_pool_rele(dp, tag); 2439 2440 void *payload = NULL; 2441 size_t payload_len = 0; 2442 nvlist_t *nvl = fnvlist_alloc(); 2443 2444 /* 2445 * If we're doing a redacted send, we include the snapshots we're 2446 * redacted with respect to so that the target system knows what send 2447 * streams can be correctly received on top of this dataset. If we're 2448 * instead sending a redacted dataset, we include the snapshots that the 2449 * dataset was created with respect to. 2450 */ 2451 if (dspp->redactbook != NULL) { 2452 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, 2453 redact_rl->rl_phys->rlp_snaps, 2454 redact_rl->rl_phys->rlp_num_snaps); 2455 } else if (dsl_dataset_feature_is_active(to_ds, 2456 SPA_FEATURE_REDACTED_DATASETS)) { 2457 uint64_t *tods_guids; 2458 uint64_t length; 2459 VERIFY(dsl_dataset_get_uint64_array_feature(to_ds, 2460 SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids)); 2461 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids, 2462 length); 2463 } 2464 2465 /* 2466 * If we're sending from a redaction bookmark, then we should retrieve 2467 * the guids of that bookmark so we can send them over the wire. 2468 */ 2469 if (from_rl != NULL) { 2470 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, 2471 from_rl->rl_phys->rlp_snaps, 2472 from_rl->rl_phys->rlp_num_snaps); 2473 } 2474 2475 /* 2476 * If the snapshot we're sending from is redacted, include the redaction 2477 * list in the stream. 
2478 */ 2479 if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) { 2480 ASSERT3P(from_rl, ==, NULL); 2481 fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, 2482 dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps); 2483 if (dspp->numfromredactsnaps > 0) { 2484 kmem_free(dspp->fromredactsnaps, 2485 dspp->numfromredactsnaps * sizeof (uint64_t)); 2486 dspp->fromredactsnaps = NULL; 2487 } 2488 } 2489 2490 if (resuming || book_resuming) { 2491 err = setup_resume_points(dspp, to_arg, from_arg, 2492 rlt_arg, smt_arg, resuming, os, redact_rl, nvl); 2493 if (err != 0) 2494 goto out; 2495 } 2496 2497 if (featureflags & DMU_BACKUP_FEATURE_RAW) { 2498 uint64_t ivset_guid = (ancestor_zb != NULL) ? 2499 ancestor_zb->zbm_ivset_guid : 0; 2500 nvlist_t *keynvl = NULL; 2501 ASSERT(os->os_encrypted); 2502 2503 err = dsl_crypto_populate_key_nvlist(os, ivset_guid, 2504 &keynvl); 2505 if (err != 0) { 2506 fnvlist_free(nvl); 2507 goto out; 2508 } 2509 2510 fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl); 2511 fnvlist_free(keynvl); 2512 } 2513 2514 if (!nvlist_empty(nvl)) { 2515 payload = fnvlist_pack(nvl, &payload_len); 2516 drr->drr_payloadlen = payload_len; 2517 } 2518 2519 fnvlist_free(nvl); 2520 err = dump_record(&dsc, payload, payload_len); 2521 fnvlist_pack_free(payload, payload_len); 2522 if (err != 0) { 2523 err = dsc.dsc_err; 2524 goto out; 2525 } 2526 2527 setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok); 2528 setup_from_thread(from_arg, from_rl, dssp); 2529 setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp); 2530 setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os); 2531 setup_reader_thread(srt_arg, dspp, smt_arg, featureflags); 2532 2533 range = bqueue_dequeue(&srt_arg->q); 2534 while (err == 0 && !range->eos_marker) { 2535 err = do_dump(&dsc, range); 2536 range = get_next_range(&srt_arg->q, range); 2537 if (issig(JUSTLOOKING) && issig(FORREAL)) 2538 err = SET_ERROR(EINTR); 2539 } 2540 2541 /* 2542 * If we hit an error or are interrupted, cancel our worker threads and 2543 * clear the queue of any pending records. The threads will pass the 2544 * cancel up the tree of worker threads, and each one will clean up any 2545 * pending records before exiting. 2546 */ 2547 if (err != 0) { 2548 srt_arg->cancel = B_TRUE; 2549 while (!range->eos_marker) { 2550 range = get_next_range(&srt_arg->q, range); 2551 } 2552 } 2553 range_free(range); 2554 2555 bqueue_destroy(&srt_arg->q); 2556 bqueue_destroy(&smt_arg->q); 2557 if (dspp->redactbook != NULL) 2558 bqueue_destroy(&rlt_arg->q); 2559 bqueue_destroy(&to_arg->q); 2560 bqueue_destroy(&from_arg->q); 2561 2562 if (err == 0 && srt_arg->error != 0) 2563 err = srt_arg->error; 2564 2565 if (err != 0) 2566 goto out; 2567 2568 if (dsc.dsc_pending_op != PENDING_NONE) 2569 if (dump_record(&dsc, NULL, 0) != 0) 2570 err = SET_ERROR(EINTR); 2571 2572 if (err != 0) { 2573 if (err == EINTR && dsc.dsc_err != 0) 2574 err = dsc.dsc_err; 2575 goto out; 2576 } 2577 2578 /* 2579 * Send the DRR_END record if this is not a saved stream. 2580 * Otherwise, the omitted DRR_END record will signal to 2581 * the receive side that the stream is incomplete. 
2582 */ 2583 if (!dspp->savedok) { 2584 bzero(drr, sizeof (dmu_replay_record_t)); 2585 drr->drr_type = DRR_END; 2586 drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc; 2587 drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid; 2588 2589 if (dump_record(&dsc, NULL, 0) != 0) 2590 err = dsc.dsc_err; 2591 } 2592 out: 2593 mutex_enter(&to_ds->ds_sendstream_lock); 2594 list_remove(&to_ds->ds_sendstreams, dssp); 2595 mutex_exit(&to_ds->ds_sendstream_lock); 2596 2597 VERIFY(err != 0 || (dsc.dsc_sent_begin && 2598 (dsc.dsc_sent_end || dspp->savedok))); 2599 2600 kmem_free(drr, sizeof (dmu_replay_record_t)); 2601 kmem_free(dssp, sizeof (dmu_sendstatus_t)); 2602 kmem_free(from_arg, sizeof (*from_arg)); 2603 kmem_free(to_arg, sizeof (*to_arg)); 2604 kmem_free(rlt_arg, sizeof (*rlt_arg)); 2605 kmem_free(smt_arg, sizeof (*smt_arg)); 2606 kmem_free(srt_arg, sizeof (*srt_arg)); 2607 2608 dsl_dataset_long_rele(to_ds, FTAG); 2609 if (from_rl != NULL) { 2610 dsl_redaction_list_long_rele(from_rl, FTAG); 2611 dsl_redaction_list_rele(from_rl, FTAG); 2612 } 2613 if (redact_rl != NULL) { 2614 dsl_redaction_list_long_rele(redact_rl, FTAG); 2615 dsl_redaction_list_rele(redact_rl, FTAG); 2616 } 2617 2618 return (err); 2619 } 2620 2621 int 2622 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, 2623 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok, 2624 boolean_t rawok, boolean_t savedok, int outfd, offset_t *off, 2625 dmu_send_outparams_t *dsop) 2626 { 2627 int err; 2628 dsl_dataset_t *fromds; 2629 ds_hold_flags_t dsflags; 2630 struct dmu_send_params dspp = {0}; 2631 dspp.embedok = embedok; 2632 dspp.large_block_ok = large_block_ok; 2633 dspp.compressok = compressok; 2634 dspp.outfd = outfd; 2635 dspp.off = off; 2636 dspp.dso = dsop; 2637 dspp.tag = FTAG; 2638 dspp.rawok = rawok; 2639 dspp.savedok = savedok; 2640 2641 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; 2642 err = dsl_pool_hold(pool, FTAG, &dspp.dp); 2643 if (err != 0) 2644 return (err); 2645 2646 err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG, 2647 &dspp.to_ds); 2648 if (err != 0) { 2649 dsl_pool_rele(dspp.dp, FTAG); 2650 return (err); 2651 } 2652 2653 if (fromsnap != 0) { 2654 err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags, 2655 FTAG, &fromds); 2656 if (err != 0) { 2657 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2658 dsl_pool_rele(dspp.dp, FTAG); 2659 return (err); 2660 } 2661 dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; 2662 dspp.ancestor_zb.zbm_creation_txg = 2663 dsl_dataset_phys(fromds)->ds_creation_txg; 2664 dspp.ancestor_zb.zbm_creation_time = 2665 dsl_dataset_phys(fromds)->ds_creation_time; 2666 2667 if (dsl_dataset_is_zapified(fromds)) { 2668 (void) zap_lookup(dspp.dp->dp_meta_objset, 2669 fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1, 2670 &dspp.ancestor_zb.zbm_ivset_guid); 2671 } 2672 2673 /* See dmu_send for the reasons behind this. 
*/ 2674 uint64_t *fromredact; 2675 2676 if (!dsl_dataset_get_uint64_array_feature(fromds, 2677 SPA_FEATURE_REDACTED_DATASETS, 2678 &dspp.numfromredactsnaps, 2679 &fromredact)) { 2680 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2681 } else if (dspp.numfromredactsnaps > 0) { 2682 uint64_t size = dspp.numfromredactsnaps * 2683 sizeof (uint64_t); 2684 dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP); 2685 bcopy(fromredact, dspp.fromredactsnaps, size); 2686 } 2687 2688 boolean_t is_before = 2689 dsl_dataset_is_before(dspp.to_ds, fromds, 0); 2690 dspp.is_clone = (dspp.to_ds->ds_dir != 2691 fromds->ds_dir); 2692 dsl_dataset_rele(fromds, FTAG); 2693 if (!is_before) { 2694 dsl_pool_rele(dspp.dp, FTAG); 2695 err = SET_ERROR(EXDEV); 2696 } else { 2697 err = dmu_send_impl(&dspp); 2698 } 2699 } else { 2700 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2701 err = dmu_send_impl(&dspp); 2702 } 2703 dsl_dataset_rele(dspp.to_ds, FTAG); 2704 return (err); 2705 } 2706 2707 int 2708 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok, 2709 boolean_t large_block_ok, boolean_t compressok, boolean_t rawok, 2710 boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff, 2711 const char *redactbook, int outfd, offset_t *off, 2712 dmu_send_outparams_t *dsop) 2713 { 2714 int err = 0; 2715 ds_hold_flags_t dsflags; 2716 boolean_t owned = B_FALSE; 2717 dsl_dataset_t *fromds = NULL; 2718 zfs_bookmark_phys_t book = {0}; 2719 struct dmu_send_params dspp = {0}; 2720 2721 dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; 2722 dspp.tosnap = tosnap; 2723 dspp.embedok = embedok; 2724 dspp.large_block_ok = large_block_ok; 2725 dspp.compressok = compressok; 2726 dspp.outfd = outfd; 2727 dspp.off = off; 2728 dspp.dso = dsop; 2729 dspp.tag = FTAG; 2730 dspp.resumeobj = resumeobj; 2731 dspp.resumeoff = resumeoff; 2732 dspp.rawok = rawok; 2733 dspp.savedok = savedok; 2734 2735 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL) 2736 return (SET_ERROR(EINVAL)); 2737 2738 err = dsl_pool_hold(tosnap, FTAG, &dspp.dp); 2739 if (err != 0) 2740 return (err); 2741 2742 if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) { 2743 /* 2744 * We are sending a filesystem or volume. Ensure 2745 * that it doesn't change by owning the dataset. 2746 */ 2747 2748 if (savedok) { 2749 /* 2750 * We are looking for the dataset that represents the 2751 * partially received send stream. If this stream was 2752 * received as a new snapshot of an existing dataset, 2753 * this will be saved in a hidden clone named 2754 * "<pool>/<dataset>/%recv". Otherwise, the stream 2755 * will be saved in the live dataset itself. In 2756 * either case we need to use dsl_dataset_own_force() 2757 * because the stream is marked as inconsistent, 2758 * which would normally make it unavailable to be 2759 * owned. 
2760 */ 2761 char *name = kmem_asprintf("%s/%s", tosnap, 2762 recv_clone_name); 2763 err = dsl_dataset_own_force(dspp.dp, name, dsflags, 2764 FTAG, &dspp.to_ds); 2765 if (err == ENOENT) { 2766 err = dsl_dataset_own_force(dspp.dp, tosnap, 2767 dsflags, FTAG, &dspp.to_ds); 2768 } 2769 2770 if (err == 0) { 2771 err = zap_lookup(dspp.dp->dp_meta_objset, 2772 dspp.to_ds->ds_object, 2773 DS_FIELD_RESUME_TOGUID, 8, 1, 2774 &dspp.saved_guid); 2775 } 2776 2777 if (err == 0) { 2778 err = zap_lookup(dspp.dp->dp_meta_objset, 2779 dspp.to_ds->ds_object, 2780 DS_FIELD_RESUME_TONAME, 1, 2781 sizeof (dspp.saved_toname), 2782 dspp.saved_toname); 2783 } 2784 if (err != 0) 2785 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2786 2787 kmem_strfree(name); 2788 } else { 2789 err = dsl_dataset_own(dspp.dp, tosnap, dsflags, 2790 FTAG, &dspp.to_ds); 2791 } 2792 owned = B_TRUE; 2793 } else { 2794 err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG, 2795 &dspp.to_ds); 2796 } 2797 2798 if (err != 0) { 2799 dsl_pool_rele(dspp.dp, FTAG); 2800 return (err); 2801 } 2802 2803 if (redactbook != NULL) { 2804 char path[ZFS_MAX_DATASET_NAME_LEN]; 2805 (void) strlcpy(path, tosnap, sizeof (path)); 2806 char *at = strchr(path, '@'); 2807 if (at == NULL) { 2808 err = EINVAL; 2809 } else { 2810 (void) snprintf(at, sizeof (path) - (at - path), "#%s", 2811 redactbook); 2812 err = dsl_bookmark_lookup(dspp.dp, path, 2813 NULL, &book); 2814 dspp.redactbook = &book; 2815 } 2816 } 2817 2818 if (err != 0) { 2819 dsl_pool_rele(dspp.dp, FTAG); 2820 if (owned) 2821 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2822 else 2823 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2824 return (err); 2825 } 2826 2827 if (fromsnap != NULL) { 2828 zfs_bookmark_phys_t *zb = &dspp.ancestor_zb; 2829 int fsnamelen; 2830 if (strpbrk(tosnap, "@#") != NULL) 2831 fsnamelen = strpbrk(tosnap, "@#") - tosnap; 2832 else 2833 fsnamelen = strlen(tosnap); 2834 2835 /* 2836 * If the fromsnap is in a different filesystem, then 2837 * mark the send stream as a clone. 2838 */ 2839 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 || 2840 (fromsnap[fsnamelen] != '@' && 2841 fromsnap[fsnamelen] != '#')) { 2842 dspp.is_clone = B_TRUE; 2843 } 2844 2845 if (strchr(fromsnap, '@') != NULL) { 2846 err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG, 2847 &fromds); 2848 2849 if (err != 0) { 2850 ASSERT3P(fromds, ==, NULL); 2851 } else { 2852 /* 2853 * We need to make a deep copy of the redact 2854 * snapshots of the from snapshot, because the 2855 * array will be freed when we evict from_ds. 
2856 */ 2857 uint64_t *fromredact; 2858 if (!dsl_dataset_get_uint64_array_feature( 2859 fromds, SPA_FEATURE_REDACTED_DATASETS, 2860 &dspp.numfromredactsnaps, 2861 &fromredact)) { 2862 dspp.numfromredactsnaps = 2863 NUM_SNAPS_NOT_REDACTED; 2864 } else if (dspp.numfromredactsnaps > 0) { 2865 uint64_t size = 2866 dspp.numfromredactsnaps * 2867 sizeof (uint64_t); 2868 dspp.fromredactsnaps = kmem_zalloc(size, 2869 KM_SLEEP); 2870 bcopy(fromredact, dspp.fromredactsnaps, 2871 size); 2872 } 2873 if (!dsl_dataset_is_before(dspp.to_ds, fromds, 2874 0)) { 2875 err = SET_ERROR(EXDEV); 2876 } else { 2877 zb->zbm_creation_txg = 2878 dsl_dataset_phys(fromds)-> 2879 ds_creation_txg; 2880 zb->zbm_creation_time = 2881 dsl_dataset_phys(fromds)-> 2882 ds_creation_time; 2883 zb->zbm_guid = 2884 dsl_dataset_phys(fromds)->ds_guid; 2885 zb->zbm_redaction_obj = 0; 2886 2887 if (dsl_dataset_is_zapified(fromds)) { 2888 (void) zap_lookup( 2889 dspp.dp->dp_meta_objset, 2890 fromds->ds_object, 2891 DS_FIELD_IVSET_GUID, 8, 1, 2892 &zb->zbm_ivset_guid); 2893 } 2894 } 2895 dsl_dataset_rele(fromds, FTAG); 2896 } 2897 } else { 2898 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2899 err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds, 2900 zb); 2901 if (err == EXDEV && zb->zbm_redaction_obj != 0 && 2902 zb->zbm_guid == 2903 dsl_dataset_phys(dspp.to_ds)->ds_guid) 2904 err = 0; 2905 } 2906 2907 if (err == 0) { 2908 /* dmu_send_impl will call dsl_pool_rele for us. */ 2909 err = dmu_send_impl(&dspp); 2910 } else { 2911 dsl_pool_rele(dspp.dp, FTAG); 2912 } 2913 } else { 2914 dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED; 2915 err = dmu_send_impl(&dspp); 2916 } 2917 if (owned) 2918 dsl_dataset_disown(dspp.to_ds, dsflags, FTAG); 2919 else 2920 dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG); 2921 return (err); 2922 } 2923 2924 static int 2925 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed, 2926 uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep) 2927 { 2928 int err = 0; 2929 uint64_t size; 2930 /* 2931 * Assume that space (both on-disk and in-stream) is dominated by 2932 * data. We will adjust for indirect blocks and the copies property, 2933 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records). 2934 */ 2935 2936 uint64_t recordsize; 2937 uint64_t record_count; 2938 objset_t *os; 2939 VERIFY0(dmu_objset_from_ds(ds, &os)); 2940 2941 /* Assume all (uncompressed) blocks are recordsize. */ 2942 if (zfs_override_estimate_recordsize != 0) { 2943 recordsize = zfs_override_estimate_recordsize; 2944 } else if (os->os_phys->os_type == DMU_OST_ZVOL) { 2945 err = dsl_prop_get_int_ds(ds, 2946 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize); 2947 } else { 2948 err = dsl_prop_get_int_ds(ds, 2949 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize); 2950 } 2951 if (err != 0) 2952 return (err); 2953 record_count = uncompressed / recordsize; 2954 2955 /* 2956 * If we're estimating a send size for a compressed stream, use the 2957 * compressed data size to estimate the stream size. Otherwise, use the 2958 * uncompressed data size. 2959 */ 2960 size = stream_compressed ? compressed : uncompressed; 2961 2962 /* 2963 * Subtract out approximate space used by indirect blocks. 2964 * Assume most space is used by data blocks (non-indirect, non-dnode). 2965 * Assume no ditto blocks or internal fragmentation. 2966 * 2967 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per 2968 * block. 
2969 */ 2970 size -= record_count * sizeof (blkptr_t); 2971 2972 /* Add in the space for the record associated with each block. */ 2973 size += record_count * sizeof (dmu_replay_record_t); 2974 2975 *sizep = size; 2976 2977 return (0); 2978 } 2979 2980 int 2981 dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds, 2982 zfs_bookmark_phys_t *frombook, boolean_t stream_compressed, 2983 boolean_t saved, uint64_t *sizep) 2984 { 2985 int err; 2986 dsl_dataset_t *ds = origds; 2987 uint64_t uncomp, comp; 2988 2989 ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool)); 2990 ASSERT(fromds == NULL || frombook == NULL); 2991 2992 /* 2993 * If this is a saved send we may actually be sending 2994 * from the %recv clone used for resuming. 2995 */ 2996 if (saved) { 2997 objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset; 2998 uint64_t guid; 2999 char dsname[ZFS_MAX_DATASET_NAME_LEN + 6]; 3000 3001 dsl_dataset_name(origds, dsname); 3002 (void) strcat(dsname, "/"); 3003 (void) strcat(dsname, recv_clone_name); 3004 3005 err = dsl_dataset_hold(origds->ds_dir->dd_pool, 3006 dsname, FTAG, &ds); 3007 if (err != ENOENT && err != 0) { 3008 return (err); 3009 } else if (err == ENOENT) { 3010 ds = origds; 3011 } 3012 3013 /* check that this dataset has partially received data */ 3014 err = zap_lookup(mos, ds->ds_object, 3015 DS_FIELD_RESUME_TOGUID, 8, 1, &guid); 3016 if (err != 0) { 3017 err = SET_ERROR(err == ENOENT ? EINVAL : err); 3018 goto out; 3019 } 3020 3021 err = zap_lookup(mos, ds->ds_object, 3022 DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname); 3023 if (err != 0) { 3024 err = SET_ERROR(err == ENOENT ? EINVAL : err); 3025 goto out; 3026 } 3027 } 3028 3029 /* tosnap must be a snapshot or the target of a saved send */ 3030 if (!ds->ds_is_snapshot && ds == origds) 3031 return (SET_ERROR(EINVAL)); 3032 3033 if (fromds != NULL) { 3034 uint64_t used; 3035 if (!fromds->ds_is_snapshot) { 3036 err = SET_ERROR(EINVAL); 3037 goto out; 3038 } 3039 3040 if (!dsl_dataset_is_before(ds, fromds, 0)) { 3041 err = SET_ERROR(EXDEV); 3042 goto out; 3043 } 3044 3045 err = dsl_dataset_space_written(fromds, ds, &used, &comp, 3046 &uncomp); 3047 if (err != 0) 3048 goto out; 3049 } else if (frombook != NULL) { 3050 uint64_t used; 3051 err = dsl_dataset_space_written_bookmark(frombook, ds, &used, 3052 &comp, &uncomp); 3053 if (err != 0) 3054 goto out; 3055 } else { 3056 uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes; 3057 comp = dsl_dataset_phys(ds)->ds_compressed_bytes; 3058 } 3059 3060 err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp, 3061 stream_compressed, sizep); 3062 /* 3063 * Add the size of the BEGIN and END records to the estimate. 
3064 */ 3065 *sizep += 2 * sizeof (dmu_replay_record_t); 3066 3067 out: 3068 if (ds != origds) 3069 dsl_dataset_rele(ds, FTAG); 3070 return (err); 3071 } 3072 3073 /* BEGIN CSTYLED */ 3074 ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW, 3075 "Allow sending corrupt data"); 3076 3077 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW, 3078 "Maximum send queue length"); 3079 3080 ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW, 3081 "Send unmodified spill blocks"); 3082 3083 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW, 3084 "Maximum send queue length for non-prefetch queues"); 3085 3086 ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW, 3087 "Send queue fill fraction"); 3088 3089 ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW, 3090 "Send queue fill fraction for non-prefetch queues"); 3091 3092 ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW, 3093 "Override block size estimate with fixed size"); 3094 /* END CSTYLED */ 3095
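
/*
 * The ZFS_MODULE_PARAM() entries above are typically exposed on Linux as
 * module parameters under /sys/module/zfs/parameters/.  For example, to let
 * roughly 32 MiB of prefetched data build up ahead of the main send thread
 * (double the default of SPA_MAXBLOCKSIZE), one could do (illustrative):
 *
 *	echo 33554432 > /sys/module/zfs/parameters/zfs_send_queue_length
 *
 * Note that the reader queue is sized as MAX(zfs_send_queue_length,
 * 2 * zfs_max_recordsize), so values smaller than two maximum-sized records
 * are effectively rounded up.
 */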