/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2022 Axcient.
 */

#include <sys/arc.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zvol.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
#include <sys/zfs_file.h>

static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static uint_t zfs_recv_queue_ff = 20;
static uint_t zfs_recv_write_batch_size = 1024 * 1024;
static int zfs_recv_best_effort_corrective = 0;
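
/*
 * Notes on the tunables above (zfs(4) has the authoritative text):
 * zfs_recv_queue_length bounds, in bytes, the queue of records read
 * ahead of the receive writer thread, and zfs_recv_queue_ff is that
 * queue's fill fraction. zfs_recv_write_batch_size caps how much data
 * is written in one DMU transaction. zfs_recv_best_effort_corrective,
 * when nonzero, makes corrective ("healing") receives best-effort: the
 * stream's snapshot GUID need not match the target's, and records that
 * fail to heal are skipped rather than failing the receive.
 */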

static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";

typedef enum {
	ORNS_NO,
	ORNS_YES,
	ORNS_MAYBE
} or_need_sync_t;

static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
    void *buf);

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a WRITE or SPILL, pointer to the abd containing the
	 * payload.
	 */
	abd_t *abd;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};
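
/*
 * A receive_record_arg is produced by the thread reading the send stream
 * and consumed by the receive writer thread, which pulls records off the
 * rwa->q bqueue (below) and applies them to the target objset.
 */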

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three members are used to signal to the main thread when
	 * we're done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	const char *tofs;
	boolean_t heal;
	boolean_t resumable;
	boolean_t raw;	/* DMU_BACKUP_FEATURE_RAW set */
	boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
	boolean_t full;	/* this is a full send stream */
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	list_t write_batch;

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	boolean_t or_crypt_params_present;
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
	zio_t *heal_pio;

	/* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
	or_need_sync_t or_need_sync;
};

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	proc_t *drba_proc;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;

static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_REDACT:
		DO64(drr_redact.drr_object);
		DO64(drr_redact.drr_offset);
		DO64(drr_redact.drr_length);
		DO64(drr_redact.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Check that the new stream we're trying to receive is redacted with respect to
 * a subset of the snapshots that the origin was redacted with respect to. For
 * the reasons behind this, see the man page on redacted zfs sends and receives.
 */
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
    uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
	/*
	 * Short circuit the comparison; if we are redacted with respect to
	 * more snapshots than the origin, we can't be redacted with respect
	 * to a subset.
	 */
	if (num_redact_snaps > origin_num_snaps) {
		return (B_FALSE);
	}

	for (int i = 0; i < num_redact_snaps; i++) {
		if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
		    redact_snaps[i])) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}
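
/*
 * For example, if the origin was redacted with respect to snapshots
 * {A, B, C}, then a stream redacted with respect to {A, B} is compatible,
 * but a stream redacted with respect to {A, D} is not, because D is not
 * in the origin's redaction list.
 */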

static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
	uint64_t *origin_snaps;
	uint64_t origin_num_snaps;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	int err = 0;
	boolean_t ret = B_TRUE;
	uint64_t *redact_snaps;
	uint_t numredactsnaps;

	/*
	 * If this is a full send stream, we're safe no matter what.
	 */
	if (drrb->drr_fromguid == 0)
		return (ret);

	VERIFY(dsl_dataset_get_uint64_array_feature(origin,
	    SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));

	if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
	    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
	    0) {
		/*
		 * If the send stream was sent from the redaction bookmark or
		 * the redacted version of the dataset, then we're safe. Verify
		 * that this is from a compatible redaction bookmark or
		 * redacted dataset.
		 */
		if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
		    redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		/*
		 * If the stream is redacted, it must be redacted with respect
		 * to a subset of what the origin is redacted with respect to.
		 * See case number 2 in the zfs man page section on redacted zfs
		 * send.
		 */
		err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);

		if (err != 0 || !compatible_redact_snaps(origin_snaps,
		    origin_num_snaps, redact_snaps, numredactsnaps)) {
			err = EINVAL;
		}
	} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
	    drrb->drr_toguid)) {
		/*
		 * If the stream isn't redacted but the origin is, this must be
		 * one of the snapshots the origin is redacted with respect to.
		 * See case number 1 in the zfs man page section on redacted zfs
		 * send.
		 */
		err = EINVAL;
	}

	if (err != 0)
		ret = B_FALSE;
	return (ret);
}

/*
 * If we previously received a stream with --large-block, we don't support
 * receiving an incremental on top of it without --large-block. This avoids
 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
 * records.
 */
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
	    !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
	return (0);
}

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t obj;
	uint64_t children;
	int error;
	dsl_dataset_t *snap;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &obj);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist if we're not healing it. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &obj);
	if (drba->drba_cookie->drc_heal) {
		if (error != 0)
			return (error);
	} else if (error != ENOENT) {
		return (error == 0 ? SET_ERROR(EEXIST) : error);
	}

	/* Must not have children if receiving a ZVOL. */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred, drba->drba_proc);
	if (error != 0)
		return (error);

	if (drba->drba_cookie->drc_heal) {
		/* Encryption is incompatible with embedded data. */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Healing is not supported when in 'force' mode. */
		if (drba->drba_cookie->drc_force)
			return (SET_ERROR(EINVAL));

		/* Must have keys loaded if doing encrypted non-raw recv. */
		if (encrypted && !raw) {
			if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
			    NULL, NULL) != 0)
				return (SET_ERROR(EACCES));
		}

		error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
		if (error != 0)
			return (error);

		/*
		 * When not doing best-effort corrective recv, healing can only
		 * be done if the send stream is for the same snapshot as the
		 * one we are trying to heal.
		 */
		if (zfs_recv_best_effort_corrective == 0 &&
		    drba->drba_cookie->drc_drrb->drr_toguid !=
		    dsl_dataset_phys(snap)->ds_guid) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(ENOTSUP));
		}
		dsl_dataset_rele(snap, FTAG);
	} else if (fromguid != 0) {
		/* Sanity check the incremental recv */
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't perform a raw receive on top of a non-raw receive */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap. Raw sends have an
			 * additional constraint that requires that
			 * no "noop" snapshots exist between fromsnap
			 * and tosnap for the IVset checking code to
			 * work properly.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap) ||
			    (raw &&
			    dsl_dataset_phys(ds)->ds_prev_snap_obj !=
			    snap->ds_object)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		if (dsl_dataset_feature_is_active(snap,
		    SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
		    snap)) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_check_large_blocks(snap, featureflags);
		if (error != 0) {
			dsl_dataset_rele(snap, FTAG);
			return (error);
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* If full and not healing then must be forced. */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}
	}

	return (0);
}

/*
 * Check that any feature flags used in the data stream we're receiving are
 * supported by the pool we are receiving into.
 *
 * Note that some of the features we explicitly check here have additional
 * (implicit) features they depend on, but those dependencies are enforced
 * through the zfeature_register() calls declaring the features that we
 * explicitly check.
 */
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
	/*
	 * Check if there are any unsupported feature flags.
	 */
	if (!DMU_STREAM_SUPPORTED(featureflags)) {
		return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
	}

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
	 * and large_dnodes in the stream can only be used if those pool
	 * features are enabled because we don't attempt to decompress /
	 * un-embed / un-mooch / split up the blocks / dnodes during the
	 * receive process.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/*
	 * Receiving redacted streams requires that redacted datasets are
	 * enabled.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
		return (SET_ERROR(ENOTSUP));

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
	if (error != 0)
		return (error);

	/* Resumable receives require extensible datasets */
	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		/*
		 * We support unencrypted datasets below encrypted ones now,
		 * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
		 * with a dataset we may encrypt.
		 */
		if (drba->drba_dcp == NULL ||
		    drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
			dsflags |= DS_HOLD_FLAG_DECRYPT;
		}
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/* healing recv must be done "into" an existing snapshot */
		if (drba->drba_cookie->drc_heal == B_TRUE)
			return (SET_ERROR(ENOTSUP));

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin != NULL &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
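		/* e.g. for tofs "pool/fs/child", hold the parent "pool/fs" */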
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (e.g. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}

			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}

			/*
			 * If the origin is redacted we need to verify that this
			 * send stream can safely be received on top of the
			 * origin.
			 */
			if (dsl_dataset_feature_is_active(origin,
			    SPA_FEATURE_REDACTED_DATASETS)) {
				if (!redact_check(drba, origin)) {
					dsl_dataset_rele_flags(origin, dsflags,
					    FTAG);
					dsl_dataset_rele_flags(ds, dsflags,
					    FTAG);
					return (SET_ERROR(EINVAL));
				}
			}

			error = recv_check_large_blocks(ds, featureflags);
			if (error != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}

			dsl_dataset_rele_flags(origin, dsflags, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	const char *tofs = drc->drc_tofs;
	uint64_t featureflags = drc->drc_featureflags;
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drrb->drr_fromguid == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* Create temporary clone unless we're doing corrective recv */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}
		if (drc->drc_heal) {
			/* When healing we want to use the provided snapshot */
			VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
			    &dsobj));
		} else {
			dsobj = dsl_dataset_create_sync(ds->ds_dir,
			    recv_clone_name, snap, crflags, drba->drba_cred,
			    dcp, tx);
		}
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drc->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
	    &newds));
	if (dsl_dataset_feature_is_active(newds,
	    SPA_FEATURE_REDACTED_DATASETS)) {
		/*
		 * If the origin dataset is redacted, the child will be redacted
		 * when we create it. We clear the new dataset's
		 * redaction info; if it should be redacted, we'll fill
		 * in its information later.
		 */
		dsl_dataset_deactivate_feature(newds,
		    SPA_FEATURE_REDACTED_DATASETS, tx);
	}
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drc->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}

		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
		    &numredactsnaps) == 0) {
			VERIFY0(zap_add(mos, dsobj,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
			    sizeof (*redact_snaps), numredactsnaps,
			    redact_snaps, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
		dsl_dataset_activate_redaction(newds, redact_snaps,
		    numredactsnaps, tx);
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
	    !drc->drc_heal) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;
	drba->drba_cookie->drc_os = os;

	spa_history_log_internal_ds(newds, "receive", tx, " ");
}

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drc->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	dsl_dataset_t *ds;
	const char *tofs = drc->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/*
	 * This is mostly a sanity check since we should have already done
	 * these checks during a previous attempt to receive the data.
	 */
	error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
	    dp->dp_spa);
	if (error != 0)
		return (error);

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	boolean_t recvexist = B_TRUE;
	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		recvexist = B_FALSE;
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/*
	 * Resume of full/newfs recv on existing dataset should be done with
	 * force flag
	 */
	if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running. If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
		drc->drc_fromsnapobj = ds->ds_prev->ds_object;

	/*
	 * If we're resuming, and the send is redacted, then the original send
	 * must have been redacted, and must have been redacted with respect to
	 * the same snapshots.
	 */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t num_ds_redact_snaps;
		uint64_t *ds_redact_snaps;

		uint_t num_stream_redact_snaps;
		uint64_t *stream_redact_snaps;

		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
		    &num_stream_redact_snaps) != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		if (!dsl_dataset_get_uint64_array_feature(ds,
		    SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
		    &ds_redact_snaps)) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		for (int i = 0; i < num_ds_redact_snaps; i++) {
			if (!redact_snaps_contains(ds_redact_snaps,
			    num_ds_redact_snaps, stream_redact_snaps[i])) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}
	}

	error = recv_check_large_blocks(ds, drc->drc_featureflags);
	if (error != 0) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (error);
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
	    recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
	    != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
		    &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	ASSERT(DS_IS_INCONSISTENT(ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;
	VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
	drba->drba_cookie->drc_should_save = B_TRUE;

	spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(const char *tofs, const char *tosnap,
    dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
    boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
    const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
    offset_t *voffp)
{
	dmu_recv_begin_arg_t drba = { 0 };
	int err = 0;

	memset(drc, 0, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_heal = heal;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_proc = curproc;
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drc->drc_fp = fp;
	drc->drc_voff = *voffp;
	drc->drc_featureflags =
	    DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;

	/*
	 * Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
	 * configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
	 * upper limit. Systems with less than 1GB of RAM will see a lower
	 * limit from `arc_all_memory() / 4`.
	 */
	if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
		return (E2BIG);

	if (payloadlen != 0) {
		void *payload = vmem_alloc(payloadlen, KM_SLEEP);
		/*
		 * For compatibility with recursive send streams, we don't do
		 * this here if the stream could be part of a package. Instead,
		 * we'll do it in dmu_recv_stream. If we pull the next header
		 * too early, and it's the END record, we break the `recv_skip`
		 * logic.
		 */

		err = receive_read_payload_and_next_header(drc, payloadlen,
		    payload);
		if (err != 0) {
			vmem_free(payload, payloadlen);
			return (err);
		}
		err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
		    KM_SLEEP);
		vmem_free(payload, payloadlen);
		if (err != 0) {
			kmem_free(drc->drc_next_rrd,
			    sizeof (*drc->drc_next_rrd));
			return (err);
		}
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();
	drba.drba_proc = curproc;

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
	} else {
		/*
		 * For non-raw, non-incremental, non-resuming receives the
		 * user can specify encryption parameters on the command line
		 * with "zfs recv -o". For these receives we create a dcp and
		 * pass it to the sync task. Creating the dcp will implicitly
		 * remove the encryption params from the localprops nvlist,
		 * which avoids errors when trying to set these normally
		 * read-only properties. Any other kind of receive that
		 * attempts to set these properties will fail as a result.
		 */
		if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_RAW) == 0 &&
		    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
			err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
			    localprops, hidden_args, &drba.drba_dcp);
		}

		if (err == 0) {
			err = dsl_sync_task(tofs,
			    dmu_recv_begin_check, dmu_recv_begin_sync,
			    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
			dsl_crypto_params_free(drba.drba_dcp, !!err);
		}
	}

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		nvlist_free(drc->drc_begin_nvl);
	}
	return (err);
}
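
/*
 * A sketch of the expected calling pattern per the NB above (see
 * zfs_ioc_recv() and friends in zfs_ioctl.c for the real callers):
 *
 *	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, heal,
 *	    resumable, localprops, hidden_args, origin, &drc, fp, voffp);
 *	if (err == 0) {
 *		// must go on to dmu_recv_stream() (and dmu_recv_end()),
 *		// otherwise the dataset holds taken here are leaked
 *	}
 */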

/*
 * Holds data needed for the corrective recv callback.
 */
typedef struct cr_cb_data {
	uint64_t size;
	zbookmark_phys_t zb;
	spa_t *spa;
} cr_cb_data_t;

static void
corrective_read_done(zio_t *zio)
{
	cr_cb_data_t *data = zio->io_private;
	/* Corruption corrected; update error log if needed */
	if (zio->io_error == 0) {
		spa_remove_error(data->spa, &data->zb,
		    BP_GET_LOGICAL_BIRTH(zio->io_bp));
	}
	kmem_free(data, sizeof (cr_cb_data_t));
	abd_free(zio->io_abd);
}
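
/*
 * An outline of the healing write performed below: the record's payload
 * is first transformed to match the on-disk block exactly (decompressed
 * if the stream was compressed, recompressed with the bp's compression,
 * and re-encrypted with the bp's salt/IV/MAC when the destination is
 * encrypted), then rewritten in place with zio_rewrite() only if its
 * checksum matches the bp's, and finally verified by re-reading the bp.
 */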

/*
 * zio_rewrite the data pointed to by bp with the data from the rrd's abd.
 */
static int
do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
    struct receive_record_arg *rrd, blkptr_t *bp)
{
	int err;
	zio_t *io;
	zbookmark_phys_t zb;
	dnode_t *dn;
	abd_t *abd = rrd->abd;
	zio_cksum_t bp_cksum = bp->blk_cksum;
	zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
	    ZIO_FLAG_CANFAIL;

	if (rwa->raw)
		flags |= ZIO_FLAG_RAW;

	err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
	if (err != 0)
		return (err);
	SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
	    dbuf_whichblock(dn, 0, drrw->drr_offset));
	dnode_rele(dn, FTAG);

	if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
		/* Decompress the stream data */
		abd_t *dabd = abd_alloc_linear(
		    drrw->drr_logical_size, B_FALSE);
		err = zio_decompress_data(drrw->drr_compressiontype,
		    abd, abd_to_buf(dabd), abd_get_size(abd),
		    abd_get_size(dabd), NULL);

		if (err != 0) {
			abd_free(dabd);
			return (err);
		}
		/* Swap in the newly decompressed data into the abd */
		abd_free(abd);
		abd = dabd;
	}

	if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		/* Recompress the data */
		abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
		    B_FALSE);
		void *buf = abd_to_buf(cabd);
		uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
		    abd, &buf, abd_get_size(abd),
		    rwa->os->os_complevel);
		abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
		/* Swap in newly compressed data into the abd */
		abd_free(abd);
		abd = cabd;
		flags |= ZIO_FLAG_RAW_COMPRESS;
	}

	/*
	 * The stream is not encrypted but the data on-disk is.
	 * We need to re-encrypt the buf using the same
	 * encryption type, salt, iv, and mac that was used to encrypt
	 * the block previously.
	 */
	if (!rwa->raw && BP_USES_CRYPT(bp)) {
		dsl_dataset_t *ds;
		dsl_crypto_key_t *dck = NULL;
		uint8_t salt[ZIO_DATA_SALT_LEN];
		uint8_t iv[ZIO_DATA_IV_LEN];
		uint8_t mac[ZIO_DATA_MAC_LEN];
		boolean_t no_crypt = B_FALSE;
		dsl_pool_t *dp = dmu_objset_pool(rwa->os);
		abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);

		zio_crypt_decode_params_bp(bp, salt, iv);
		zio_crypt_decode_mac_bp(bp, mac);

		dsl_pool_config_enter(dp, FTAG);
		err = dsl_dataset_hold_flags(dp, rwa->tofs,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (err != 0) {
			dsl_pool_config_exit(dp, FTAG);
			abd_free(eabd);
			return (SET_ERROR(EACCES));
		}

		/* Look up the key from the spa's keystore */
		err = spa_keystore_lookup_key(rwa->os->os_spa,
		    zb.zb_objset, FTAG, &dck);
		if (err != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
			    FTAG);
			dsl_pool_config_exit(dp, FTAG);
			abd_free(eabd);
			return (SET_ERROR(EACCES));
		}

		err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
		    BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
		    mac, abd_get_size(abd), abd, eabd, &no_crypt);

		spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		dsl_pool_config_exit(dp, FTAG);

		ASSERT0(no_crypt);
		if (err != 0) {
			abd_free(eabd);
			return (err);
		}
		/* Swap in the newly encrypted data into the abd */
		abd_free(abd);
		abd = eabd;

		/*
		 * We want to prevent zio_rewrite() from trying to
		 * encrypt the data again
		 */
		flags |= ZIO_FLAG_RAW_ENCRYPT;
	}
	rrd->abd = abd;

	io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_LOGICAL_BIRTH(bp), bp,
	    abd, BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags,
	    &zb);

	ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
	    abd_get_size(abd) == BP_GET_PSIZE(bp));

	/* compute new bp checksum value and make sure it matches the old one */
	zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
	if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
		zio_destroy(io);
		if (zfs_recv_best_effort_corrective != 0)
			return (0);
		return (SET_ERROR(ECKSUM));
	}

	/* Correct the corruption in place */
	err = zio_wait(io);
	if (err == 0) {
		cr_cb_data_t *cb_data =
		    kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
		cb_data->spa = rwa->os->os_spa;
		cb_data->size = drrw->drr_logical_size;
		cb_data->zb = zb;
		/* Test if healing worked by re-reading the bp */
		err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
		    abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
		    drrw->drr_logical_size, corrective_read_done,
		    cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
	}
	if (err != 0 && zfs_recv_best_effort_corrective != 0)
		err = 0;

	return (err);
}

static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid = len - done;
		zfs_file_t *fp = drc->drc_fp;
		int err = zfs_file_read(fp, (char *)buf + done,
		    len - done, &resid);
		if (err == 0 && resid == len - done) {
			/*
			 * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
			 * that the receive was interrupted and can
			 * potentially be resumed.
			 */
			err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
		}
		drc->drc_voff += len - done - resid;
		done = len - resid;
		if (err != 0)
			return (err);
	}

	drc->drc_bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}

static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
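
/*
 * For example, with DN_OLD_MAX_BONUSLEN == 320 and a 128-byte block
 * pointer (SPA_BLKPTRSHIFT == 7), a 64-byte bonus buffer deduces to
 * 1 + ((320 - 64) >> 7) == 3 block pointers, while a full 320-byte
 * bonus leaves room for only one.
 */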

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset. This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
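
/*
 * Note (an assumption worth checking against dsl_dataset_sync()): the
 * ds_resume_* entries recorded above are indexed by txg and are written
 * out to the dataset's ZAP as that txg syncs, which is what lets an
 * interrupted receive report a resume token.
 */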

static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
    dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
    const void *new_bonus, boolean_t *samegenp)
{
	zfs_file_info_t zoi;
	int err;

	dmu_buf_t *old_bonus_dbuf;
	err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
	if (err != 0)
		return (err);
	err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
	    &zoi);
	dmu_buf_rele(old_bonus_dbuf, FTAG);
	if (err != 0)
		return (err);
	uint64_t old_gen = zoi.zfi_generation;

	err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
	if (err != 0)
		return (err);
	uint64_t new_gen = zoi.zfi_generation;

	*samegenp = (old_gen == new_gen);
	return (0);
}

static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
    const struct drr_object *drro, const dmu_object_info_t *doi,
    const void *bonus_data,
    uint64_t *object_to_hold, uint32_t *new_blksz)
{
	uint32_t indblksz = drro->drr_indblkshift ?
	    1ULL << drro->drr_indblkshift : 0;
	int nblkptr = deduce_nblkptr(drro->drr_bonustype,
	    drro->drr_bonuslen);
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
	boolean_t do_free_range = B_FALSE;
	int err;

	*object_to_hold = drro->drr_object;

	/* nblkptr should be bounded by the bonus size and type */
	if (rwa->raw && nblkptr != drro->drr_nblkptr)
		return (SET_ERROR(EINVAL));

	/*
	 * After the previous send stream, the sending system may
	 * have freed this object, and then happened to re-allocate
	 * this object number in a later txg. In this case, we are
	 * receiving a different logical file, and the block size may
	 * appear to be different. i.e. we may have a different
	 * block size for this object than what the send stream says.
	 * In this case we need to remove the object's contents,
	 * so that its structure can be changed and then its contents
	 * entirely replaced by subsequent WRITE records.
	 *
	 * If this is a -L (--large-block) incremental stream, and
	 * the previous stream was not -L, the block size may appear
	 * to increase. i.e. we may have a smaller block size for
	 * this object than what the send stream says. In this case
	 * we need to keep the object's contents and block size
	 * intact, so that we don't lose parts of the object's
	 * contents that are not changed by this incremental send
	 * stream.
	 *
	 * We can distinguish between the two above cases by using
	 * the ZPL's generation number (see
	 * receive_object_is_same_generation()). However, we only
	 * want to rely on the generation number when absolutely
	 * necessary, because with raw receives, the generation is
	 * encrypted. We also want to minimize dependence on the
	 * ZPL, so that other types of datasets can also be received
	 * (e.g. ZVOLs, although note that ZVOLs currently do not
	 * reallocate their objects or change their structure).
	 * Therefore, we check a number of different cases where we
	 * know it is safe to discard the object's contents, before
	 * using the ZPL's generation number to make the above
	 * distinction.
	 */
	if (drro->drr_blksz != doi->doi_data_block_size) {
		if (rwa->raw) {
			/*
			 * RAW streams always have large blocks, so
			 * we are sure that the data is not needed
			 * due to changing --large-block to be on.
			 * Which is fortunate since the bonus buffer
			 * (which contains the ZPL generation) is
			 * encrypted, and the key might not be
			 * loaded.
			 */
			do_free_range = B_TRUE;
		} else if (rwa->full) {
			/*
			 * This is a full send stream, so it always
			 * replaces what we have. Even if the
			 * generation numbers happen to match, this
			 * can not actually be the same logical file.
			 * This is relevant when receiving a full
			 * send as a clone.
			 */
			do_free_range = B_TRUE;
		} else if (drro->drr_type !=
		    DMU_OT_PLAIN_FILE_CONTENTS ||
		    doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
			/*
			 * PLAIN_FILE_CONTENTS are the only type of
			 * objects that have ever been stored with
			 * large blocks, so we don't need the special
			 * logic below. ZAP blocks can shrink (when
			 * there's only one block), so we don't want
			 * to hit the error below about block size
			 * only increasing.
			 */
			do_free_range = B_TRUE;
		} else if (doi->doi_max_offset <=
		    doi->doi_data_block_size) {
			/*
			 * There is only one block. We can free it,
			 * because its contents will be replaced by a
			 * WRITE record. This can not be the no-L ->
			 * -L case, because the no-L case would have
			 * resulted in multiple blocks. If we
			 * supported -L -> no-L, it would not be safe
			 * to free the file's contents. Fortunately,
			 * that is not allowed (see
			 * recv_check_large_blocks()).
			 */
			do_free_range = B_TRUE;
		} else {
			boolean_t is_same_gen;
			err = receive_object_is_same_generation(rwa->os,
			    drro->drr_object, doi->doi_bonus_type,
			    drro->drr_bonustype, bonus_data, &is_same_gen);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			if (is_same_gen) {
				/*
				 * This is the same logical file, and
				 * the block size must be increasing.
				 * It could only decrease if
				 * --large-block was changed to be
				 * off, which is checked in
				 * recv_check_large_blocks().
				 */
				if (drro->drr_blksz <=
				    doi->doi_data_block_size)
					return (SET_ERROR(EINVAL));
				/*
				 * We keep the existing blocksize and
				 * contents.
				 */
				*new_blksz =
				    doi->doi_data_block_size;
			} else {
				do_free_range = B_TRUE;
			}
		}
	}

	/* nblkptr can only decrease if the object was reallocated */
	if (nblkptr < doi->doi_nblkptr)
		do_free_range = B_TRUE;

	/* number of slots can only change on reallocation */
	if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
		do_free_range = B_TRUE;

	/*
	 * For raw sends we also check a few other fields to
	 * ensure we are preserving the objset structure exactly
	 * as it was on the send side:
	 *     - A changed indirect block size
	 *     - A smaller nlevels
	 */
	if (rwa->raw) {
		if (indblksz != doi->doi_metadata_block_size)
			do_free_range = B_TRUE;
		if (drro->drr_nlevels < doi->doi_indirection)
			do_free_range = B_TRUE;
	}

	if (do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    0, DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The dmu does not currently support decreasing nlevels or changing
	 * indirect block size if there is already one, same as changing the
	 * number of dnode slots on an object. For non-raw sends this
	 * does not matter and the new object can just use the previous one's
	 * parameters. For raw sends, however, the structure of the received
	 * dnode (including indirects and dnode slots) must match that of the
	 * send side. Therefore, instead of using dmu_object_reclaim(), we
	 * must free the object completely and call dmu_object_claim_dnsize()
	 * instead.
	 */
	if ((rwa->raw && ((doi->doi_indirection > 1 &&
	    indblksz != doi->doi_metadata_block_size) ||
	    drro->drr_nlevels < doi->doi_indirection)) ||
	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
		err = dmu_free_long_object(rwa->os, drro->drr_object);
		if (err != 0)
			return (SET_ERROR(EINVAL));

		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		*object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * For raw receives, free everything beyond the new incoming
	 * maxblkid. Normally this would be done with a DRR_FREE
	 * record that would come after this DRR_OBJECT record is
	 * processed. However, for raw receives we manually set the
	 * maxblkid from the drr_maxblkid and so we must first free
	 * everything above that blkid to ensure the DMU is always
	 * consistent with itself. We will never free the first block
	 * of the object here because a maxblkid of 0 could indicate
	 * an object with a single block or one with no blocks. This
	 * free may be skipped when dmu_free_long_range() was called
	 * above since it covers the entire object's contents.
	 */
	if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
		    DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}
	return (0);
}
However, for raw receives we manually set the 1828 * maxblkid from the drr_maxblkid and so we must first free 1829 * everything above that blkid to ensure the DMU is always 1830 * consistent with itself. We will never free the first block 1831 * of the object here because a maxblkid of 0 could indicate 1832 * an object with a single block or one with no blocks. This 1833 * free may be skipped when dmu_free_long_range() was called 1834 * above since it covers the entire object's contents. 1835 */ 1836 if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) { 1837 err = dmu_free_long_range(rwa->os, drro->drr_object, 1838 (drro->drr_maxblkid + 1) * doi->doi_data_block_size, 1839 DMU_OBJECT_END); 1840 if (err != 0) 1841 return (SET_ERROR(EINVAL)); 1842 } 1843 return (0); 1844 } 1845 1846 noinline static int 1847 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, 1848 void *data) 1849 { 1850 dmu_object_info_t doi; 1851 dmu_tx_t *tx; 1852 int err; 1853 uint32_t new_blksz = drro->drr_blksz; 1854 uint8_t dn_slots = drro->drr_dn_slots != 0 ? 1855 drro->drr_dn_slots : DNODE_MIN_SLOTS; 1856 1857 if (drro->drr_type == DMU_OT_NONE || 1858 !DMU_OT_IS_VALID(drro->drr_type) || 1859 !DMU_OT_IS_VALID(drro->drr_bonustype) || 1860 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || 1861 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || 1862 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || 1863 drro->drr_blksz < SPA_MINBLOCKSIZE || 1864 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || 1865 drro->drr_bonuslen > 1866 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) || 1867 dn_slots > 1868 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) { 1869 return (SET_ERROR(EINVAL)); 1870 } 1871 1872 if (rwa->raw) { 1873 /* 1874 * We should have received a DRR_OBJECT_RANGE record 1875 * containing this block and stored it in rwa. 1876 */ 1877 if (drro->drr_object < rwa->or_firstobj || 1878 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots || 1879 drro->drr_raw_bonuslen < drro->drr_bonuslen || 1880 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT || 1881 drro->drr_nlevels > DN_MAX_LEVELS || 1882 drro->drr_nblkptr > DN_MAX_NBLKPTR || 1883 DN_SLOTS_TO_BONUSLEN(dn_slots) < 1884 drro->drr_raw_bonuslen) 1885 return (SET_ERROR(EINVAL)); 1886 } else { 1887 /* 1888 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN 1889 * record indicates this by setting DRR_FLAG_SPILL_BLOCK. 1890 */ 1891 if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) || 1892 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) { 1893 return (SET_ERROR(EINVAL)); 1894 } 1895 1896 if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 || 1897 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) { 1898 return (SET_ERROR(EINVAL)); 1899 } 1900 } 1901 1902 err = dmu_object_info(rwa->os, drro->drr_object, &doi); 1903 1904 if (err != 0 && err != ENOENT && err != EEXIST) 1905 return (SET_ERROR(EINVAL)); 1906 1907 if (drro->drr_object > rwa->max_object) 1908 rwa->max_object = drro->drr_object; 1909 1910 /* 1911 * If we are losing blkptrs or changing the block size this must 1912 * be a new file instance. We must clear out the previous file 1913 * contents before we can change this type of metadata in the dnode. 1914 * Raw receives will also check that the indirect structure of the 1915 * dnode hasn't changed. 
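* (For example, if the stream re-creates this object with fewer block
* pointers than the on-disk dnode currently has, the object must have
* been reallocated on the send side, so its old contents are freed
* before the dnode is reshaped.)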
1916 */ 1917 uint64_t object_to_hold; 1918 if (err == 0) { 1919 err = receive_handle_existing_object(rwa, drro, &doi, data, 1920 &object_to_hold, &new_blksz); 1921 if (err != 0) 1922 return (err); 1923 } else if (err == EEXIST) { 1924 /* 1925 * The object requested is currently an interior slot of a 1926 * multi-slot dnode. This will be resolved when the next txg 1927 * is synced out, since the send stream will have told us 1928 * to free this slot when we freed the associated dnode 1929 * earlier in the stream. 1930 */ 1931 txg_wait_synced(dmu_objset_pool(rwa->os), 0); 1932 1933 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT) 1934 return (SET_ERROR(EINVAL)); 1935 1936 /* object was freed and we are about to allocate a new one */ 1937 object_to_hold = DMU_NEW_OBJECT; 1938 } else { 1939 /* 1940 * If the only record in this range so far was DRR_FREEOBJECTS 1941 * with at least one actually freed object, it's possible that 1942 * the block will now be converted to a hole. We need to wait 1943 * for the txg to sync to prevent races. 1944 */ 1945 if (rwa->or_need_sync == ORNS_YES) 1946 txg_wait_synced(dmu_objset_pool(rwa->os), 0); 1947 1948 /* object is free and we are about to allocate a new one */ 1949 object_to_hold = DMU_NEW_OBJECT; 1950 } 1951 1952 /* Only relevant for the first object in the range */ 1953 rwa->or_need_sync = ORNS_NO; 1954 1955 /* 1956 * If this is a multi-slot dnode there is a chance that this 1957 * object will expand into a slot that is already used by 1958 * another object from the previous snapshot. We must free 1959 * these objects before we attempt to allocate the new dnode. 1960 */ 1961 if (dn_slots > 1) { 1962 boolean_t need_sync = B_FALSE; 1963 1964 for (uint64_t slot = drro->drr_object + 1; 1965 slot < drro->drr_object + dn_slots; 1966 slot++) { 1967 dmu_object_info_t slot_doi; 1968 1969 err = dmu_object_info(rwa->os, slot, &slot_doi); 1970 if (err == ENOENT || err == EEXIST) 1971 continue; 1972 else if (err != 0) 1973 return (err); 1974 1975 err = dmu_free_long_object(rwa->os, slot); 1976 if (err != 0) 1977 return (err); 1978 1979 need_sync = B_TRUE; 1980 } 1981 1982 if (need_sync) 1983 txg_wait_synced(dmu_objset_pool(rwa->os), 0); 1984 } 1985 1986 tx = dmu_tx_create(rwa->os); 1987 dmu_tx_hold_bonus(tx, object_to_hold); 1988 dmu_tx_hold_write(tx, object_to_hold, 0, 0); 1989 err = dmu_tx_assign(tx, TXG_WAIT); 1990 if (err != 0) { 1991 dmu_tx_abort(tx); 1992 return (err); 1993 } 1994 1995 if (object_to_hold == DMU_NEW_OBJECT) { 1996 /* Currently free, wants to be allocated */ 1997 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object, 1998 drro->drr_type, new_blksz, 1999 drro->drr_bonustype, drro->drr_bonuslen, 2000 dn_slots << DNODE_SHIFT, tx); 2001 } else if (drro->drr_type != doi.doi_type || 2002 new_blksz != doi.doi_data_block_size || 2003 drro->drr_bonustype != doi.doi_bonus_type || 2004 drro->drr_bonuslen != doi.doi_bonus_size) { 2005 /* Currently allocated, but with different properties */ 2006 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object, 2007 drro->drr_type, new_blksz, 2008 drro->drr_bonustype, drro->drr_bonuslen, 2009 dn_slots << DNODE_SHIFT, rwa->spill ? 2010 DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx); 2011 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) { 2012 /* 2013 * Currently allocated, the existing version of this object 2014 * may reference a spill block that is no longer allocated 2015 * at the source and needs to be freed. 
2016 */ 2017 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx); 2018 } 2019 2020 if (err != 0) { 2021 dmu_tx_commit(tx); 2022 return (SET_ERROR(EINVAL)); 2023 } 2024 2025 if (rwa->or_crypt_params_present) { 2026 /* 2027 * Set the crypt params for the buffer associated with this 2028 * range of dnodes. This causes the blkptr_t to have the 2029 * same crypt params (byteorder, salt, iv, mac) as on the 2030 * sending side. 2031 * 2032 * Since we are committing this tx now, it is possible for 2033 * the dnode block to end up on-disk with the incorrect MAC, 2034 * if subsequent objects in this block are received in a 2035 * different txg. However, since the dataset is marked as 2036 * inconsistent, no code paths will do a non-raw read (or 2037 * decrypt the block / verify the MAC). The receive code and 2038 * scrub code can safely do raw reads and verify the 2039 * checksum. They don't need to verify the MAC. 2040 */ 2041 dmu_buf_t *db = NULL; 2042 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE; 2043 2044 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os), 2045 offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT); 2046 if (err != 0) { 2047 dmu_tx_commit(tx); 2048 return (SET_ERROR(EINVAL)); 2049 } 2050 2051 dmu_buf_set_crypt_params(db, rwa->or_byteorder, 2052 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx); 2053 2054 dmu_buf_rele(db, FTAG); 2055 2056 rwa->or_crypt_params_present = B_FALSE; 2057 } 2058 2059 dmu_object_set_checksum(rwa->os, drro->drr_object, 2060 drro->drr_checksumtype, tx); 2061 dmu_object_set_compress(rwa->os, drro->drr_object, 2062 drro->drr_compress, tx); 2063 2064 /* handle more restrictive dnode structuring for raw recvs */ 2065 if (rwa->raw) { 2066 /* 2067 * Set the indirect block size, block shift, nlevels. 2068 * This will not fail because we ensured all of the 2069 * blocks were freed earlier if this is a new object. 2070 * For non-new objects block size and indirect block 2071 * shift cannot change and nlevels can only increase. 2072 */ 2073 ASSERT3U(new_blksz, ==, drro->drr_blksz); 2074 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object, 2075 drro->drr_blksz, drro->drr_indblkshift, tx)); 2076 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object, 2077 drro->drr_nlevels, tx)); 2078 2079 /* 2080 * Set the maxblkid. This will always succeed because 2081 * we freed all blocks beyond the new maxblkid above. 2082 */ 2083 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object, 2084 drro->drr_maxblkid, tx)); 2085 } 2086 2087 if (data != NULL) { 2088 dmu_buf_t *db; 2089 dnode_t *dn; 2090 uint32_t flags = DMU_READ_NO_PREFETCH; 2091 2092 if (rwa->raw) 2093 flags |= DMU_READ_NO_DECRYPT; 2094 2095 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn)); 2096 VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags)); 2097 2098 dmu_buf_will_dirty(db, tx); 2099 2100 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 2101 memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro)); 2102 2103 /* 2104 * Raw bonus buffers have their byteorder determined by the 2105 * DRR_OBJECT_RANGE record. 2106 */ 2107 if (rwa->byteswap && !rwa->raw) { 2108 dmu_object_byteswap_t byteswap = 2109 DMU_OT_BYTESWAP(drro->drr_bonustype); 2110 dmu_ot_byteswap[byteswap].ob_func(db->db_data, 2111 DRR_OBJECT_PAYLOAD_SIZE(drro)); 2112 } 2113 dmu_buf_rele(db, FTAG); 2114 dnode_rele(dn, FTAG); 2115 } 2116 2117 /* 2118 * If the receive fails, we want the resume stream to start with the 2119 * same record that we last successfully received. 
There is no way to
2120 * request resume from the object record, but we can benefit from the
2121 * fact that the sender always sends the object record before anything
2122 * else, after which it will "resend" data at offset 0 and resume normally.
2123 */
2124 save_resume_state(rwa, drro->drr_object, 0, tx);
2125
2126 dmu_tx_commit(tx);
2127
2128 return (0);
2129 }
2130
2131 noinline static int
2132 receive_freeobjects(struct receive_writer_arg *rwa,
2133 struct drr_freeobjects *drrfo)
2134 {
2135 uint64_t obj;
2136 int next_err = 0;
2137
2138 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2139 return (SET_ERROR(EINVAL));
2140
2141 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
2142 obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
2143 obj < DN_MAX_OBJECT && next_err == 0;
2144 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2145 dmu_object_info_t doi;
2146 int err;
2147
2148 err = dmu_object_info(rwa->os, obj, &doi);
2149 if (err == ENOENT)
2150 continue;
2151 else if (err != 0)
2152 return (err);
2153
2154 err = dmu_free_long_object(rwa->os, obj);
2155
2156 if (err != 0)
2157 return (err);
2158
2159 if (rwa->or_need_sync == ORNS_MAYBE)
2160 rwa->or_need_sync = ORNS_YES;
2161 }
2162 if (next_err != ESRCH)
2163 return (next_err);
2164 return (0);
2165 }
2166
2167 /*
2168 * Note: if this fails, the caller will clean up any records left on the
2169 * rwa->write_batch list.
2170 */
2171 static int
2172 flush_write_batch_impl(struct receive_writer_arg *rwa)
2173 {
2174 dnode_t *dn;
2175 int err;
2176
2177 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
2178 return (SET_ERROR(EINVAL));
2179
2180 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
2181 struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
2182
2183 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2184 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2185
2186 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
2187 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
2188
2189 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2190 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
2191 last_drrw->drr_offset - first_drrw->drr_offset +
2192 last_drrw->drr_logical_size);
2193 err = dmu_tx_assign(tx, TXG_WAIT);
2194 if (err != 0) {
2195 dmu_tx_abort(tx);
2196 dnode_rele(dn, FTAG);
2197 return (err);
2198 }
2199
2200 struct receive_record_arg *rrd;
2201 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
2202 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2203 abd_t *abd = rrd->abd;
2204
2205 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
2206
2207 if (drrw->drr_logical_size != dn->dn_datablksz) {
2208 /*
2209 * The WRITE record is larger than the object's block
2210 * size. We must be receiving an incremental
2211 * large-block stream into a dataset that previously did
2212 * a non-large-block receive. Lightweight writes must
2213 * be exactly one block, so we need to decompress the
2214 * data (if compressed) and do a normal dmu_write().
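* (Illustrative case: a 128K WRITE record arriving for an object that
* still has 8K blocks is decompressed if necessary and written with
* dmu_write_by_dnode(), which spans the sixteen existing blocks in
* one call.)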
2215 */ 2216 ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz); 2217 if (DRR_WRITE_COMPRESSED(drrw)) { 2218 abd_t *decomp_abd = 2219 abd_alloc_linear(drrw->drr_logical_size, 2220 B_FALSE); 2221 2222 err = zio_decompress_data( 2223 drrw->drr_compressiontype, 2224 abd, abd_to_buf(decomp_abd), 2225 abd_get_size(abd), 2226 abd_get_size(decomp_abd), NULL); 2227 2228 if (err == 0) { 2229 dmu_write_by_dnode(dn, 2230 drrw->drr_offset, 2231 drrw->drr_logical_size, 2232 abd_to_buf(decomp_abd), tx); 2233 } 2234 abd_free(decomp_abd); 2235 } else { 2236 dmu_write_by_dnode(dn, 2237 drrw->drr_offset, 2238 drrw->drr_logical_size, 2239 abd_to_buf(abd), tx); 2240 } 2241 if (err == 0) 2242 abd_free(abd); 2243 } else { 2244 zio_prop_t zp = {0}; 2245 dmu_write_policy(rwa->os, dn, 0, 0, &zp); 2246 2247 zio_flag_t zio_flags = 0; 2248 2249 if (rwa->raw) { 2250 zp.zp_encrypt = B_TRUE; 2251 zp.zp_compress = drrw->drr_compressiontype; 2252 zp.zp_byteorder = ZFS_HOST_BYTEORDER ^ 2253 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^ 2254 rwa->byteswap; 2255 memcpy(zp.zp_salt, drrw->drr_salt, 2256 ZIO_DATA_SALT_LEN); 2257 memcpy(zp.zp_iv, drrw->drr_iv, 2258 ZIO_DATA_IV_LEN); 2259 memcpy(zp.zp_mac, drrw->drr_mac, 2260 ZIO_DATA_MAC_LEN); 2261 if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) { 2262 zp.zp_nopwrite = B_FALSE; 2263 zp.zp_copies = MIN(zp.zp_copies, 2264 SPA_DVAS_PER_BP - 1); 2265 } 2266 zio_flags |= ZIO_FLAG_RAW; 2267 } else if (DRR_WRITE_COMPRESSED(drrw)) { 2268 ASSERT3U(drrw->drr_compressed_size, >, 0); 2269 ASSERT3U(drrw->drr_logical_size, >=, 2270 drrw->drr_compressed_size); 2271 zp.zp_compress = drrw->drr_compressiontype; 2272 zio_flags |= ZIO_FLAG_RAW_COMPRESS; 2273 } else if (rwa->byteswap) { 2274 /* 2275 * Note: compressed blocks never need to be 2276 * byteswapped, because WRITE records for 2277 * metadata blocks are never compressed. The 2278 * exception is raw streams, which are written 2279 * in the original byteorder, and the byteorder 2280 * bit is preserved in the BP by setting 2281 * zp_byteorder above. 2282 */ 2283 dmu_object_byteswap_t byteswap = 2284 DMU_OT_BYTESWAP(drrw->drr_type); 2285 dmu_ot_byteswap[byteswap].ob_func( 2286 abd_to_buf(abd), 2287 DRR_WRITE_PAYLOAD_SIZE(drrw)); 2288 } 2289 2290 /* 2291 * Since this data can't be read until the receive 2292 * completes, we can do a "lightweight" write for 2293 * improved performance. 2294 */ 2295 err = dmu_lightweight_write_by_dnode(dn, 2296 drrw->drr_offset, abd, &zp, zio_flags, tx); 2297 } 2298 2299 if (err != 0) { 2300 /* 2301 * This rrd is left on the list, so the caller will 2302 * free it (and the abd). 2303 */ 2304 break; 2305 } 2306 2307 /* 2308 * Note: If the receive fails, we want the resume stream to 2309 * start with the same record that we last successfully 2310 * received (as opposed to the next record), so that we can 2311 * verify that we are resuming from the correct location. 
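* For instance, if the last write committed here was for
* (object 5, offset 1M), the resume token derived from this saved
* state directs the sender to restart the stream at exactly that
* record.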
2312 */
2313 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2314
2315 list_remove(&rwa->write_batch, rrd);
2316 kmem_free(rrd, sizeof (*rrd));
2317 }
2318
2319 dmu_tx_commit(tx);
2320 dnode_rele(dn, FTAG);
2321 return (err);
2322 }
2323
2324 noinline static int
2325 flush_write_batch(struct receive_writer_arg *rwa)
2326 {
2327 if (list_is_empty(&rwa->write_batch))
2328 return (0);
2329 int err = rwa->err;
2330 if (err == 0)
2331 err = flush_write_batch_impl(rwa);
2332 if (err != 0) {
2333 struct receive_record_arg *rrd;
2334 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2335 abd_free(rrd->abd);
2336 kmem_free(rrd, sizeof (*rrd));
2337 }
2338 }
2339 ASSERT(list_is_empty(&rwa->write_batch));
2340 return (err);
2341 }
2342
2343 noinline static int
2344 receive_process_write_record(struct receive_writer_arg *rwa,
2345 struct receive_record_arg *rrd)
2346 {
2347 int err = 0;
2348
2349 ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
2350 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2351
2352 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2353 !DMU_OT_IS_VALID(drrw->drr_type))
2354 return (SET_ERROR(EINVAL));
2355
2356 if (rwa->heal) {
2357 blkptr_t *bp;
2358 dmu_buf_t *dbp;
2359 int flags = DB_RF_CANFAIL;
2360
2361 if (rwa->raw)
2362 flags |= DB_RF_NO_DECRYPT;
2363
2364 if (rwa->byteswap) {
2365 dmu_object_byteswap_t byteswap =
2366 DMU_OT_BYTESWAP(drrw->drr_type);
2367 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
2368 DRR_WRITE_PAYLOAD_SIZE(drrw));
2369 }
2370
2371 err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
2372 drrw->drr_offset, FTAG, &dbp);
2373 if (err != 0)
2374 return (err);
2375
2376 /* Try to read the object to see if it needs healing */
2377 err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
2378 /*
2379 * We only try to heal when dbuf_read() returns ECKSUM.
2380 * Other errors (even EIO) get returned to the caller.
2381 * EIO indicates that the device is not present/accessible,
2382 * so writing to it will likely fail.
2383 * If the block is healthy, we don't want to overwrite it
2384 * unnecessarily.
2385 */
2386 if (err != ECKSUM) {
2387 dmu_buf_rele(dbp, FTAG);
2388 return (err);
2389 }
2390 /* Make sure the on-disk block and recv record sizes match */
2391 if (drrw->drr_logical_size != dbp->db_size) {
2392 err = ENOTSUP;
2393 dmu_buf_rele(dbp, FTAG);
2394 return (err);
2395 }
2396 /* Get the block pointer for the corrupted block */
2397 bp = dmu_buf_get_blkptr(dbp);
2398 err = do_corrective_recv(rwa, drrw, rrd, bp);
2399 dmu_buf_rele(dbp, FTAG);
2400 return (err);
2401 }
2402
2403 /*
2404 * For resuming to work, records must be in increasing order
2405 * by (object, offset).
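* E.g. a write for (object 5, offset 0) must not arrive after
* (object 5, offset 128K), or after any write for object 6; such
* out-of-order streams are rejected below with EINVAL.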
2406 */ 2407 if (drrw->drr_object < rwa->last_object || 2408 (drrw->drr_object == rwa->last_object && 2409 drrw->drr_offset < rwa->last_offset)) { 2410 return (SET_ERROR(EINVAL)); 2411 } 2412 2413 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch); 2414 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write; 2415 uint64_t batch_size = 2416 MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2); 2417 if (first_rrd != NULL && 2418 (drrw->drr_object != first_drrw->drr_object || 2419 drrw->drr_offset >= first_drrw->drr_offset + batch_size)) { 2420 err = flush_write_batch(rwa); 2421 if (err != 0) 2422 return (err); 2423 } 2424 2425 rwa->last_object = drrw->drr_object; 2426 rwa->last_offset = drrw->drr_offset; 2427 2428 if (rwa->last_object > rwa->max_object) 2429 rwa->max_object = rwa->last_object; 2430 2431 list_insert_tail(&rwa->write_batch, rrd); 2432 /* 2433 * Return EAGAIN to indicate that we will use this rrd again, 2434 * so the caller should not free it 2435 */ 2436 return (EAGAIN); 2437 } 2438 2439 static int 2440 receive_write_embedded(struct receive_writer_arg *rwa, 2441 struct drr_write_embedded *drrwe, void *data) 2442 { 2443 dmu_tx_t *tx; 2444 int err; 2445 2446 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset) 2447 return (SET_ERROR(EINVAL)); 2448 2449 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE) 2450 return (SET_ERROR(EINVAL)); 2451 2452 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES) 2453 return (SET_ERROR(EINVAL)); 2454 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS) 2455 return (SET_ERROR(EINVAL)); 2456 if (rwa->raw) 2457 return (SET_ERROR(EINVAL)); 2458 2459 if (drrwe->drr_object > rwa->max_object) 2460 rwa->max_object = drrwe->drr_object; 2461 2462 tx = dmu_tx_create(rwa->os); 2463 2464 dmu_tx_hold_write(tx, drrwe->drr_object, 2465 drrwe->drr_offset, drrwe->drr_length); 2466 err = dmu_tx_assign(tx, TXG_WAIT); 2467 if (err != 0) { 2468 dmu_tx_abort(tx); 2469 return (err); 2470 } 2471 2472 dmu_write_embedded(rwa->os, drrwe->drr_object, 2473 drrwe->drr_offset, data, drrwe->drr_etype, 2474 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize, 2475 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx); 2476 2477 /* See comment in restore_write. */ 2478 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx); 2479 dmu_tx_commit(tx); 2480 return (0); 2481 } 2482 2483 static int 2484 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs, 2485 abd_t *abd) 2486 { 2487 dmu_buf_t *db, *db_spill; 2488 int err; 2489 2490 if (drrs->drr_length < SPA_MINBLOCKSIZE || 2491 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os))) 2492 return (SET_ERROR(EINVAL)); 2493 2494 /* 2495 * This is an unmodified spill block which was added to the stream 2496 * to resolve an issue with incorrectly removing spill blocks. It 2497 * should be ignored by current versions of the code which support 2498 * the DRR_FLAG_SPILL_BLOCK flag. 
2499 */ 2500 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) { 2501 abd_free(abd); 2502 return (0); 2503 } 2504 2505 if (rwa->raw) { 2506 if (!DMU_OT_IS_VALID(drrs->drr_type) || 2507 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS || 2508 drrs->drr_compressed_size == 0) 2509 return (SET_ERROR(EINVAL)); 2510 } 2511 2512 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0) 2513 return (SET_ERROR(EINVAL)); 2514 2515 if (drrs->drr_object > rwa->max_object) 2516 rwa->max_object = drrs->drr_object; 2517 2518 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db)); 2519 if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG, 2520 &db_spill)) != 0) { 2521 dmu_buf_rele(db, FTAG); 2522 return (err); 2523 } 2524 2525 dmu_tx_t *tx = dmu_tx_create(rwa->os); 2526 2527 dmu_tx_hold_spill(tx, db->db_object); 2528 2529 err = dmu_tx_assign(tx, TXG_WAIT); 2530 if (err != 0) { 2531 dmu_buf_rele(db, FTAG); 2532 dmu_buf_rele(db_spill, FTAG); 2533 dmu_tx_abort(tx); 2534 return (err); 2535 } 2536 2537 /* 2538 * Spill blocks may both grow and shrink. When a change in size 2539 * occurs any existing dbuf must be updated to match the logical 2540 * size of the provided arc_buf_t. 2541 */ 2542 if (db_spill->db_size != drrs->drr_length) { 2543 dmu_buf_will_fill(db_spill, tx, B_FALSE); 2544 VERIFY0(dbuf_spill_set_blksz(db_spill, 2545 drrs->drr_length, tx)); 2546 } 2547 2548 arc_buf_t *abuf; 2549 if (rwa->raw) { 2550 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ 2551 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^ 2552 rwa->byteswap; 2553 2554 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os), 2555 drrs->drr_object, byteorder, drrs->drr_salt, 2556 drrs->drr_iv, drrs->drr_mac, drrs->drr_type, 2557 drrs->drr_compressed_size, drrs->drr_length, 2558 drrs->drr_compressiontype, 0); 2559 } else { 2560 abuf = arc_loan_buf(dmu_objset_spa(rwa->os), 2561 DMU_OT_IS_METADATA(drrs->drr_type), 2562 drrs->drr_length); 2563 if (rwa->byteswap) { 2564 dmu_object_byteswap_t byteswap = 2565 DMU_OT_BYTESWAP(drrs->drr_type); 2566 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd), 2567 DRR_SPILL_PAYLOAD_SIZE(drrs)); 2568 } 2569 } 2570 2571 memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs)); 2572 abd_free(abd); 2573 dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx); 2574 2575 dmu_buf_rele(db, FTAG); 2576 dmu_buf_rele(db_spill, FTAG); 2577 2578 dmu_tx_commit(tx); 2579 return (0); 2580 } 2581 2582 noinline static int 2583 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf) 2584 { 2585 int err; 2586 2587 if (drrf->drr_length != -1ULL && 2588 drrf->drr_offset + drrf->drr_length < drrf->drr_offset) 2589 return (SET_ERROR(EINVAL)); 2590 2591 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0) 2592 return (SET_ERROR(EINVAL)); 2593 2594 if (drrf->drr_object > rwa->max_object) 2595 rwa->max_object = drrf->drr_object; 2596 2597 err = dmu_free_long_range(rwa->os, drrf->drr_object, 2598 drrf->drr_offset, drrf->drr_length); 2599 2600 return (err); 2601 } 2602 2603 static int 2604 receive_object_range(struct receive_writer_arg *rwa, 2605 struct drr_object_range *drror) 2606 { 2607 /* 2608 * By default, we assume this block is in our native format 2609 * (ZFS_HOST_BYTEORDER). We then take into account whether 2610 * the send stream is byteswapped (rwa->byteswap). Finally, 2611 * we need to byteswap again if this particular block was 2612 * in non-native format on the send side. 
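* The XOR chain below encodes this directly: start from the
* receiver's native byte order, flip once if the stream as a whole
* is byteswapped (rwa->byteswap), and flip again if this particular
* block was stored byteswapped on the send side
* (DRR_IS_RAW_BYTESWAPPED).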
2613 */ 2614 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^ 2615 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags); 2616 2617 /* 2618 * Since dnode block sizes are constant, we should not need to worry 2619 * about making sure that the dnode block size is the same on the 2620 * sending and receiving sides for the time being. For non-raw sends, 2621 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE 2622 * record at all). Raw sends require this record type because the 2623 * encryption parameters are used to protect an entire block of bonus 2624 * buffers. If the size of dnode blocks ever becomes variable, 2625 * handling will need to be added to ensure that dnode block sizes 2626 * match on the sending and receiving side. 2627 */ 2628 if (drror->drr_numslots != DNODES_PER_BLOCK || 2629 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 || 2630 !rwa->raw) 2631 return (SET_ERROR(EINVAL)); 2632 2633 if (drror->drr_firstobj > rwa->max_object) 2634 rwa->max_object = drror->drr_firstobj; 2635 2636 /* 2637 * The DRR_OBJECT_RANGE handling must be deferred to receive_object() 2638 * so that the block of dnodes is not written out when it's empty, 2639 * and converted to a HOLE BP. 2640 */ 2641 rwa->or_crypt_params_present = B_TRUE; 2642 rwa->or_firstobj = drror->drr_firstobj; 2643 rwa->or_numslots = drror->drr_numslots; 2644 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN); 2645 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN); 2646 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN); 2647 rwa->or_byteorder = byteorder; 2648 2649 rwa->or_need_sync = ORNS_MAYBE; 2650 2651 return (0); 2652 } 2653 2654 /* 2655 * Until we have the ability to redact large ranges of data efficiently, we 2656 * process these records as frees. 2657 */ 2658 noinline static int 2659 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr) 2660 { 2661 struct drr_free drrf = {0}; 2662 drrf.drr_length = drrr->drr_length; 2663 drrf.drr_object = drrr->drr_object; 2664 drrf.drr_offset = drrr->drr_offset; 2665 drrf.drr_toguid = drrr->drr_toguid; 2666 return (receive_free(rwa, &drrf)); 2667 } 2668 2669 /* used to destroy the drc_ds on error */ 2670 static void 2671 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc) 2672 { 2673 dsl_dataset_t *ds = drc->drc_ds; 2674 ds_hold_flags_t dsflags; 2675 2676 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; 2677 /* 2678 * Wait for the txg sync before cleaning up the receive. For 2679 * resumable receives, this ensures that our resume state has 2680 * been written out to disk. For raw receives, this ensures 2681 * that the user accounting code will not attempt to do anything 2682 * after we stopped receiving the dataset. 
2683 */ 2684 txg_wait_synced(ds->ds_dir->dd_pool, 0); 2685 ds->ds_objset->os_raw_receive = B_FALSE; 2686 2687 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 2688 if (drc->drc_resumable && drc->drc_should_save && 2689 !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) { 2690 rrw_exit(&ds->ds_bp_rwlock, FTAG); 2691 dsl_dataset_disown(ds, dsflags, dmu_recv_tag); 2692 } else { 2693 char name[ZFS_MAX_DATASET_NAME_LEN]; 2694 rrw_exit(&ds->ds_bp_rwlock, FTAG); 2695 dsl_dataset_name(ds, name); 2696 dsl_dataset_disown(ds, dsflags, dmu_recv_tag); 2697 if (!drc->drc_heal) 2698 (void) dsl_destroy_head(name); 2699 } 2700 } 2701 2702 static void 2703 receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf) 2704 { 2705 if (drc->drc_byteswap) { 2706 (void) fletcher_4_incremental_byteswap(buf, len, 2707 &drc->drc_cksum); 2708 } else { 2709 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum); 2710 } 2711 } 2712 2713 /* 2714 * Read the payload into a buffer of size len, and update the current record's 2715 * payload field. 2716 * Allocate drc->drc_next_rrd and read the next record's header into 2717 * drc->drc_next_rrd->header. 2718 * Verify checksum of payload and next record. 2719 */ 2720 static int 2721 receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf) 2722 { 2723 int err; 2724 2725 if (len != 0) { 2726 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); 2727 err = receive_read(drc, len, buf); 2728 if (err != 0) 2729 return (err); 2730 receive_cksum(drc, len, buf); 2731 2732 /* note: rrd is NULL when reading the begin record's payload */ 2733 if (drc->drc_rrd != NULL) { 2734 drc->drc_rrd->payload = buf; 2735 drc->drc_rrd->payload_size = len; 2736 drc->drc_rrd->bytes_read = drc->drc_bytes_read; 2737 } 2738 } else { 2739 ASSERT3P(buf, ==, NULL); 2740 } 2741 2742 drc->drc_prev_cksum = drc->drc_cksum; 2743 2744 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP); 2745 err = receive_read(drc, sizeof (drc->drc_next_rrd->header), 2746 &drc->drc_next_rrd->header); 2747 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read; 2748 2749 if (err != 0) { 2750 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); 2751 drc->drc_next_rrd = NULL; 2752 return (err); 2753 } 2754 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) { 2755 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); 2756 drc->drc_next_rrd = NULL; 2757 return (SET_ERROR(EINVAL)); 2758 } 2759 2760 /* 2761 * Note: checksum is of everything up to but not including the 2762 * checksum itself. 2763 */ 2764 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2765 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 2766 receive_cksum(drc, 2767 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2768 &drc->drc_next_rrd->header); 2769 2770 zio_cksum_t cksum_orig = 2771 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum; 2772 zio_cksum_t *cksump = 2773 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum; 2774 2775 if (drc->drc_byteswap) 2776 byteswap_record(&drc->drc_next_rrd->header); 2777 2778 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && 2779 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) { 2780 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); 2781 drc->drc_next_rrd = NULL; 2782 return (SET_ERROR(ECKSUM)); 2783 } 2784 2785 receive_cksum(drc, sizeof (cksum_orig), &cksum_orig); 2786 2787 return (0); 2788 } 2789 2790 /* 2791 * Issue the prefetch reads for any necessary indirect blocks. 
2792 * 2793 * We use the object ignore list to tell us whether or not to issue prefetches 2794 * for a given object. We do this for both correctness (in case the blocksize 2795 * of an object has changed) and performance (if the object doesn't exist, don't 2796 * needlessly try to issue prefetches). We also trim the list as we go through 2797 * the stream to prevent it from growing to an unbounded size. 2798 * 2799 * The object numbers within will always be in sorted order, and any write 2800 * records we see will also be in sorted order, but they're not sorted with 2801 * respect to each other (i.e. we can get several object records before 2802 * receiving each object's write records). As a result, once we've reached a 2803 * given object number, we can safely remove any reference to lower object 2804 * numbers in the ignore list. In practice, we receive up to 32 object records 2805 * before receiving write records, so the list can have up to 32 nodes in it. 2806 */ 2807 static void 2808 receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset, 2809 uint64_t length) 2810 { 2811 if (!objlist_exists(drc->drc_ignore_objlist, object)) { 2812 dmu_prefetch(drc->drc_os, object, 1, offset, length, 2813 ZIO_PRIORITY_SYNC_READ); 2814 } 2815 } 2816 2817 /* 2818 * Read records off the stream, issuing any necessary prefetches. 2819 */ 2820 static int 2821 receive_read_record(dmu_recv_cookie_t *drc) 2822 { 2823 int err; 2824 2825 switch (drc->drc_rrd->header.drr_type) { 2826 case DRR_OBJECT: 2827 { 2828 struct drr_object *drro = 2829 &drc->drc_rrd->header.drr_u.drr_object; 2830 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro); 2831 void *buf = NULL; 2832 dmu_object_info_t doi; 2833 2834 if (size != 0) 2835 buf = kmem_zalloc(size, KM_SLEEP); 2836 2837 err = receive_read_payload_and_next_header(drc, size, buf); 2838 if (err != 0) { 2839 kmem_free(buf, size); 2840 return (err); 2841 } 2842 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi); 2843 /* 2844 * See receive_read_prefetch for an explanation why we're 2845 * storing this object in the ignore_obj_list. 
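* (In short: prefetches are skipped for objects that do not yet
* exist, or whose block size is about to change, since those reads
* would be wasted or would use stale block boundaries.)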
2846 */ 2847 if (err == ENOENT || err == EEXIST || 2848 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { 2849 objlist_insert(drc->drc_ignore_objlist, 2850 drro->drr_object); 2851 err = 0; 2852 } 2853 return (err); 2854 } 2855 case DRR_FREEOBJECTS: 2856 { 2857 err = receive_read_payload_and_next_header(drc, 0, NULL); 2858 return (err); 2859 } 2860 case DRR_WRITE: 2861 { 2862 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write; 2863 int size = DRR_WRITE_PAYLOAD_SIZE(drrw); 2864 abd_t *abd = abd_alloc_linear(size, B_FALSE); 2865 err = receive_read_payload_and_next_header(drc, size, 2866 abd_to_buf(abd)); 2867 if (err != 0) { 2868 abd_free(abd); 2869 return (err); 2870 } 2871 drc->drc_rrd->abd = abd; 2872 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset, 2873 drrw->drr_logical_size); 2874 return (err); 2875 } 2876 case DRR_WRITE_EMBEDDED: 2877 { 2878 struct drr_write_embedded *drrwe = 2879 &drc->drc_rrd->header.drr_u.drr_write_embedded; 2880 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); 2881 void *buf = kmem_zalloc(size, KM_SLEEP); 2882 2883 err = receive_read_payload_and_next_header(drc, size, buf); 2884 if (err != 0) { 2885 kmem_free(buf, size); 2886 return (err); 2887 } 2888 2889 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset, 2890 drrwe->drr_length); 2891 return (err); 2892 } 2893 case DRR_FREE: 2894 case DRR_REDACT: 2895 { 2896 /* 2897 * It might be beneficial to prefetch indirect blocks here, but 2898 * we don't really have the data to decide for sure. 2899 */ 2900 err = receive_read_payload_and_next_header(drc, 0, NULL); 2901 return (err); 2902 } 2903 case DRR_END: 2904 { 2905 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end; 2906 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum, 2907 drre->drr_checksum)) 2908 return (SET_ERROR(ECKSUM)); 2909 return (0); 2910 } 2911 case DRR_SPILL: 2912 { 2913 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill; 2914 int size = DRR_SPILL_PAYLOAD_SIZE(drrs); 2915 abd_t *abd = abd_alloc_linear(size, B_FALSE); 2916 err = receive_read_payload_and_next_header(drc, size, 2917 abd_to_buf(abd)); 2918 if (err != 0) 2919 abd_free(abd); 2920 else 2921 drc->drc_rrd->abd = abd; 2922 return (err); 2923 } 2924 case DRR_OBJECT_RANGE: 2925 { 2926 err = receive_read_payload_and_next_header(drc, 0, NULL); 2927 return (err); 2928 2929 } 2930 default: 2931 return (SET_ERROR(EINVAL)); 2932 } 2933 } 2934 2935 2936 2937 static void 2938 dprintf_drr(struct receive_record_arg *rrd, int err) 2939 { 2940 #ifdef ZFS_DEBUG 2941 switch (rrd->header.drr_type) { 2942 case DRR_OBJECT: 2943 { 2944 struct drr_object *drro = &rrd->header.drr_u.drr_object; 2945 dprintf("drr_type = OBJECT obj = %llu type = %u " 2946 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u " 2947 "compress = %u dn_slots = %u err = %d\n", 2948 (u_longlong_t)drro->drr_object, drro->drr_type, 2949 drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen, 2950 drro->drr_checksumtype, drro->drr_compress, 2951 drro->drr_dn_slots, err); 2952 break; 2953 } 2954 case DRR_FREEOBJECTS: 2955 { 2956 struct drr_freeobjects *drrfo = 2957 &rrd->header.drr_u.drr_freeobjects; 2958 dprintf("drr_type = FREEOBJECTS firstobj = %llu " 2959 "numobjs = %llu err = %d\n", 2960 (u_longlong_t)drrfo->drr_firstobj, 2961 (u_longlong_t)drrfo->drr_numobjs, err); 2962 break; 2963 } 2964 case DRR_WRITE: 2965 { 2966 struct drr_write *drrw = &rrd->header.drr_u.drr_write; 2967 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu " 2968 "lsize = %llu cksumtype = %u flags = 
%u " 2969 "compress = %u psize = %llu err = %d\n", 2970 (u_longlong_t)drrw->drr_object, drrw->drr_type, 2971 (u_longlong_t)drrw->drr_offset, 2972 (u_longlong_t)drrw->drr_logical_size, 2973 drrw->drr_checksumtype, drrw->drr_flags, 2974 drrw->drr_compressiontype, 2975 (u_longlong_t)drrw->drr_compressed_size, err); 2976 break; 2977 } 2978 case DRR_WRITE_BYREF: 2979 { 2980 struct drr_write_byref *drrwbr = 2981 &rrd->header.drr_u.drr_write_byref; 2982 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu " 2983 "length = %llu toguid = %llx refguid = %llx " 2984 "refobject = %llu refoffset = %llu cksumtype = %u " 2985 "flags = %u err = %d\n", 2986 (u_longlong_t)drrwbr->drr_object, 2987 (u_longlong_t)drrwbr->drr_offset, 2988 (u_longlong_t)drrwbr->drr_length, 2989 (u_longlong_t)drrwbr->drr_toguid, 2990 (u_longlong_t)drrwbr->drr_refguid, 2991 (u_longlong_t)drrwbr->drr_refobject, 2992 (u_longlong_t)drrwbr->drr_refoffset, 2993 drrwbr->drr_checksumtype, drrwbr->drr_flags, err); 2994 break; 2995 } 2996 case DRR_WRITE_EMBEDDED: 2997 { 2998 struct drr_write_embedded *drrwe = 2999 &rrd->header.drr_u.drr_write_embedded; 3000 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu " 3001 "length = %llu compress = %u etype = %u lsize = %u " 3002 "psize = %u err = %d\n", 3003 (u_longlong_t)drrwe->drr_object, 3004 (u_longlong_t)drrwe->drr_offset, 3005 (u_longlong_t)drrwe->drr_length, 3006 drrwe->drr_compression, drrwe->drr_etype, 3007 drrwe->drr_lsize, drrwe->drr_psize, err); 3008 break; 3009 } 3010 case DRR_FREE: 3011 { 3012 struct drr_free *drrf = &rrd->header.drr_u.drr_free; 3013 dprintf("drr_type = FREE obj = %llu offset = %llu " 3014 "length = %lld err = %d\n", 3015 (u_longlong_t)drrf->drr_object, 3016 (u_longlong_t)drrf->drr_offset, 3017 (longlong_t)drrf->drr_length, 3018 err); 3019 break; 3020 } 3021 case DRR_SPILL: 3022 { 3023 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; 3024 dprintf("drr_type = SPILL obj = %llu length = %llu " 3025 "err = %d\n", (u_longlong_t)drrs->drr_object, 3026 (u_longlong_t)drrs->drr_length, err); 3027 break; 3028 } 3029 case DRR_OBJECT_RANGE: 3030 { 3031 struct drr_object_range *drror = 3032 &rrd->header.drr_u.drr_object_range; 3033 dprintf("drr_type = OBJECT_RANGE firstobj = %llu " 3034 "numslots = %llu flags = %u err = %d\n", 3035 (u_longlong_t)drror->drr_firstobj, 3036 (u_longlong_t)drror->drr_numslots, 3037 drror->drr_flags, err); 3038 break; 3039 } 3040 default: 3041 return; 3042 } 3043 #endif 3044 } 3045 3046 /* 3047 * Commit the records to the pool. 3048 */ 3049 static int 3050 receive_process_record(struct receive_writer_arg *rwa, 3051 struct receive_record_arg *rrd) 3052 { 3053 int err; 3054 3055 /* Processing in order, therefore bytes_read should be increasing. 
*/
3056 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
3057 rwa->bytes_read = rrd->bytes_read;
3058
3059 /* We can only heal write records; other ones get ignored */
3060 if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3061 if (rrd->abd != NULL) {
3062 abd_free(rrd->abd);
3063 rrd->abd = NULL;
3064 } else if (rrd->payload != NULL) {
3065 kmem_free(rrd->payload, rrd->payload_size);
3066 rrd->payload = NULL;
3067 }
3068 return (0);
3069 }
3070
3071 if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3072 err = flush_write_batch(rwa);
3073 if (err != 0) {
3074 if (rrd->abd != NULL) {
3075 abd_free(rrd->abd);
3076 rrd->abd = NULL;
3077 rrd->payload = NULL;
3078 } else if (rrd->payload != NULL) {
3079 kmem_free(rrd->payload, rrd->payload_size);
3080 rrd->payload = NULL;
3081 }
3082
3083 return (err);
3084 }
3085 }
3086
3087 switch (rrd->header.drr_type) {
3088 case DRR_OBJECT:
3089 {
3090 struct drr_object *drro = &rrd->header.drr_u.drr_object;
3091 err = receive_object(rwa, drro, rrd->payload);
3092 kmem_free(rrd->payload, rrd->payload_size);
3093 rrd->payload = NULL;
3094 break;
3095 }
3096 case DRR_FREEOBJECTS:
3097 {
3098 struct drr_freeobjects *drrfo =
3099 &rrd->header.drr_u.drr_freeobjects;
3100 err = receive_freeobjects(rwa, drrfo);
3101 break;
3102 }
3103 case DRR_WRITE:
3104 {
3105 err = receive_process_write_record(rwa, rrd);
3106 if (rwa->heal) {
3107 /*
3108 * If healing, always free the abd after processing.
3109 */
3110 abd_free(rrd->abd);
3111 rrd->abd = NULL;
3112 } else if (err != EAGAIN) {
3113 /*
3114 * On success, a non-healing
3115 * receive_process_write_record() returns
3116 * EAGAIN to indicate that we do not want to free
3117 * the rrd or arc_buf.
3118 */
3119 ASSERT(err != 0);
3120 abd_free(rrd->abd);
3121 rrd->abd = NULL;
3122 }
3123 break;
3124 }
3125 case DRR_WRITE_EMBEDDED:
3126 {
3127 struct drr_write_embedded *drrwe =
3128 &rrd->header.drr_u.drr_write_embedded;
3129 err = receive_write_embedded(rwa, drrwe, rrd->payload);
3130 kmem_free(rrd->payload, rrd->payload_size);
3131 rrd->payload = NULL;
3132 break;
3133 }
3134 case DRR_FREE:
3135 {
3136 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
3137 err = receive_free(rwa, drrf);
3138 break;
3139 }
3140 case DRR_SPILL:
3141 {
3142 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
3143 err = receive_spill(rwa, drrs, rrd->abd);
3144 if (err != 0)
3145 abd_free(rrd->abd);
3146 rrd->abd = NULL;
3147 rrd->payload = NULL;
3148 break;
3149 }
3150 case DRR_OBJECT_RANGE:
3151 {
3152 struct drr_object_range *drror =
3153 &rrd->header.drr_u.drr_object_range;
3154 err = receive_object_range(rwa, drror);
3155 break;
3156 }
3157 case DRR_REDACT:
3158 {
3159 struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
3160 err = receive_redact(rwa, drrr);
3161 break;
3162 }
3163 default:
3164 err = (SET_ERROR(EINVAL));
3165 }
3166
3167 if (err != 0)
3168 dprintf_drr(rrd, err);
3169
3170 return (err);
3171 }
3172
3173 /*
3174 * dmu_recv_stream's worker thread; pull records off the queue, and then call
3175 * receive_process_record. When we're done, signal the main thread and exit.
3176 */ 3177 static __attribute__((noreturn)) void 3178 receive_writer_thread(void *arg) 3179 { 3180 struct receive_writer_arg *rwa = arg; 3181 struct receive_record_arg *rrd; 3182 fstrans_cookie_t cookie = spl_fstrans_mark(); 3183 3184 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; 3185 rrd = bqueue_dequeue(&rwa->q)) { 3186 /* 3187 * If there's an error, the main thread will stop putting things 3188 * on the queue, but we need to clear everything in it before we 3189 * can exit. 3190 */ 3191 int err = 0; 3192 if (rwa->err == 0) { 3193 err = receive_process_record(rwa, rrd); 3194 } else if (rrd->abd != NULL) { 3195 abd_free(rrd->abd); 3196 rrd->abd = NULL; 3197 rrd->payload = NULL; 3198 } else if (rrd->payload != NULL) { 3199 kmem_free(rrd->payload, rrd->payload_size); 3200 rrd->payload = NULL; 3201 } 3202 /* 3203 * EAGAIN indicates that this record has been saved (on 3204 * raw->write_batch), and will be used again, so we don't 3205 * free it. 3206 * When healing data we always need to free the record. 3207 */ 3208 if (err != EAGAIN || rwa->heal) { 3209 if (rwa->err == 0) 3210 rwa->err = err; 3211 kmem_free(rrd, sizeof (*rrd)); 3212 } 3213 } 3214 kmem_free(rrd, sizeof (*rrd)); 3215 3216 if (rwa->heal) { 3217 zio_wait(rwa->heal_pio); 3218 } else { 3219 int err = flush_write_batch(rwa); 3220 if (rwa->err == 0) 3221 rwa->err = err; 3222 } 3223 mutex_enter(&rwa->mutex); 3224 rwa->done = B_TRUE; 3225 cv_signal(&rwa->cv); 3226 mutex_exit(&rwa->mutex); 3227 spl_fstrans_unmark(cookie); 3228 thread_exit(); 3229 } 3230 3231 static int 3232 resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl) 3233 { 3234 uint64_t val; 3235 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset; 3236 uint64_t dsobj = dmu_objset_id(drc->drc_os); 3237 uint64_t resume_obj, resume_off; 3238 3239 if (nvlist_lookup_uint64(begin_nvl, 3240 "resume_object", &resume_obj) != 0 || 3241 nvlist_lookup_uint64(begin_nvl, 3242 "resume_offset", &resume_off) != 0) { 3243 return (SET_ERROR(EINVAL)); 3244 } 3245 VERIFY0(zap_lookup(mos, dsobj, 3246 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); 3247 if (resume_obj != val) 3248 return (SET_ERROR(EINVAL)); 3249 VERIFY0(zap_lookup(mos, dsobj, 3250 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); 3251 if (resume_off != val) 3252 return (SET_ERROR(EINVAL)); 3253 3254 return (0); 3255 } 3256 3257 /* 3258 * Read in the stream's records, one by one, and apply them to the pool. There 3259 * are two threads involved; the thread that calls this function will spin up a 3260 * worker thread, read the records off the stream one by one, and issue 3261 * prefetches for any necessary indirect blocks. It will then push the records 3262 * onto an internal blocking queue. The worker thread will pull the records off 3263 * the queue, and actually write the data into the DMU. This way, the worker 3264 * thread doesn't have to wait for reads to complete, since everything it needs 3265 * (the indirect blocks) will be prefetched. 3266 * 3267 * NB: callers *must* call dmu_recv_end() if this succeeds. 
3268 */ 3269 int 3270 dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp) 3271 { 3272 int err = 0; 3273 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP); 3274 3275 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) { 3276 uint64_t bytes = 0; 3277 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, 3278 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, 3279 sizeof (bytes), 1, &bytes); 3280 drc->drc_bytes_read += bytes; 3281 } 3282 3283 drc->drc_ignore_objlist = objlist_create(); 3284 3285 /* these were verified in dmu_recv_begin */ 3286 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, 3287 DMU_SUBSTREAM); 3288 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); 3289 3290 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); 3291 ASSERT0(drc->drc_os->os_encrypted && 3292 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)); 3293 3294 /* handle DSL encryption key payload */ 3295 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) { 3296 nvlist_t *keynvl = NULL; 3297 3298 ASSERT(drc->drc_os->os_encrypted); 3299 ASSERT(drc->drc_raw); 3300 3301 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata", 3302 &keynvl); 3303 if (err != 0) 3304 goto out; 3305 3306 if (!drc->drc_heal) { 3307 /* 3308 * If this is a new dataset we set the key immediately. 3309 * Otherwise we don't want to change the key until we 3310 * are sure the rest of the receive succeeded so we 3311 * stash the keynvl away until then. 3312 */ 3313 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa), 3314 drc->drc_ds->ds_object, drc->drc_fromsnapobj, 3315 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs); 3316 if (err != 0) 3317 goto out; 3318 } 3319 3320 /* see comment in dmu_recv_end_sync() */ 3321 drc->drc_ivset_guid = 0; 3322 (void) nvlist_lookup_uint64(keynvl, "to_ivset_guid", 3323 &drc->drc_ivset_guid); 3324 3325 if (!drc->drc_newfs) 3326 drc->drc_keynvl = fnvlist_dup(keynvl); 3327 } 3328 3329 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) { 3330 err = resume_check(drc, drc->drc_begin_nvl); 3331 if (err != 0) 3332 goto out; 3333 } 3334 3335 /* 3336 * For compatibility with recursive send streams, we do this here, 3337 * rather than in dmu_recv_begin. If we pull the next header too 3338 * early, and it's the END record, we break the `recv_skip` logic. 3339 */ 3340 if (drc->drc_drr_begin->drr_payloadlen == 0) { 3341 err = receive_read_payload_and_next_header(drc, 0, NULL); 3342 if (err != 0) 3343 goto out; 3344 } 3345 3346 /* 3347 * If we failed before this point we will clean up any new resume 3348 * state that was created. Now that we've gotten past the initial 3349 * checks we are ok to retain that resume state. 
3350 */ 3351 drc->drc_should_save = B_TRUE; 3352 3353 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff, 3354 MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize), 3355 offsetof(struct receive_record_arg, node)); 3356 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL); 3357 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL); 3358 rwa->os = drc->drc_os; 3359 rwa->byteswap = drc->drc_byteswap; 3360 rwa->heal = drc->drc_heal; 3361 rwa->tofs = drc->drc_tofs; 3362 rwa->resumable = drc->drc_resumable; 3363 rwa->raw = drc->drc_raw; 3364 rwa->spill = drc->drc_spill; 3365 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0); 3366 rwa->os->os_raw_receive = drc->drc_raw; 3367 if (drc->drc_heal) { 3368 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL, 3369 ZIO_FLAG_GODFATHER); 3370 } 3371 list_create(&rwa->write_batch, sizeof (struct receive_record_arg), 3372 offsetof(struct receive_record_arg, node.bqn_node)); 3373 3374 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc, 3375 TS_RUN, minclsyspri); 3376 /* 3377 * We're reading rwa->err without locks, which is safe since we are the 3378 * only reader, and the worker thread is the only writer. It's ok if we 3379 * miss a write for an iteration or two of the loop, since the writer 3380 * thread will keep freeing records we send it until we send it an eos 3381 * marker. 3382 * 3383 * We can leave this loop in 3 ways: First, if rwa->err is 3384 * non-zero. In that case, the writer thread will free the rrd we just 3385 * pushed. Second, if we're interrupted; in that case, either it's the 3386 * first loop and drc->drc_rrd was never allocated, or it's later, and 3387 * drc->drc_rrd has been handed off to the writer thread who will free 3388 * it. Finally, if receive_read_record fails or we're at the end of the 3389 * stream, then we free drc->drc_rrd and exit. 3390 */ 3391 while (rwa->err == 0) { 3392 if (issig()) { 3393 err = SET_ERROR(EINTR); 3394 break; 3395 } 3396 3397 ASSERT3P(drc->drc_rrd, ==, NULL); 3398 drc->drc_rrd = drc->drc_next_rrd; 3399 drc->drc_next_rrd = NULL; 3400 /* Allocates and loads header into drc->drc_next_rrd */ 3401 err = receive_read_record(drc); 3402 3403 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) { 3404 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd)); 3405 drc->drc_rrd = NULL; 3406 break; 3407 } 3408 3409 bqueue_enqueue(&rwa->q, drc->drc_rrd, 3410 sizeof (struct receive_record_arg) + 3411 drc->drc_rrd->payload_size); 3412 drc->drc_rrd = NULL; 3413 } 3414 3415 ASSERT3P(drc->drc_rrd, ==, NULL); 3416 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP); 3417 drc->drc_rrd->eos_marker = B_TRUE; 3418 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1); 3419 3420 mutex_enter(&rwa->mutex); 3421 while (!rwa->done) { 3422 /* 3423 * We need to use cv_wait_sig() so that any process that may 3424 * be sleeping here can still fork. 3425 */ 3426 (void) cv_wait_sig(&rwa->cv, &rwa->mutex); 3427 } 3428 mutex_exit(&rwa->mutex); 3429 3430 /* 3431 * If we are receiving a full stream as a clone, all object IDs which 3432 * are greater than the maximum ID referenced in the stream are 3433 * by definition unused and must be freed. 
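* For example, if the origin snapshot had allocated objects up to
* ID 200 but the stream's highest referenced object is 100, objects
* 101 through 200 still exist in the clone and are freed by the loop
* below.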
3434 */ 3435 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { 3436 uint64_t obj = rwa->max_object + 1; 3437 int free_err = 0; 3438 int next_err = 0; 3439 3440 while (next_err == 0) { 3441 free_err = dmu_free_long_object(rwa->os, obj); 3442 if (free_err != 0 && free_err != ENOENT) 3443 break; 3444 3445 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0); 3446 } 3447 3448 if (err == 0) { 3449 if (free_err != 0 && free_err != ENOENT) 3450 err = free_err; 3451 else if (next_err != ESRCH) 3452 err = next_err; 3453 } 3454 } 3455 3456 cv_destroy(&rwa->cv); 3457 mutex_destroy(&rwa->mutex); 3458 bqueue_destroy(&rwa->q); 3459 list_destroy(&rwa->write_batch); 3460 if (err == 0) 3461 err = rwa->err; 3462 3463 out: 3464 /* 3465 * If we hit an error before we started the receive_writer_thread 3466 * we need to clean up the next_rrd we create by processing the 3467 * DRR_BEGIN record. 3468 */ 3469 if (drc->drc_next_rrd != NULL) 3470 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); 3471 3472 /* 3473 * The objset will be invalidated by dmu_recv_end() when we do 3474 * dsl_dataset_clone_swap_sync_impl(). 3475 */ 3476 drc->drc_os = NULL; 3477 3478 kmem_free(rwa, sizeof (*rwa)); 3479 nvlist_free(drc->drc_begin_nvl); 3480 3481 if (err != 0) { 3482 /* 3483 * Clean up references. If receive is not resumable, 3484 * destroy what we created, so we don't leave it in 3485 * the inconsistent state. 3486 */ 3487 dmu_recv_cleanup_ds(drc); 3488 nvlist_free(drc->drc_keynvl); 3489 } 3490 3491 objlist_destroy(drc->drc_ignore_objlist); 3492 drc->drc_ignore_objlist = NULL; 3493 *voffp = drc->drc_voff; 3494 return (err); 3495 } 3496 3497 static int 3498 dmu_recv_end_check(void *arg, dmu_tx_t *tx) 3499 { 3500 dmu_recv_cookie_t *drc = arg; 3501 dsl_pool_t *dp = dmu_tx_pool(tx); 3502 int error; 3503 3504 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); 3505 3506 if (drc->drc_heal) { 3507 error = 0; 3508 } else if (!drc->drc_newfs) { 3509 dsl_dataset_t *origin_head; 3510 3511 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); 3512 if (error != 0) 3513 return (error); 3514 if (drc->drc_force) { 3515 /* 3516 * We will destroy any snapshots in tofs (i.e. before 3517 * origin_head) that are after the origin (which is 3518 * the snap before drc_ds, because drc_ds can not 3519 * have any snaps of its own). 
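* E.g. if tofs carries snapshots s1 < s2 < s3 and s1 is the origin of
* this receive, the walk below starts from s3 and verifies that s3
* and s2 can be destroyed.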
3520 */ 3521 uint64_t obj; 3522 3523 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3524 while (obj != 3525 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3526 dsl_dataset_t *snap; 3527 error = dsl_dataset_hold_obj(dp, obj, FTAG, 3528 &snap); 3529 if (error != 0) 3530 break; 3531 if (snap->ds_dir != origin_head->ds_dir) 3532 error = SET_ERROR(EINVAL); 3533 if (error == 0) { 3534 error = dsl_destroy_snapshot_check_impl( 3535 snap, B_FALSE); 3536 } 3537 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3538 dsl_dataset_rele(snap, FTAG); 3539 if (error != 0) 3540 break; 3541 } 3542 if (error != 0) { 3543 dsl_dataset_rele(origin_head, FTAG); 3544 return (error); 3545 } 3546 } 3547 if (drc->drc_keynvl != NULL) { 3548 error = dsl_crypto_recv_raw_key_check(drc->drc_ds, 3549 drc->drc_keynvl, tx); 3550 if (error != 0) { 3551 dsl_dataset_rele(origin_head, FTAG); 3552 return (error); 3553 } 3554 } 3555 3556 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, 3557 origin_head, drc->drc_force, drc->drc_owner, tx); 3558 if (error != 0) { 3559 dsl_dataset_rele(origin_head, FTAG); 3560 return (error); 3561 } 3562 error = dsl_dataset_snapshot_check_impl(origin_head, 3563 drc->drc_tosnap, tx, B_TRUE, 1, 3564 drc->drc_cred, drc->drc_proc); 3565 dsl_dataset_rele(origin_head, FTAG); 3566 if (error != 0) 3567 return (error); 3568 3569 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); 3570 } else { 3571 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, 3572 drc->drc_tosnap, tx, B_TRUE, 1, 3573 drc->drc_cred, drc->drc_proc); 3574 } 3575 return (error); 3576 } 3577 3578 static void 3579 dmu_recv_end_sync(void *arg, dmu_tx_t *tx) 3580 { 3581 dmu_recv_cookie_t *drc = arg; 3582 dsl_pool_t *dp = dmu_tx_pool(tx); 3583 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0; 3584 uint64_t newsnapobj = 0; 3585 3586 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", 3587 tx, "snap=%s", drc->drc_tosnap); 3588 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE; 3589 3590 if (drc->drc_heal) { 3591 if (drc->drc_keynvl != NULL) { 3592 nvlist_free(drc->drc_keynvl); 3593 drc->drc_keynvl = NULL; 3594 } 3595 } else if (!drc->drc_newfs) { 3596 dsl_dataset_t *origin_head; 3597 3598 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, 3599 &origin_head)); 3600 3601 if (drc->drc_force) { 3602 /* 3603 * Destroy any snapshots of drc_tofs (origin_head) 3604 * after the origin (the snap before drc_ds). 3605 */ 3606 uint64_t obj; 3607 3608 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3609 while (obj != 3610 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3611 dsl_dataset_t *snap; 3612 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, 3613 &snap)); 3614 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); 3615 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3616 dsl_destroy_snapshot_sync_impl(snap, 3617 B_FALSE, tx); 3618 dsl_dataset_rele(snap, FTAG); 3619 } 3620 } 3621 if (drc->drc_keynvl != NULL) { 3622 dsl_crypto_recv_raw_key_sync(drc->drc_ds, 3623 drc->drc_keynvl, tx); 3624 nvlist_free(drc->drc_keynvl); 3625 drc->drc_keynvl = NULL; 3626 } 3627 3628 VERIFY3P(drc->drc_ds->ds_prev, ==, 3629 origin_head->ds_prev); 3630 3631 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, 3632 origin_head, tx); 3633 /* 3634 * The objset was evicted by dsl_dataset_clone_swap_sync_impl, 3635 * so drc_os is no longer valid. 
		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		/*
		 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
		 * so drc_os is no longer valid.
		 */
		drc->drc_os = NULL;

		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
		}
		newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}

	/*
	 * If this is a raw receive, the crypt_keydata nvlist will include
	 * a to_ivset_guid for us to set on the new snapshot. This value
	 * will override the value generated by the snapshot code. However,
	 * the value may be absent, because older implementations of the
	 * raw send code did not include it; such streams may still be
	 * received when the zfs_disable_ivset_guid_check tunable is set,
	 * in which case we keep the newly-generated value.
	 */
	if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
		dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
		    DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
		    &drc->drc_ivset_guid, tx));
	}

	/*
	 * Release the hold from dmu_recv_begin. This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}

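/*
 * Expected number of blocks modified by dmu_recv_end_sync(); passed to
 * dsl_sync_task() below, which uses it to estimate how much space the
 * sync task may consume.
 */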
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	} else if (!drc->drc_heal) {
		if (drc->drc_newfs) {
			zvol_create_minor(drc->drc_tofs);
		}
		char *snapname = kmem_asprintf("%s@%s",
		    drc->drc_tofs, drc->drc_tosnap);
		zvol_create_minor(snapname);
		kmem_strfree(snapname);
	}
	return (error);
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
	"Maximum receive queue length");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
	"Receive queue fill fraction");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
	"Maximum amount of writes to batch into one transaction");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
	"Ignore errors during corrective receive");
/* END CSTYLED */
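/*
 * Note: at runtime these tunables are typically surfaced as module
 * parameters on Linux, e.g.:
 *
 *	echo 20 > /sys/module/zfs/parameters/zfs_recv_queue_ff
 *
 * and as sysctls under vfs.zfs.recv.* on FreeBSD. The exact paths depend
 * on how the module was built and loaded.
 */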