Lines Matching +full:n +full:- +full:mos

1 // SPDX-License-Identifier: CDDL-1.0
10 * or https://opensource.org/licenses/CDDL-1.0.
156 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) in byteswap_record()
157 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) in byteswap_record()
158 drr->drr_type = BSWAP_32(drr->drr_type); in byteswap_record()
159 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); in byteswap_record()
161 switch (drr->drr_type) { in byteswap_record()
192 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); in byteswap_record()
230 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum); in byteswap_record()
236 if (drr->drr_type != DRR_BEGIN) { in byteswap_record()
237 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum); in byteswap_record()
286 dmu_recv_cookie_t *drc = drba->drba_cookie; in redact_check()
287 struct drr_begin *drrb = drc->drc_drrb; in redact_check()
288 int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); in redact_check()
297 if (drrb->drr_fromguid == 0) in redact_check()
303 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl, in redact_check()
323 err = nvlist_lookup_uint64_array(drc->drc_begin_nvl, in redact_check()
331 drrb->drr_toguid)) { in redact_check()
347 * If we previously received a stream with --large-block, we don't support
348 * receiving an incremental on top of it without --large-block. This avoids
349 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
369 dsl_pool_t *dp = ds->ds_dir->dd_pool; in recv_begin_check_existing_impl()
370 boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0; in recv_begin_check_existing_impl()
375 error = zap_lookup(dp->dp_meta_objset, in recv_begin_check_existing_impl()
376 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name, in recv_begin_check_existing_impl()
386 error = zap_lookup(dp->dp_meta_objset, in recv_begin_check_existing_impl()
387 dsl_dataset_phys(ds)->ds_snapnames_zapobj, in recv_begin_check_existing_impl()
388 drba->drba_cookie->drc_tosnap, 8, 1, &obj); in recv_begin_check_existing_impl()
389 if (drba->drba_cookie->drc_heal) { in recv_begin_check_existing_impl()
397 error = zap_count(dp->dp_meta_objset, in recv_begin_check_existing_impl()
398 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children); in recv_begin_check_existing_impl()
401 if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS && in recv_begin_check_existing_impl()
414 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, in recv_begin_check_existing_impl()
415 NULL, drba->drba_cred); in recv_begin_check_existing_impl()
419 if (drba->drba_cookie->drc_heal) { in recv_begin_check_existing_impl()
425 if (drba->drba_cookie->drc_force) in recv_begin_check_existing_impl()
428 /* Must have keys loaded if doing encrypted non-raw recv. */ in recv_begin_check_existing_impl()
430 if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object, in recv_begin_check_existing_impl()
445 drba->drba_cookie->drc_drrb->drr_toguid != in recv_begin_check_existing_impl()
446 dsl_dataset_phys(snap)->ds_guid) { in recv_begin_check_existing_impl()
453 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; in recv_begin_check_existing_impl()
455 /* Can't perform a raw receive on top of a non-raw receive */ in recv_begin_check_existing_impl()
469 if (snap->ds_dir != ds->ds_dir) { in recv_begin_check_existing_impl()
473 if (dsl_dataset_phys(snap)->ds_guid == fromguid) in recv_begin_check_existing_impl()
475 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; in recv_begin_check_existing_impl()
481 if (drba->drba_cookie->drc_force) { in recv_begin_check_existing_impl()
482 drba->drba_cookie->drc_fromsnapobj = obj; in recv_begin_check_existing_impl()
494 dsl_dataset_phys(ds)->ds_prev_snap_obj != in recv_begin_check_existing_impl()
495 snap->ds_object)) { in recv_begin_check_existing_impl()
499 drba->drba_cookie->drc_fromsnapobj = in recv_begin_check_existing_impl()
500 ds->ds_prev->ds_object; in recv_begin_check_existing_impl()
519 if (!drba->drba_cookie->drc_force) in recv_begin_check_existing_impl()
523 * We don't support using zfs recv -F to blow away in recv_begin_check_existing_impl()
539 ds->ds_dir->dd_parent, drba->drba_dcp, in recv_begin_check_existing_impl()
580 * un-embed / un-mooch / split up the blocks / dnodes during the in recv_begin_check_feature_flags_impl()
625 struct drr_begin *drrb = drba->drba_cookie->drc_drrb; in dmu_recv_begin_check()
626 uint64_t fromguid = drrb->drr_fromguid; in dmu_recv_begin_check()
627 int flags = drrb->drr_flags; in dmu_recv_begin_check()
630 uint64_t featureflags = drba->drba_cookie->drc_featureflags; in dmu_recv_begin_check()
632 const char *tofs = drba->drba_cookie->drc_tofs; in dmu_recv_begin_check()
635 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); in dmu_recv_begin_check()
638 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == in dmu_recv_begin_check()
640 drrb->drr_type >= DMU_OST_NUMTYPES || in dmu_recv_begin_check()
641 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL)) in dmu_recv_begin_check()
644 error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa); in dmu_recv_begin_check()
649 if (drba->drba_cookie->drc_resumable && in dmu_recv_begin_check()
650 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET)) in dmu_recv_begin_check()
655 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION)) in dmu_recv_begin_check()
671 if (drba->drba_dcp == NULL || in dmu_recv_begin_check()
672 drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) { in dmu_recv_begin_check()
682 if (flags & DRR_FLAG_CLONE || drba->drba_origin) { in dmu_recv_begin_check()
696 if (drba->drba_cookie->drc_heal == B_TRUE) in dmu_recv_begin_check()
700 * If it's a non-clone incremental, we are missing the in dmu_recv_begin_check()
704 drba->drba_origin)) in dmu_recv_begin_check()
712 if (fromguid == 0 && drba->drba_origin != NULL && in dmu_recv_begin_check()
718 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1); in dmu_recv_begin_check()
724 drba->drba_origin == NULL) { in dmu_recv_begin_check()
734 error = dmu_objset_create_crypt_check(ds->ds_dir, in dmu_recv_begin_check()
735 drba->drba_dcp, &will_encrypt); in dmu_recv_begin_check()
753 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, in dmu_recv_begin_check()
754 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred); in dmu_recv_begin_check()
760 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, in dmu_recv_begin_check()
761 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred); in dmu_recv_begin_check()
778 if (drba->drba_origin != NULL) { in dmu_recv_begin_check()
780 error = dsl_dataset_hold_flags(dp, drba->drba_origin, in dmu_recv_begin_check()
786 if (!origin->ds_is_snapshot) { in dmu_recv_begin_check()
791 if (dsl_dataset_phys(origin)->ds_guid != fromguid && in dmu_recv_begin_check()
798 if (origin->ds_dir->dd_crypto_obj != 0 && in dmu_recv_begin_check()
842 objset_t *mos = dp->dp_meta_objset; in dmu_recv_begin_sync() local
843 dmu_recv_cookie_t *drc = drba->drba_cookie; in dmu_recv_begin_sync()
844 struct drr_begin *drrb = drc->drc_drrb; in dmu_recv_begin_sync()
845 const char *tofs = drc->drc_tofs; in dmu_recv_begin_sync()
846 uint64_t featureflags = drc->drc_featureflags; in dmu_recv_begin_sync()
854 dsl_crypto_params_t *dcp = drba->drba_dcp; in dmu_recv_begin_sync()
856 if (drrb->drr_flags & DRR_FLAG_CI_DATA) in dmu_recv_begin_sync()
863 * Raw, non-incremental recvs always use a dummy dcp with in dmu_recv_begin_sync()
867 if (dcp == NULL && drrb->drr_fromguid == 0 && in dmu_recv_begin_sync()
868 drba->drba_origin == NULL) { in dmu_recv_begin_sync()
873 dcp->cp_cmd = DCP_CMD_RAW_RECV; in dmu_recv_begin_sync()
881 if (drba->drba_cookie->drc_fromsnapobj != 0) { in dmu_recv_begin_sync()
883 drba->drba_cookie->drc_fromsnapobj, FTAG, &snap)); in dmu_recv_begin_sync()
886 if (drc->drc_heal) { in dmu_recv_begin_sync()
888 VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap, in dmu_recv_begin_sync()
891 dsobj = dsl_dataset_create_sync(ds->ds_dir, in dmu_recv_begin_sync()
892 recv_clone_name, snap, crflags, drba->drba_cred, in dmu_recv_begin_sync()
895 if (drba->drba_cookie->drc_fromsnapobj != 0) in dmu_recv_begin_sync()
905 if (drba->drba_origin != NULL) { in dmu_recv_begin_sync()
906 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin, in dmu_recv_begin_sync()
913 origin, crflags, drba->drba_cred, dcp, tx); in dmu_recv_begin_sync()
917 drc->drc_newfs = B_TRUE; in dmu_recv_begin_sync()
934 if (drc->drc_resumable) { in dmu_recv_begin_sync()
936 if (drrb->drr_fromguid != 0) { in dmu_recv_begin_sync()
937 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID, in dmu_recv_begin_sync()
938 8, 1, &drrb->drr_fromguid, tx)); in dmu_recv_begin_sync()
940 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID, in dmu_recv_begin_sync()
941 8, 1, &drrb->drr_toguid, tx)); in dmu_recv_begin_sync()
942 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME, in dmu_recv_begin_sync()
943 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx)); in dmu_recv_begin_sync()
946 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT, in dmu_recv_begin_sync()
948 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET, in dmu_recv_begin_sync()
950 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES, in dmu_recv_begin_sync()
953 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK, in dmu_recv_begin_sync()
957 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK, in dmu_recv_begin_sync()
961 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK, in dmu_recv_begin_sync()
965 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK, in dmu_recv_begin_sync()
971 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl, in dmu_recv_begin_sync()
974 VERIFY0(zap_add(mos, dsobj, in dmu_recv_begin_sync()
982 * Usually the os->os_encrypted value is tied to the presence of a in dmu_recv_begin_sync()
987 os->os_encrypted = B_TRUE; in dmu_recv_begin_sync()
988 drba->drba_cookie->drc_raw = B_TRUE; in dmu_recv_begin_sync()
994 VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl, in dmu_recv_begin_sync()
1018 dmu_buf_will_dirty(newds->ds_dbuf, tx); in dmu_recv_begin_sync()
1019 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT; in dmu_recv_begin_sync()
1026 dsl_dataset_activate_feature(newds->ds_object, in dmu_recv_begin_sync()
1028 newds->ds_feature[SPA_FEATURE_LONGNAME] = (void *)B_TRUE; in dmu_recv_begin_sync()
1032 * If we actually created a non-clone, we need to create the objset in dmu_recv_begin_sync()
1037 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG); in dmu_recv_begin_sync()
1040 !drc->drc_heal) { in dmu_recv_begin_sync()
1041 (void) dmu_objset_create_impl(dp->dp_spa, in dmu_recv_begin_sync()
1042 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx); in dmu_recv_begin_sync()
1044 rrw_exit(&newds->ds_bp_rwlock, FTAG); in dmu_recv_begin_sync()
1046 drba->drba_cookie->drc_ds = newds; in dmu_recv_begin_sync()
1047 drba->drba_cookie->drc_os = os; in dmu_recv_begin_sync()
1056 dmu_recv_cookie_t *drc = drba->drba_cookie; in dmu_recv_resume_begin_check()
1058 struct drr_begin *drrb = drc->drc_drrb; in dmu_recv_resume_begin_check()
1062 const char *tofs = drc->drc_tofs; in dmu_recv_resume_begin_check()
1065 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); in dmu_recv_resume_begin_check()
1066 ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING); in dmu_recv_resume_begin_check()
1068 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == in dmu_recv_resume_begin_check()
1070 drrb->drr_type >= DMU_OST_NUMTYPES) in dmu_recv_resume_begin_check()
1077 error = recv_begin_check_feature_flags_impl(drc->drc_featureflags, in dmu_recv_resume_begin_check()
1078 dp->dp_spa); in dmu_recv_resume_begin_check()
1088 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) { in dmu_recv_resume_begin_check()
1090 if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)) in dmu_recv_resume_begin_check()
1109 if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) { in dmu_recv_resume_begin_check()
1126 error = zap_lookup(dp->dp_meta_objset, ds->ds_object, in dmu_recv_resume_begin_check()
1128 if (error != 0 || drrb->drr_toguid != val) { in dmu_recv_resume_begin_check()
1144 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) { in dmu_recv_resume_begin_check()
1156 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object, in dmu_recv_resume_begin_check()
1158 if (drrb->drr_fromguid != val) { in dmu_recv_resume_begin_check()
1163 if (ds->ds_prev != NULL && drrb->drr_fromguid != 0) in dmu_recv_resume_begin_check()
1164 drc->drc_fromsnapobj = ds->ds_prev->ds_object; in dmu_recv_resume_begin_check()
1171 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) { in dmu_recv_resume_begin_check()
1178 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl, in dmu_recv_resume_begin_check()
1201 error = recv_check_large_blocks(ds, drc->drc_featureflags); in dmu_recv_resume_begin_check()
1216 const char *tofs = drba->drba_cookie->drc_tofs; in dmu_recv_resume_begin_sync()
1217 uint64_t featureflags = drba->drba_cookie->drc_featureflags; in dmu_recv_resume_begin_sync()
1227 drba->drba_cookie->drc_raw = B_TRUE; in dmu_recv_resume_begin_sync()
1237 drba->drba_cookie->drc_newfs = B_TRUE; in dmu_recv_resume_begin_sync()
1241 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); in dmu_recv_resume_begin_sync()
1243 drba->drba_cookie->drc_raw); in dmu_recv_resume_begin_sync()
1244 rrw_exit(&ds->ds_bp_rwlock, FTAG); in dmu_recv_resume_begin_sync()
1246 drba->drba_cookie->drc_ds = ds; in dmu_recv_resume_begin_sync()
1247 VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os)); in dmu_recv_resume_begin_sync()
1248 drba->drba_cookie->drc_should_save = B_TRUE; in dmu_recv_resume_begin_sync()
1271 drc->drc_drr_begin = drr_begin; in dmu_recv_begin()
1272 drc->drc_drrb = &drr_begin->drr_u.drr_begin; in dmu_recv_begin()
1273 drc->drc_tosnap = tosnap; in dmu_recv_begin()
1274 drc->drc_tofs = tofs; in dmu_recv_begin()
1275 drc->drc_force = force; in dmu_recv_begin()
1276 drc->drc_heal = heal; in dmu_recv_begin()
1277 drc->drc_resumable = resumable; in dmu_recv_begin()
1278 drc->drc_cred = cr; in dmu_recv_begin()
1279 drc->drc_clone = (origin != NULL); in dmu_recv_begin()
1281 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { in dmu_recv_begin()
1282 drc->drc_byteswap = B_TRUE; in dmu_recv_begin()
1284 sizeof (dmu_replay_record_t), &drc->drc_cksum); in dmu_recv_begin()
1286 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) { in dmu_recv_begin()
1288 sizeof (dmu_replay_record_t), &drc->drc_cksum); in dmu_recv_begin()
1291 drc->drc_cred = NULL; in dmu_recv_begin()
1295 drc->drc_fp = fp; in dmu_recv_begin()
1296 drc->drc_voff = *voffp; in dmu_recv_begin()
1297 drc->drc_featureflags = in dmu_recv_begin()
1298 DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); in dmu_recv_begin()
1300 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen; in dmu_recv_begin()
1310 drc->drc_cred = NULL; in dmu_recv_begin()
1329 drc->drc_cred = NULL; in dmu_recv_begin()
1332 err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl, in dmu_recv_begin()
1336 kmem_free(drc->drc_next_rrd, in dmu_recv_begin()
1337 sizeof (*drc->drc_next_rrd)); in dmu_recv_begin()
1339 drc->drc_cred = NULL; in dmu_recv_begin()
1344 if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK) in dmu_recv_begin()
1345 drc->drc_spill = B_TRUE; in dmu_recv_begin()
1349 drba.drba_cred = drc->drc_cred; in dmu_recv_begin()
1351 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) { in dmu_recv_begin()
1357 * For non-raw, non-incremental, non-resuming receives the in dmu_recv_begin()
1359 * with "zfs recv -o". For these receives we create a dcp and in dmu_recv_begin()
1363 * read-only properties. Any other kind of receive that in dmu_recv_begin()
1366 if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & in dmu_recv_begin()
1368 origin == NULL && drc->drc_drrb->drr_fromguid == 0) { in dmu_recv_begin()
1382 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); in dmu_recv_begin()
1383 nvlist_free(drc->drc_begin_nvl); in dmu_recv_begin()
1385 drc->drc_cred = NULL; in dmu_recv_begin()
1402 cr_cb_data_t *data = zio->io_private; in corrective_read_done()
1404 if (zio->io_error == 0) { in corrective_read_done()
1405 spa_remove_error(data->spa, &data->zb, in corrective_read_done()
1406 BP_GET_LOGICAL_BIRTH(zio->io_bp)); in corrective_read_done()
1409 abd_free(zio->io_abd); in corrective_read_done()
1423 abd_t *abd = rrd->abd; in do_corrective_recv()
1424 zio_cksum_t bp_cksum = bp->blk_cksum; in do_corrective_recv()
1428 if (rwa->raw) in do_corrective_recv()
1431 err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn); in do_corrective_recv()
1434 SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0, in do_corrective_recv()
1435 dbuf_whichblock(dn, 0, drrw->drr_offset)); in do_corrective_recv()
1438 if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) { in do_corrective_recv()
1441 drrw->drr_logical_size, B_FALSE); in do_corrective_recv()
1442 err = zio_decompress_data(drrw->drr_compressiontype, in do_corrective_recv()
1455 if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) { in do_corrective_recv()
1461 rwa->os->os_complevel); in do_corrective_recv()
1462 abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize); in do_corrective_recv()
1470 * The stream is not encrypted but the data on-disk is. in do_corrective_recv()
1471 * We need to re-encrypt the buf using the same in do_corrective_recv()
1475 if (!rwa->raw && BP_USES_CRYPT(bp)) { in do_corrective_recv()
1482 dsl_pool_t *dp = dmu_objset_pool(rwa->os); in do_corrective_recv()
1489 err = dsl_dataset_hold_flags(dp, rwa->tofs, in do_corrective_recv()
1498 err = spa_keystore_lookup_key(rwa->os->os_spa, in do_corrective_recv()
1508 err = zio_do_crypt_abd(B_TRUE, &dck->dck_key, in do_corrective_recv()
1512 spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG); in do_corrective_recv()
1531 rrd->abd = abd; in do_corrective_recv()
1533 io = zio_rewrite(NULL, rwa->os->os_spa, BP_GET_LOGICAL_BIRTH(bp), bp, in do_corrective_recv()
1542 if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) { in do_corrective_recv()
1554 cb_data->spa = rwa->os->os_spa; in do_corrective_recv()
1555 cb_data->size = drrw->drr_logical_size; in do_corrective_recv()
1556 cb_data->zb = zb; in do_corrective_recv()
1557 /* Test if healing worked by re-reading the bp */ in do_corrective_recv()
1558 err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp, in do_corrective_recv()
1559 abd_alloc_for_io(drrw->drr_logical_size, B_FALSE), in do_corrective_recv()
1560 drrw->drr_logical_size, corrective_read_done, in do_corrective_recv()
1579 (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0); in receive_read()
1582 ssize_t resid = len - done; in receive_read()
1583 zfs_file_t *fp = drc->drc_fp; in receive_read()
1585 len - done, &resid); in receive_read()
1586 if (err == 0 && resid == len - done) { in receive_read()
1594 drc->drc_voff += len - done - resid; in receive_read()
1595 done = len - resid; in receive_read()
1600 drc->drc_bytes_read += len; in receive_read()
1613 ((DN_OLD_MAX_BONUSLEN - in deduce_nblkptr()
1624 if (!rwa->resumable) in save_resume_state()
1631 ASSERT(rwa->bytes_read != 0); in save_resume_state()
1635 * (non-meta-dnode) object number. in save_resume_state()
1644 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]); in save_resume_state()
1645 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] || in save_resume_state()
1646 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]); in save_resume_state()
1647 ASSERT3U(rwa->bytes_read, >=, in save_resume_state()
1648 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]); in save_resume_state()
1650 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object; in save_resume_state()
1651 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset; in save_resume_state()
1652 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read; in save_resume_state()
1667 err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data, in receive_object_is_same_generation()
1689 uint32_t indblksz = drro->drr_indblkshift ? in receive_handle_existing_object()
1690 1ULL << drro->drr_indblkshift : 0; in receive_handle_existing_object()
1691 int nblkptr = deduce_nblkptr(drro->drr_bonustype, in receive_handle_existing_object()
1692 drro->drr_bonuslen); in receive_handle_existing_object()
1693 uint8_t dn_slots = drro->drr_dn_slots != 0 ? in receive_handle_existing_object()
1694 drro->drr_dn_slots : DNODE_MIN_SLOTS; in receive_handle_existing_object()
1698 *object_to_hold = drro->drr_object; in receive_handle_existing_object()
1701 if (rwa->raw && nblkptr != drro->drr_nblkptr) in receive_handle_existing_object()
1706 * have freed this object, and then happened to re-allocate in receive_handle_existing_object()
1715 * If this is a -L (--large-block) incremental stream, and in receive_handle_existing_object()
1716 * the previous stream was not -L, the block size may appear in receive_handle_existing_object()
1738 if (drro->drr_blksz != doi->doi_data_block_size) { in receive_handle_existing_object()
1739 if (rwa->raw) { in receive_handle_existing_object()
1743 * due to changing --large-block to be on. in receive_handle_existing_object()
1750 } else if (rwa->full) { in receive_handle_existing_object()
1760 } else if (drro->drr_type != in receive_handle_existing_object()
1762 doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) { in receive_handle_existing_object()
1773 } else if (doi->doi_max_offset <= in receive_handle_existing_object()
1774 doi->doi_data_block_size) { in receive_handle_existing_object()
1778 * WRITE record. This can not be the no-L -> in receive_handle_existing_object()
1779 * -L case, because the no-L case would have in receive_handle_existing_object()
1781 * supported -L -> no-L, it would not be safe in receive_handle_existing_object()
1789 err = receive_object_is_same_generation(rwa->os, in receive_handle_existing_object()
1790 drro->drr_object, doi->doi_bonus_type, in receive_handle_existing_object()
1791 drro->drr_bonustype, bonus_data, &is_same_gen); in receive_handle_existing_object()
1800 * --large-block was changed to be in receive_handle_existing_object()
1804 if (drro->drr_blksz <= in receive_handle_existing_object()
1805 doi->doi_data_block_size) in receive_handle_existing_object()
1812 doi->doi_data_block_size; in receive_handle_existing_object()
1820 if (nblkptr < doi->doi_nblkptr) in receive_handle_existing_object()
1824 if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) in receive_handle_existing_object()
1831 * - A changed indirect block size in receive_handle_existing_object()
1832 * - A smaller nlevels in receive_handle_existing_object()
1834 if (rwa->raw) { in receive_handle_existing_object()
1835 if (indblksz != doi->doi_metadata_block_size) in receive_handle_existing_object()
1837 if (drro->drr_nlevels < doi->doi_indirection) in receive_handle_existing_object()
1842 err = dmu_free_long_range(rwa->os, drro->drr_object, in receive_handle_existing_object()
1851 * number of dnode slots on an object. For non-raw sends this in receive_handle_existing_object()
1859 if ((rwa->raw && ((doi->doi_indirection > 1 && in receive_handle_existing_object()
1860 indblksz != doi->doi_metadata_block_size) || in receive_handle_existing_object()
1861 drro->drr_nlevels < doi->doi_indirection)) || in receive_handle_existing_object()
1862 dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) { in receive_handle_existing_object()
1863 err = dmu_free_long_object(rwa->os, drro->drr_object); in receive_handle_existing_object()
1867 txg_wait_synced(dmu_objset_pool(rwa->os), 0); in receive_handle_existing_object()
1884 if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) { in receive_handle_existing_object()
1885 err = dmu_free_long_range(rwa->os, drro->drr_object, in receive_handle_existing_object()
1886 (drro->drr_maxblkid + 1) * doi->doi_data_block_size, in receive_handle_existing_object()
1901 uint32_t new_blksz = drro->drr_blksz; in receive_object()
1902 uint8_t dn_slots = drro->drr_dn_slots != 0 ? in receive_object()
1903 drro->drr_dn_slots : DNODE_MIN_SLOTS; in receive_object()
1905 if (drro->drr_type == DMU_OT_NONE || in receive_object()
1906 !DMU_OT_IS_VALID(drro->drr_type) || in receive_object()
1907 !DMU_OT_IS_VALID(drro->drr_bonustype) || in receive_object()
1908 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || in receive_object()
1909 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || in receive_object()
1910 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || in receive_object()
1911 drro->drr_blksz < SPA_MINBLOCKSIZE || in receive_object()
1912 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || in receive_object()
1913 drro->drr_bonuslen > in receive_object()
1914 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) || in receive_object()
1916 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) { in receive_object()
1920 if (rwa->raw) { in receive_object()
1925 if (drro->drr_object < rwa->or_firstobj || in receive_object()
1926 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots || in receive_object()
1927 drro->drr_raw_bonuslen < drro->drr_bonuslen || in receive_object()
1928 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT || in receive_object()
1929 drro->drr_nlevels > DN_MAX_LEVELS || in receive_object()
1930 drro->drr_nblkptr > DN_MAX_NBLKPTR || in receive_object()
1932 drro->drr_raw_bonuslen) in receive_object()
1939 if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) || in receive_object()
1940 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) { in receive_object()
1944 if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 || in receive_object()
1945 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) { in receive_object()
1950 err = dmu_object_info(rwa->os, drro->drr_object, &doi); in receive_object()
1955 if (drro->drr_object > rwa->max_object) in receive_object()
1956 rwa->max_object = drro->drr_object; in receive_object()
1974 * multi-slot dnode. This will be resolved when the next txg in receive_object()
1979 txg_wait_synced(dmu_objset_pool(rwa->os), 0); in receive_object()
1981 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT) in receive_object()
1993 if (rwa->or_need_sync == ORNS_YES) in receive_object()
1994 txg_wait_synced(dmu_objset_pool(rwa->os), 0); in receive_object()
2001 rwa->or_need_sync = ORNS_NO; in receive_object()
2004 * If this is a multi-slot dnode there is a chance that this in receive_object()
2012 for (uint64_t slot = drro->drr_object + 1; in receive_object()
2013 slot < drro->drr_object + dn_slots; in receive_object()
2017 err = dmu_object_info(rwa->os, slot, &slot_doi); in receive_object()
2023 err = dmu_free_long_object(rwa->os, slot); in receive_object()
2031 txg_wait_synced(dmu_objset_pool(rwa->os), 0); in receive_object()
2034 tx = dmu_tx_create(rwa->os); in receive_object()
2045 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object, in receive_object()
2046 drro->drr_type, new_blksz, in receive_object()
2047 drro->drr_bonustype, drro->drr_bonuslen, in receive_object()
2049 } else if (drro->drr_type != doi.doi_type || in receive_object()
2051 drro->drr_bonustype != doi.doi_bonus_type || in receive_object()
2052 drro->drr_bonuslen != doi.doi_bonus_size) { in receive_object()
2054 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object, in receive_object()
2055 drro->drr_type, new_blksz, in receive_object()
2056 drro->drr_bonustype, drro->drr_bonuslen, in receive_object()
2057 dn_slots << DNODE_SHIFT, rwa->spill ? in receive_object()
2058 DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx); in receive_object()
2059 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) { in receive_object()
2065 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx); in receive_object()
2073 if (rwa->or_crypt_params_present) { in receive_object()
2081 * the dnode block to end up on-disk with the incorrect MAC, in receive_object()
2084 * inconsistent, no code paths will do a non-raw read (or in receive_object()
2090 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE; in receive_object()
2092 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os), in receive_object()
2099 dmu_buf_set_crypt_params(db, rwa->or_byteorder, in receive_object()
2100 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx); in receive_object()
2104 rwa->or_crypt_params_present = B_FALSE; in receive_object()
2107 dmu_object_set_checksum(rwa->os, drro->drr_object, in receive_object()
2108 drro->drr_checksumtype, tx); in receive_object()
2109 dmu_object_set_compress(rwa->os, drro->drr_object, in receive_object()
2110 drro->drr_compress, tx); in receive_object()
2113 if (rwa->raw) { in receive_object()
2118 * For non-new objects block size and indirect block in receive_object()
2121 ASSERT3U(new_blksz, ==, drro->drr_blksz); in receive_object()
2122 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object, in receive_object()
2123 drro->drr_blksz, drro->drr_indblkshift, tx)); in receive_object()
2124 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object, in receive_object()
2125 drro->drr_nlevels, tx)); in receive_object()
2131 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object, in receive_object()
2132 drro->drr_maxblkid, tx)); in receive_object()
2140 if (rwa->raw) in receive_object()
2143 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn)); in receive_object()
2148 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); in receive_object()
2149 memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro)); in receive_object()
2155 if (rwa->byteswap && !rwa->raw) { in receive_object()
2157 DMU_OT_BYTESWAP(drro->drr_bonustype); in receive_object()
2158 dmu_ot_byteswap[byteswap].ob_func(db->db_data, in receive_object()
2172 save_resume_state(rwa, drro->drr_object, 0, tx); in receive_object()
2186 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) in receive_freeobjects()
2189 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj; in receive_freeobjects()
2190 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && in receive_freeobjects()
2192 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) { in receive_freeobjects()
2196 err = dmu_object_info(rwa->os, obj, &doi); in receive_freeobjects()
2202 err = dmu_free_long_object(rwa->os, obj); in receive_freeobjects()
2207 if (rwa->or_need_sync == ORNS_MAYBE) in receive_freeobjects()
2208 rwa->or_need_sync = ORNS_YES; in receive_freeobjects()
2217 * rwa->write_batch list.
2225 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0) in flush_write_batch_impl()
2228 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch); in flush_write_batch_impl()
2229 struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write; in flush_write_batch_impl()
2231 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch); in flush_write_batch_impl()
2232 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write; in flush_write_batch_impl()
2234 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object); in flush_write_batch_impl()
2235 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset); in flush_write_batch_impl()
2237 dmu_tx_t *tx = dmu_tx_create(rwa->os); in flush_write_batch_impl()
2238 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset, in flush_write_batch_impl()
2239 last_drrw->drr_offset - first_drrw->drr_offset + in flush_write_batch_impl()
2240 last_drrw->drr_logical_size); in flush_write_batch_impl()
2249 while ((rrd = list_head(&rwa->write_batch)) != NULL) { in flush_write_batch_impl()
2250 struct drr_write *drrw = &rrd->header.drr_u.drr_write; in flush_write_batch_impl()
2251 abd_t *abd = rrd->abd; in flush_write_batch_impl()
2253 ASSERT3U(drrw->drr_object, ==, rwa->last_object); in flush_write_batch_impl()
2255 if (drrw->drr_logical_size != dn->dn_datablksz) { in flush_write_batch_impl()
2259 * large-block stream into a dataset that previously did in flush_write_batch_impl()
2260 * a non-large-block receive. Lightweight writes must in flush_write_batch_impl()
2264 ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz); in flush_write_batch_impl()
2267 abd_alloc_linear(drrw->drr_logical_size, in flush_write_batch_impl()
2271 drrw->drr_compressiontype, in flush_write_batch_impl()
2278 drrw->drr_offset, in flush_write_batch_impl()
2279 drrw->drr_logical_size, in flush_write_batch_impl()
2287 drrw->drr_offset, in flush_write_batch_impl()
2288 drrw->drr_logical_size, in flush_write_batch_impl()
2297 dmu_write_policy(rwa->os, dn, 0, 0, &zp); in flush_write_batch_impl()
2301 if (rwa->raw) { in flush_write_batch_impl()
2303 zp.zp_compress = drrw->drr_compressiontype; in flush_write_batch_impl()
2305 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^ in flush_write_batch_impl()
2306 rwa->byteswap; in flush_write_batch_impl()
2307 memcpy(zp.zp_salt, drrw->drr_salt, in flush_write_batch_impl()
2309 memcpy(zp.zp_iv, drrw->drr_iv, in flush_write_batch_impl()
2311 memcpy(zp.zp_mac, drrw->drr_mac, in flush_write_batch_impl()
2316 SPA_DVAS_PER_BP - 1); in flush_write_batch_impl()
2319 SPA_DVAS_PER_BP - 1); in flush_write_batch_impl()
2323 ASSERT3U(drrw->drr_compressed_size, >, 0); in flush_write_batch_impl()
2324 ASSERT3U(drrw->drr_logical_size, >=, in flush_write_batch_impl()
2325 drrw->drr_compressed_size); in flush_write_batch_impl()
2326 zp.zp_compress = drrw->drr_compressiontype; in flush_write_batch_impl()
2328 } else if (rwa->byteswap) { in flush_write_batch_impl()
2339 DMU_OT_BYTESWAP(drrw->drr_type); in flush_write_batch_impl()
2351 drrw->drr_offset, abd, &zp, zio_flags, tx); in flush_write_batch_impl()
2368 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx); in flush_write_batch_impl()
2370 list_remove(&rwa->write_batch, rrd); in flush_write_batch_impl()
2382 if (list_is_empty(&rwa->write_batch)) in flush_write_batch()
2384 int err = rwa->err; in flush_write_batch()
2389 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) { in flush_write_batch()
2390 abd_free(rrd->abd); in flush_write_batch()
2394 ASSERT(list_is_empty(&rwa->write_batch)); in flush_write_batch()
2404 ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE); in receive_process_write_record()
2405 struct drr_write *drrw = &rrd->header.drr_u.drr_write; in receive_process_write_record()
2407 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset || in receive_process_write_record()
2408 !DMU_OT_IS_VALID(drrw->drr_type)) in receive_process_write_record()
2411 if (rwa->heal) { in receive_process_write_record()
2416 if (rwa->raw) in receive_process_write_record()
2419 if (rwa->byteswap) { in receive_process_write_record()
2421 DMU_OT_BYTESWAP(drrw->drr_type); in receive_process_write_record()
2422 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd), in receive_process_write_record()
2426 err = dmu_buf_hold_noread(rwa->os, drrw->drr_object, in receive_process_write_record()
2427 drrw->drr_offset, FTAG, &dbp); in receive_process_write_record()
2445 /* Make sure the on-disk block and recv record sizes match */ in receive_process_write_record()
2446 if (drrw->drr_logical_size != dbp->db_size) { in receive_process_write_record()
2462 if (drrw->drr_object < rwa->last_object || in receive_process_write_record()
2463 (drrw->drr_object == rwa->last_object && in receive_process_write_record()
2464 drrw->drr_offset < rwa->last_offset)) { in receive_process_write_record()
2468 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch); in receive_process_write_record()
2469 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write; in receive_process_write_record()
2473 (drrw->drr_object != first_drrw->drr_object || in receive_process_write_record()
2474 drrw->drr_offset >= first_drrw->drr_offset + batch_size)) { in receive_process_write_record()
2480 rwa->last_object = drrw->drr_object; in receive_process_write_record()
2481 rwa->last_offset = drrw->drr_offset; in receive_process_write_record()
2483 if (rwa->last_object > rwa->max_object) in receive_process_write_record()
2484 rwa->max_object = rwa->last_object; in receive_process_write_record()
2486 list_insert_tail(&rwa->write_batch, rrd); in receive_process_write_record()
2501 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset) in receive_write_embedded()
2504 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE) in receive_write_embedded()
2507 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES) in receive_write_embedded()
2509 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS) in receive_write_embedded()
2511 if (rwa->raw) in receive_write_embedded()
2514 if (drrwe->drr_object > rwa->max_object) in receive_write_embedded()
2515 rwa->max_object = drrwe->drr_object; in receive_write_embedded()
2517 tx = dmu_tx_create(rwa->os); in receive_write_embedded()
2519 dmu_tx_hold_write(tx, drrwe->drr_object, in receive_write_embedded()
2520 drrwe->drr_offset, drrwe->drr_length); in receive_write_embedded()
2527 dmu_write_embedded(rwa->os, drrwe->drr_object, in receive_write_embedded()
2528 drrwe->drr_offset, data, drrwe->drr_etype, in receive_write_embedded()
2529 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize, in receive_write_embedded()
2530 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx); in receive_write_embedded()
2533 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx); in receive_write_embedded()
2545 if (drrs->drr_length < SPA_MINBLOCKSIZE || in receive_spill()
2546 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os))) in receive_spill()
2555 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) { in receive_spill()
2560 if (rwa->raw) { in receive_spill()
2561 if (!DMU_OT_IS_VALID(drrs->drr_type) || in receive_spill()
2562 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS || in receive_spill()
2563 drrs->drr_compressed_size == 0) in receive_spill()
2567 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0) in receive_spill()
2570 if (drrs->drr_object > rwa->max_object) in receive_spill()
2571 rwa->max_object = drrs->drr_object; in receive_spill()
2573 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db)); in receive_spill()
2580 dmu_tx_t *tx = dmu_tx_create(rwa->os); in receive_spill()
2582 dmu_tx_hold_spill(tx, db->db_object); in receive_spill()
2597 if (db_spill->db_size != drrs->drr_length) { in receive_spill()
2600 drrs->drr_length, tx)); in receive_spill()
2604 if (rwa->raw) { in receive_spill()
2606 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^ in receive_spill()
2607 rwa->byteswap; in receive_spill()
2609 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os), in receive_spill()
2610 drrs->drr_object, byteorder, drrs->drr_salt, in receive_spill()
2611 drrs->drr_iv, drrs->drr_mac, drrs->drr_type, in receive_spill()
2612 drrs->drr_compressed_size, drrs->drr_length, in receive_spill()
2613 drrs->drr_compressiontype, 0); in receive_spill()
2615 abuf = arc_loan_buf(dmu_objset_spa(rwa->os), in receive_spill()
2616 DMU_OT_IS_METADATA(drrs->drr_type), in receive_spill()
2617 drrs->drr_length); in receive_spill()
2618 if (rwa->byteswap) { in receive_spill()
2620 DMU_OT_BYTESWAP(drrs->drr_type); in receive_spill()
2626 memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs)); in receive_spill()
2643 if (drrf->drr_length != -1ULL && in receive_free()
2644 drrf->drr_offset + drrf->drr_length < drrf->drr_offset) in receive_free()
2647 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0) in receive_free()
2650 if (drrf->drr_object > rwa->max_object) in receive_free()
2651 rwa->max_object = drrf->drr_object; in receive_free()
2653 err = dmu_free_long_range(rwa->os, drrf->drr_object, in receive_free()
2654 drrf->drr_offset, drrf->drr_length); in receive_free()
2666 * the send stream is byteswapped (rwa->byteswap). Finally, in receive_object_range()
2668 * in non-native format on the send side. in receive_object_range()
2670 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^ in receive_object_range()
2671 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags); in receive_object_range()
2676 * sending and receiving sides for the time being. For non-raw sends, in receive_object_range()
2684 if (drror->drr_numslots != DNODES_PER_BLOCK || in receive_object_range()
2685 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 || in receive_object_range()
2686 !rwa->raw) in receive_object_range()
2689 if (drror->drr_firstobj > rwa->max_object) in receive_object_range()
2690 rwa->max_object = drror->drr_firstobj; in receive_object_range()
2697 rwa->or_crypt_params_present = B_TRUE; in receive_object_range()
2698 rwa->or_firstobj = drror->drr_firstobj; in receive_object_range()
2699 rwa->or_numslots = drror->drr_numslots; in receive_object_range()
2700 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN); in receive_object_range()
2701 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN); in receive_object_range()
2702 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN); in receive_object_range()
2703 rwa->or_byteorder = byteorder; in receive_object_range()
2705 rwa->or_need_sync = ORNS_MAYBE; in receive_object_range()
2718 drrf.drr_length = drrr->drr_length; in receive_redact()
2719 drrf.drr_object = drrr->drr_object; in receive_redact()
2720 drrf.drr_offset = drrr->drr_offset; in receive_redact()
2721 drrf.drr_toguid = drrr->drr_toguid; in receive_redact()
2729 dsl_dataset_t *ds = drc->drc_ds; in dmu_recv_cleanup_ds()
2732 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT; in dmu_recv_cleanup_ds()
2740 txg_wait_synced(ds->ds_dir->dd_pool, 0); in dmu_recv_cleanup_ds()
2741 ds->ds_objset->os_raw_receive = B_FALSE; in dmu_recv_cleanup_ds()
2743 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); in dmu_recv_cleanup_ds()
2744 if (drc->drc_resumable && drc->drc_should_save && in dmu_recv_cleanup_ds()
2746 rrw_exit(&ds->ds_bp_rwlock, FTAG); in dmu_recv_cleanup_ds()
2750 rrw_exit(&ds->ds_bp_rwlock, FTAG); in dmu_recv_cleanup_ds()
2753 if (!drc->drc_heal) in dmu_recv_cleanup_ds()
2761 if (drc->drc_byteswap) { in receive_cksum()
2763 &drc->drc_cksum); in receive_cksum()
2765 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum); in receive_cksum()
2772 * Allocate drc->drc_next_rrd and read the next record's header into
2773 * drc->drc_next_rrd->header.
2789 if (drc->drc_rrd != NULL) { in receive_read_payload_and_next_header()
2790 drc->drc_rrd->payload = buf; in receive_read_payload_and_next_header()
2791 drc->drc_rrd->payload_size = len; in receive_read_payload_and_next_header()
2792 drc->drc_rrd->bytes_read = drc->drc_bytes_read; in receive_read_payload_and_next_header()
2798 drc->drc_prev_cksum = drc->drc_cksum; in receive_read_payload_and_next_header()
2800 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP); in receive_read_payload_and_next_header()
2801 err = receive_read(drc, sizeof (drc->drc_next_rrd->header), in receive_read_payload_and_next_header()
2802 &drc->drc_next_rrd->header); in receive_read_payload_and_next_header()
2803 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read; in receive_read_payload_and_next_header()
2806 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); in receive_read_payload_and_next_header()
2807 drc->drc_next_rrd = NULL; in receive_read_payload_and_next_header()
2810 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) { in receive_read_payload_and_next_header()
2811 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); in receive_read_payload_and_next_header()
2812 drc->drc_next_rrd = NULL; in receive_read_payload_and_next_header()
2821 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); in receive_read_payload_and_next_header()
2824 &drc->drc_next_rrd->header); in receive_read_payload_and_next_header()
2827 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum; in receive_read_payload_and_next_header()
2829 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum; in receive_read_payload_and_next_header()
2831 if (drc->drc_byteswap) in receive_read_payload_and_next_header()
2832 byteswap_record(&drc->drc_next_rrd->header); in receive_read_payload_and_next_header()
2835 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) { in receive_read_payload_and_next_header()
2836 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); in receive_read_payload_and_next_header()
2837 drc->drc_next_rrd = NULL; in receive_read_payload_and_next_header()
2867 if (!objlist_exists(drc->drc_ignore_objlist, object)) { in receive_read_prefetch()
2868 dmu_prefetch(drc->drc_os, object, 1, offset, length, in receive_read_prefetch()
2881 switch (drc->drc_rrd->header.drr_type) { in receive_read_record()
2885 &drc->drc_rrd->header.drr_u.drr_object; in receive_read_record()
2898 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi); in receive_read_record()
2904 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { in receive_read_record()
2905 objlist_insert(drc->drc_ignore_objlist, in receive_read_record()
2906 drro->drr_object); in receive_read_record()
2918 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write; in receive_read_record()
2927 drc->drc_rrd->abd = abd; in receive_read_record()
2928 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset, in receive_read_record()
2929 drrw->drr_logical_size); in receive_read_record()
2935 &drc->drc_rrd->header.drr_u.drr_write_embedded; in receive_read_record()
2936 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); in receive_read_record()
2945 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset, in receive_read_record()
2946 drrwe->drr_length); in receive_read_record()
2961 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end; in receive_read_record()
2962 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum, in receive_read_record()
2963 drre->drr_checksum)) in receive_read_record()
2969 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill; in receive_read_record()
2977 drc->drc_rrd->abd = abd; in receive_read_record()
2997 switch (rrd->header.drr_type) { in dprintf_drr()
3000 struct drr_object *drro = &rrd->header.drr_u.drr_object; in dprintf_drr()
3003 "compress = %u dn_slots = %u err = %d\n", in dprintf_drr()
3004 (u_longlong_t)drro->drr_object, drro->drr_type, in dprintf_drr()
3005 drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen, in dprintf_drr()
3006 drro->drr_checksumtype, drro->drr_compress, in dprintf_drr()
3007 drro->drr_dn_slots, err); in dprintf_drr()
3013 &rrd->header.drr_u.drr_freeobjects; in dprintf_drr()
3015 "numobjs = %llu err = %d\n", in dprintf_drr()
3016 (u_longlong_t)drrfo->drr_firstobj, in dprintf_drr()
3017 (u_longlong_t)drrfo->drr_numobjs, err); in dprintf_drr()
3022 struct drr_write *drrw = &rrd->header.drr_u.drr_write; in dprintf_drr()
3025 "compress = %u psize = %llu err = %d\n", in dprintf_drr()
3026 (u_longlong_t)drrw->drr_object, drrw->drr_type, in dprintf_drr()
3027 (u_longlong_t)drrw->drr_offset, in dprintf_drr()
3028 (u_longlong_t)drrw->drr_logical_size, in dprintf_drr()
3029 drrw->drr_checksumtype, drrw->drr_flags, in dprintf_drr()
3030 drrw->drr_compressiontype, in dprintf_drr()
3031 (u_longlong_t)drrw->drr_compressed_size, err); in dprintf_drr()
3037 &rrd->header.drr_u.drr_write_byref; in dprintf_drr()
3041 "flags = %u err = %d\n", in dprintf_drr()
3042 (u_longlong_t)drrwbr->drr_object, in dprintf_drr()
3043 (u_longlong_t)drrwbr->drr_offset, in dprintf_drr()
3044 (u_longlong_t)drrwbr->drr_length, in dprintf_drr()
3045 (u_longlong_t)drrwbr->drr_toguid, in dprintf_drr()
3046 (u_longlong_t)drrwbr->drr_refguid, in dprintf_drr()
3047 (u_longlong_t)drrwbr->drr_refobject, in dprintf_drr()
3048 (u_longlong_t)drrwbr->drr_refoffset, in dprintf_drr()
3049 drrwbr->drr_checksumtype, drrwbr->drr_flags, err); in dprintf_drr()
3055 &rrd->header.drr_u.drr_write_embedded; in dprintf_drr()
3058 "psize = %u err = %d\n", in dprintf_drr()
3059 (u_longlong_t)drrwe->drr_object, in dprintf_drr()
3060 (u_longlong_t)drrwe->drr_offset, in dprintf_drr()
3061 (u_longlong_t)drrwe->drr_length, in dprintf_drr()
3062 drrwe->drr_compression, drrwe->drr_etype, in dprintf_drr()
3063 drrwe->drr_lsize, drrwe->drr_psize, err); in dprintf_drr()
3068 struct drr_free *drrf = &rrd->header.drr_u.drr_free; in dprintf_drr()
3070 "length = %lld err = %d\n", in dprintf_drr()
3071 (u_longlong_t)drrf->drr_object, in dprintf_drr()
3072 (u_longlong_t)drrf->drr_offset, in dprintf_drr()
3073 (longlong_t)drrf->drr_length, in dprintf_drr()
3079 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; in dprintf_drr()
3081 "err = %d\n", (u_longlong_t)drrs->drr_object, in dprintf_drr()
3082 (u_longlong_t)drrs->drr_length, err); in dprintf_drr()
3088 &rrd->header.drr_u.drr_object_range; in dprintf_drr()
3090 "numslots = %llu flags = %u err = %d\n", in dprintf_drr()
3091 (u_longlong_t)drror->drr_firstobj, in dprintf_drr()
3092 (u_longlong_t)drror->drr_numslots, in dprintf_drr()
3093 drror->drr_flags, err); in dprintf_drr()
3112 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read); in receive_process_record()
3113 rwa->bytes_read = rrd->bytes_read; in receive_process_record()
3116 if (rwa->heal && rrd->header.drr_type != DRR_WRITE) { in receive_process_record()
3117 if (rrd->abd != NULL) { in receive_process_record()
3118 abd_free(rrd->abd); in receive_process_record()
3119 rrd->abd = NULL; in receive_process_record()
3120 } else if (rrd->payload != NULL) { in receive_process_record()
3121 kmem_free(rrd->payload, rrd->payload_size); in receive_process_record()
3122 rrd->payload = NULL; in receive_process_record()
3127 if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) { in receive_process_record()
3130 if (rrd->abd != NULL) { in receive_process_record()
3131 abd_free(rrd->abd); in receive_process_record()
3132 rrd->abd = NULL; in receive_process_record()
3133 rrd->payload = NULL; in receive_process_record()
3134 } else if (rrd->payload != NULL) { in receive_process_record()
3135 kmem_free(rrd->payload, rrd->payload_size); in receive_process_record()
3136 rrd->payload = NULL; in receive_process_record()
3143 switch (rrd->header.drr_type) { in receive_process_record()
3146 struct drr_object *drro = &rrd->header.drr_u.drr_object; in receive_process_record()
3147 err = receive_object(rwa, drro, rrd->payload); in receive_process_record()
3148 kmem_free(rrd->payload, rrd->payload_size); in receive_process_record()
3149 rrd->payload = NULL; in receive_process_record()
3155 &rrd->header.drr_u.drr_freeobjects; in receive_process_record()
3162 if (rwa->heal) { in receive_process_record()
3164 * If healing - always free the abd after processing in receive_process_record()
3166 abd_free(rrd->abd); in receive_process_record()
3167 rrd->abd = NULL; in receive_process_record()
3170 * On success, a non-healing in receive_process_record()
3176 abd_free(rrd->abd); in receive_process_record()
3177 rrd->abd = NULL; in receive_process_record()
3184 &rrd->header.drr_u.drr_write_embedded; in receive_process_record()
3185 err = receive_write_embedded(rwa, drrwe, rrd->payload); in receive_process_record()
3186 kmem_free(rrd->payload, rrd->payload_size); in receive_process_record()
3187 rrd->payload = NULL; in receive_process_record()
3192 struct drr_free *drrf = &rrd->header.drr_u.drr_free; in receive_process_record()
3198 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; in receive_process_record()
3199 err = receive_spill(rwa, drrs, rrd->abd); in receive_process_record()
3201 abd_free(rrd->abd); in receive_process_record()
3202 rrd->abd = NULL; in receive_process_record()
3203 rrd->payload = NULL; in receive_process_record()
3209 &rrd->header.drr_u.drr_object_range; in receive_process_record()
3215 struct drr_redact *drrr = &rrd->header.drr_u.drr_redact; in receive_process_record()
3240 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; in receive_writer_thread()
3241 rrd = bqueue_dequeue(&rwa->q)) { in receive_writer_thread()
3248 if (rwa->err == 0) { in receive_writer_thread()
3250 } else if (rrd->abd != NULL) { in receive_writer_thread()
3251 abd_free(rrd->abd); in receive_writer_thread()
3252 rrd->abd = NULL; in receive_writer_thread()
3253 rrd->payload = NULL; in receive_writer_thread()
3254 } else if (rrd->payload != NULL) { in receive_writer_thread()
3255 kmem_free(rrd->payload, rrd->payload_size); in receive_writer_thread()
3256 rrd->payload = NULL; in receive_writer_thread()
3260 * rwa->write_batch), and will be used again, so we don't in receive_writer_thread()
3264 if (err != EAGAIN || rwa->heal) { in receive_writer_thread()
3265 if (rwa->err == 0) in receive_writer_thread()
3266 rwa->err = err; in receive_writer_thread()
3272 if (rwa->heal) { in receive_writer_thread()
3273 zio_wait(rwa->heal_pio); in receive_writer_thread()
3276 if (rwa->err == 0) in receive_writer_thread()
3277 rwa->err = err; in receive_writer_thread()
3279 mutex_enter(&rwa->mutex); in receive_writer_thread()
3280 rwa->done = B_TRUE; in receive_writer_thread()
3281 cv_signal(&rwa->cv); in receive_writer_thread()
3282 mutex_exit(&rwa->mutex); in receive_writer_thread()
3291 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset; in resume_check() local
3292 uint64_t dsobj = dmu_objset_id(drc->drc_os); in resume_check()
3301 VERIFY0(zap_lookup(mos, dsobj, in resume_check()
3305 VERIFY0(zap_lookup(mos, dsobj, in resume_check()
3331 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) { in dmu_recv_stream()
3333 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, in dmu_recv_stream()
3334 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, in dmu_recv_stream()
3336 drc->drc_bytes_read += bytes; in dmu_recv_stream()
3339 drc->drc_ignore_objlist = objlist_create(); in dmu_recv_stream()
3342 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, in dmu_recv_stream()
3344 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); in dmu_recv_stream()
3346 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); in dmu_recv_stream()
3347 ASSERT0(drc->drc_os->os_encrypted && in dmu_recv_stream()
3348 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)); in dmu_recv_stream()
3351 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) { in dmu_recv_stream()
3354 ASSERT(drc->drc_os->os_encrypted); in dmu_recv_stream()
3355 ASSERT(drc->drc_raw); in dmu_recv_stream()
3357 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata", in dmu_recv_stream()
3362 if (!drc->drc_heal) { in dmu_recv_stream()
3369 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa), in dmu_recv_stream()
3370 drc->drc_ds->ds_object, drc->drc_fromsnapobj, in dmu_recv_stream()
3371 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs); in dmu_recv_stream()
3377 drc->drc_ivset_guid = 0; in dmu_recv_stream()
3379 &drc->drc_ivset_guid); in dmu_recv_stream()
3381 if (!drc->drc_newfs) in dmu_recv_stream()
3382 drc->drc_keynvl = fnvlist_dup(keynvl); in dmu_recv_stream()
3385 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) { in dmu_recv_stream()
3386 err = resume_check(drc, drc->drc_begin_nvl); in dmu_recv_stream()
3396 if (drc->drc_drr_begin->drr_payloadlen == 0) { in dmu_recv_stream()
3407 drc->drc_should_save = B_TRUE; in dmu_recv_stream()
3409 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff, in dmu_recv_stream()
3412 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL); in dmu_recv_stream()
3413 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL); in dmu_recv_stream()
3414 rwa->os = drc->drc_os; in dmu_recv_stream()
3415 rwa->byteswap = drc->drc_byteswap; in dmu_recv_stream()
3416 rwa->heal = drc->drc_heal; in dmu_recv_stream()
3417 rwa->tofs = drc->drc_tofs; in dmu_recv_stream()
3418 rwa->resumable = drc->drc_resumable; in dmu_recv_stream()
3419 rwa->raw = drc->drc_raw; in dmu_recv_stream()
3420 rwa->spill = drc->drc_spill; in dmu_recv_stream()
3421 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0); in dmu_recv_stream()
3422 rwa->os->os_raw_receive = drc->drc_raw; in dmu_recv_stream()
3423 if (drc->drc_heal) { in dmu_recv_stream()
3424 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL, in dmu_recv_stream()
3427 list_create(&rwa->write_batch, sizeof (struct receive_record_arg), in dmu_recv_stream()
3433 * We're reading rwa->err without locks, which is safe since we are the in dmu_recv_stream()
3439 * We can leave this loop in 3 ways: First, if rwa->err is in dmu_recv_stream()
3440 * non-zero. In that case, the writer thread will free the rrd we just in dmu_recv_stream()
3442 * first loop and drc->drc_rrd was never allocated, or it's later, and in dmu_recv_stream()
3443 * drc->drc_rrd has been handed off to the writer thread who will free in dmu_recv_stream()
3445 * stream, then we free drc->drc_rrd and exit. in dmu_recv_stream()
3447 while (rwa->err == 0) { in dmu_recv_stream()
3453 ASSERT3P(drc->drc_rrd, ==, NULL); in dmu_recv_stream()
3454 drc->drc_rrd = drc->drc_next_rrd; in dmu_recv_stream()
3455 drc->drc_next_rrd = NULL; in dmu_recv_stream()
3456 /* Allocates and loads header into drc->drc_next_rrd */ in dmu_recv_stream()
3459 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) { in dmu_recv_stream()
3460 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd)); in dmu_recv_stream()
3461 drc->drc_rrd = NULL; in dmu_recv_stream()
3465 bqueue_enqueue(&rwa->q, drc->drc_rrd, in dmu_recv_stream()
3467 drc->drc_rrd->payload_size); in dmu_recv_stream()
3468 drc->drc_rrd = NULL; in dmu_recv_stream()
3471 ASSERT3P(drc->drc_rrd, ==, NULL); in dmu_recv_stream()
3472 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP); in dmu_recv_stream()
3473 drc->drc_rrd->eos_marker = B_TRUE; in dmu_recv_stream()
3474 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1); in dmu_recv_stream()
3476 mutex_enter(&rwa->mutex); in dmu_recv_stream()
3477 while (!rwa->done) { in dmu_recv_stream()
3482 (void) cv_wait_sig(&rwa->cv, &rwa->mutex); in dmu_recv_stream()
3484 mutex_exit(&rwa->mutex); in dmu_recv_stream()
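	/*
	 * Editorial note (not in the original source): the block below runs when
	 * a full stream (drr_fromguid == 0) was received as a clone.  Object IDs
	 * greater than the largest object referenced by the stream
	 * (rwa->max_object) were not written by the sender, so they are freed
	 * here with dmu_free_long_object(), stepping through the remaining
	 * objects with dmu_object_next().
	 */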
3491 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { in dmu_recv_stream()
3492 uint64_t obj = rwa->max_object + 1; in dmu_recv_stream()
3497 free_err = dmu_free_long_object(rwa->os, obj); in dmu_recv_stream()
3501 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0); in dmu_recv_stream()
3512 cv_destroy(&rwa->cv); in dmu_recv_stream()
3513 mutex_destroy(&rwa->mutex); in dmu_recv_stream()
3514 bqueue_destroy(&rwa->q); in dmu_recv_stream()
3515 list_destroy(&rwa->write_batch); in dmu_recv_stream()
3517 err = rwa->err; in dmu_recv_stream()
3525 if (drc->drc_next_rrd != NULL) in dmu_recv_stream()
3526 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd)); in dmu_recv_stream()
3532 drc->drc_os = NULL; in dmu_recv_stream()
3535 nvlist_free(drc->drc_begin_nvl); in dmu_recv_stream()
3544 nvlist_free(drc->drc_keynvl); in dmu_recv_stream()
3545 crfree(drc->drc_cred); in dmu_recv_stream()
3546 drc->drc_cred = NULL; in dmu_recv_stream()
3549 objlist_destroy(drc->drc_ignore_objlist); in dmu_recv_stream()
3550 drc->drc_ignore_objlist = NULL; in dmu_recv_stream()
3551 *voffp = drc->drc_voff; in dmu_recv_stream()
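/*
 * Editorial sketch (not from dmu_recv.c): the reader/worker handoff described
 * in the comment above dmu_recv_stream()'s main loop, reduced to a userland
 * analogy.  The kernel code uses bqueue_t, kmutex_t and kcondvar_t; this
 * sketch substitutes pthreads and a linked list.  All names (toy_*) are
 * hypothetical.  It shows the same three ideas: the producer polls the
 * worker's err field without a lock (single writer, single reader), it always
 * enqueues a final eos marker, and the worker frees every record it dequeues.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_record {
	struct toy_record *next;
	int eos_marker;		/* last record: tells the worker to stop */
	int payload;
};

struct toy_rwa {
	pthread_mutex_t mutex;
	pthread_cond_t cv;
	struct toy_record *head, *tail;
	int err;		/* written only by the worker thread */
	int done;		/* set by the worker after the eos marker */
};

static void
toy_enqueue(struct toy_rwa *rwa, struct toy_record *rec)
{
	pthread_mutex_lock(&rwa->mutex);
	if (rwa->tail == NULL)
		rwa->head = rec;
	else
		rwa->tail->next = rec;
	rwa->tail = rec;
	pthread_cond_signal(&rwa->cv);
	pthread_mutex_unlock(&rwa->mutex);
}

static void *
toy_worker(void *arg)
{
	struct toy_rwa *rwa = arg;

	for (int eos = 0; !eos; ) {
		pthread_mutex_lock(&rwa->mutex);
		while (rwa->head == NULL)
			pthread_cond_wait(&rwa->cv, &rwa->mutex);
		struct toy_record *rec = rwa->head;
		if ((rwa->head = rec->next) == NULL)
			rwa->tail = NULL;
		pthread_mutex_unlock(&rwa->mutex);

		eos = rec->eos_marker;
		if (!eos && rwa->err == 0 && rec->payload < 0)
			rwa->err = EINVAL;	/* only this thread writes err */
		free(rec);			/* worker frees every record */
	}
	pthread_mutex_lock(&rwa->mutex);
	rwa->done = 1;
	pthread_cond_signal(&rwa->cv);
	pthread_mutex_unlock(&rwa->mutex);
	return (NULL);
}

int
main(void)
{
	struct toy_rwa rwa = { PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER };
	pthread_t tid;
	pthread_create(&tid, NULL, toy_worker, &rwa);

	/* Like the receive loop, poll rwa.err without taking the mutex. */
	for (int i = 0; i < 8 && rwa.err == 0; i++) {
		struct toy_record *rec = calloc(1, sizeof (*rec));
		rec->payload = i;
		toy_enqueue(&rwa, rec);		/* ownership moves to worker */
	}

	/* Always send the eos marker so the worker drains and exits. */
	struct toy_record *eos = calloc(1, sizeof (*eos));
	eos->eos_marker = 1;
	toy_enqueue(&rwa, eos);

	pthread_mutex_lock(&rwa.mutex);
	while (!rwa.done)
		pthread_cond_wait(&rwa.cv, &rwa.mutex);
	pthread_mutex_unlock(&rwa.mutex);
	pthread_join(tid, NULL);
	printf("receive finished, err = %d\n", rwa.err);
	return (rwa.err);
}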
3562 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); in dmu_recv_end_check()
3564 if (drc->drc_heal) { in dmu_recv_end_check()
3566 } else if (!drc->drc_newfs) { in dmu_recv_end_check()
3569 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); in dmu_recv_end_check()
3572 if (drc->drc_force) { in dmu_recv_end_check()
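			/*
			 * Editorial note (not in the original source): with a forced
			 * receive (zfs recv -F), snapshots of the target created after
			 * the incremental source will be rolled back.  The walk below
			 * follows ds_prev_snap_obj from origin_head's newest snapshot
			 * back to the snapshot drc_ds was cloned from, verifying that
			 * each intervening snapshot belongs to the same dsl_dir.
			 */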
3581 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; in dmu_recv_end_check()
3583 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { in dmu_recv_end_check()
3589 if (snap->ds_dir != origin_head->ds_dir) in dmu_recv_end_check()
3595 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; in dmu_recv_end_check()
3605 if (drc->drc_keynvl != NULL) { in dmu_recv_end_check()
3606 error = dsl_crypto_recv_raw_key_check(drc->drc_ds, in dmu_recv_end_check()
3607 drc->drc_keynvl, tx); in dmu_recv_end_check()
3614 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, in dmu_recv_end_check()
3615 origin_head, drc->drc_force, drc->drc_owner, tx); in dmu_recv_end_check()
3621 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); in dmu_recv_end_check()
3626 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); in dmu_recv_end_check()
3628 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, in dmu_recv_end_check()
3629 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); in dmu_recv_end_check()
3639 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0; in dmu_recv_end_sync()
3642 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", in dmu_recv_end_sync()
3643 tx, "snap=%s", drc->drc_tosnap); in dmu_recv_end_sync()
3644 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE; in dmu_recv_end_sync()
3646 if (drc->drc_heal) { in dmu_recv_end_sync()
3647 if (drc->drc_keynvl != NULL) { in dmu_recv_end_sync()
3648 nvlist_free(drc->drc_keynvl); in dmu_recv_end_sync()
3649 drc->drc_keynvl = NULL; in dmu_recv_end_sync()
3651 } else if (!drc->drc_newfs) { in dmu_recv_end_sync()
3654 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, in dmu_recv_end_sync()
3657 if (drc->drc_force) { in dmu_recv_end_sync()
3664 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; in dmu_recv_end_sync()
3666 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { in dmu_recv_end_sync()
3670 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); in dmu_recv_end_sync()
3671 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; in dmu_recv_end_sync()
3677 if (drc->drc_keynvl != NULL) { in dmu_recv_end_sync()
3678 dsl_crypto_recv_raw_key_sync(drc->drc_ds, in dmu_recv_end_sync()
3679 drc->drc_keynvl, tx); in dmu_recv_end_sync()
3680 nvlist_free(drc->drc_keynvl); in dmu_recv_end_sync()
3681 drc->drc_keynvl = NULL; in dmu_recv_end_sync()
3684 VERIFY3P(drc->drc_ds->ds_prev, ==, in dmu_recv_end_sync()
3685 origin_head->ds_prev); in dmu_recv_end_sync()
3687 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, in dmu_recv_end_sync()
3693 drc->drc_os = NULL; in dmu_recv_end_sync()
3696 drc->drc_tosnap, tx); in dmu_recv_end_sync()
3699 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); in dmu_recv_end_sync()
3700 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = in dmu_recv_end_sync()
3701 drc->drc_drrb->drr_creation_time; in dmu_recv_end_sync()
3702 dsl_dataset_phys(origin_head->ds_prev)->ds_guid = in dmu_recv_end_sync()
3703 drc->drc_drrb->drr_toguid; in dmu_recv_end_sync()
3704 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= in dmu_recv_end_sync()
3707 dmu_buf_will_dirty(origin_head->ds_dbuf, tx); in dmu_recv_end_sync()
3708 dsl_dataset_phys(origin_head)->ds_flags &= in dmu_recv_end_sync()
3712 dsl_dataset_phys(origin_head)->ds_prev_snap_obj; in dmu_recv_end_sync()
3715 dsl_destroy_head_sync_impl(drc->drc_ds, tx); in dmu_recv_end_sync()
3717 if (drc->drc_owner != NULL) in dmu_recv_end_sync()
3718 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); in dmu_recv_end_sync()
3720 dsl_dataset_t *ds = drc->drc_ds; in dmu_recv_end_sync()
3722 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); in dmu_recv_end_sync()
3725 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); in dmu_recv_end_sync()
3726 dsl_dataset_phys(ds->ds_prev)->ds_creation_time = in dmu_recv_end_sync()
3727 drc->drc_drrb->drr_creation_time; in dmu_recv_end_sync()
3728 dsl_dataset_phys(ds->ds_prev)->ds_guid = in dmu_recv_end_sync()
3729 drc->drc_drrb->drr_toguid; in dmu_recv_end_sync()
3730 dsl_dataset_phys(ds->ds_prev)->ds_flags &= in dmu_recv_end_sync()
3733 dmu_buf_will_dirty(ds->ds_dbuf, tx); in dmu_recv_end_sync()
3734 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; in dmu_recv_end_sync()
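		/*
		 * Editorial note (not in the original source): the zap_remove()
		 * calls below clear the DS_FIELD_RESUME_* entries that a resumable
		 * receive keeps on the dataset object; the stream has now been
		 * received in full, so the saved resume state is no longer needed.
		 */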
3736 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3738 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3740 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3742 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3744 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3746 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3748 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, in dmu_recv_end_sync()
3752 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; in dmu_recv_end_sync()
3762 	 * value may be absent (older send streams, or when the relevant tunable is set), in which case we will leave the newly-generated in dmu_recv_end_sync()
3765 if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) { in dmu_recv_end_sync()
3766 dmu_object_zapify(dp->dp_meta_objset, newsnapobj, in dmu_recv_end_sync()
3768 VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj, in dmu_recv_end_sync()
3770 &drc->drc_ivset_guid, tx)); in dmu_recv_end_sync()
3781 if (!drc->drc_raw && encrypted) { in dmu_recv_end_sync()
3782 (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa, in dmu_recv_end_sync()
3783 drc->drc_ds->ds_object, drc->drc_ds); in dmu_recv_end_sync()
3785 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag); in dmu_recv_end_sync()
3786 drc->drc_ds = NULL; in dmu_recv_end_sync()
3800 dsl_dataset_name(drc->drc_ds, name); in dmu_recv_existing_end()
3804 return (dsl_sync_task(drc->drc_tofs, in dmu_recv_existing_end()
3812 return (dsl_sync_task(drc->drc_tofs, in dmu_recv_new_end()
3822 drc->drc_owner = owner; in dmu_recv_end()
3824 if (drc->drc_newfs) in dmu_recv_end()
3831 nvlist_free(drc->drc_keynvl); in dmu_recv_end()
3832 } else if (!drc->drc_heal) { in dmu_recv_end()
3833 if (drc->drc_newfs) { in dmu_recv_end()
3834 zvol_create_minor(drc->drc_tofs); in dmu_recv_end()
3837 drc->drc_tofs, drc->drc_tosnap); in dmu_recv_end()
3842 crfree(drc->drc_cred); in dmu_recv_end()
3843 drc->drc_cred = NULL; in dmu_recv_end()
3854 return (os->os_dsl_dataset != NULL && in dmu_objset_is_receiving()
3855 os->os_dsl_dataset->ds_owner == dmu_recv_tag); in dmu_objset_is_receiving()