/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	ssize_t resid; /* have to get resid to get detailed errno */
	ASSERT0(len % 8);

	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
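
/*
 * Illustrative sketch (not compiled): how a minimal userland consumer could
 * frame the stream that dump_bytes() produces.  Every record is a fixed-size
 * dmu_replay_record_t, folded into the running fletcher-4 checksum before it
 * is written; read_exact() below is a hypothetical helper, not part of this
 * file or any library API.
 */
#if 0
static int
example_read_records(int fd)
{
	dmu_replay_record_t drr;
	zio_cksum_t cksum = { 0 };

	for (;;) {
		if (read_exact(fd, &drr, sizeof (drr)) != 0)
			return (-1);
		/* checksum everything, in the order it was written */
		fletcher_4_incremental_native(&drr, sizeof (drr), &cksum);
		if (drr.drr_type == DRR_END)
			break;
		/* ... dispatch on drr.drr_type, read any payload ... */
	}
	return (0);
}
#endif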
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object+offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
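
/*
 * Worked example of the aggregation above (a sketch, with made-up offsets):
 * freeing (obj 5, off 0, len 128K) makes a pending DRR_FREE; a following
 * free of (obj 5, off 128K, len 128K) is contiguous, so the pending record
 * simply grows to len 256K.  A free of (obj 6, off 0, len 128K) is not a
 * continuation, so the pending record is pushed to the stream first:
 */
#if 0
	(void) dump_free(dsp, 5, 0, 128 << 10);
	(void) dump_free(dsp, 5, 128 << 10, 128 << 10);	/* coalesced */
	(void) dump_free(dsp, 6, 0, 128 << 10);		/* flushes pending */
#endif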
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for embedded BPs, so
		 * (like fletcher4-checksummed blocks) userland will have
		 * to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
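
/*
 * dump_bytes() asserts that every write is a multiple of 8 bytes, so
 * variable-length payloads (embedded-BP data above, bonus buffers in
 * dump_dnode()) are padded with P2ROUNDUP before being written.  For example:
 *
 *	P2ROUNDUP(5, 8)  == 8
 *	P2ROUNDUP(8, 8)  == 8
 *	P2ROUNDUP(13, 8) == 16
 *
 * so a 13-byte embedded payload occupies 16 bytes in the stream; the
 * receiver reads the same rounded-up length (see restore_read() below).
 */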
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
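
/*
 * Worked example of BP_SPAN: for a dnode with 128K data blocks
 * (dn_datablkszsec == 256, since 256 << SPA_MINBLOCKSHIFT == 128K) and
 * 16K indirect blocks (dn_indblkshift == 14, so each indirect block holds
 * 2^(14 - SPA_BLKPTRSHIFT) == 128 block pointers):
 *
 *	BP_SPAN(dnp, 0) == 128K			(one data block)
 *	BP_SPAN(dnp, 1) == 128 * 128K == 16M
 *	BP_SPAN(dnp, 2) == 128 * 16M == 2G
 *
 * i.e. a hole at level n covers BP_SPAN(dnp, n) bytes of the object.
 */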
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (SET_ERROR(EINTR));

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (zb->zb_level == ZB_ZIL_LEVEL) {
		/*
		 * If we are sending a non-snapshot (which is allowed on
		 * read-only pools), it may have a ZIL, which must be ignored.
		 */
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsp, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
		err = dump_write_embedded(dsp, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else { /* it's a level-0 block of a regular object */
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT0(zb->zb_level);
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10c;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		err = dump_write(dsp, type, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
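
/*
 * Worked example of the meta-dnode arithmetic in backup_cb(): dnodes are
 * 512 bytes (DNODE_SHIFT == 9) and meta-dnode blocks are 16K
 * (DNODE_BLOCK_SHIFT == 14), so each block holds 2^(14-9) == 32 dnodes.
 * A hole spanning `span` bytes at blkid b in the meta-dnode therefore
 * frees (b * span) >> DNODE_SHIFT as the first object and
 * span >> DNODE_SHIFT objects in total; e.g. a 16K level-0 hole at
 * blkid 3 frees objects 96 through 127.
 */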
/*
 * Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
    zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
    int outfd, vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;

	err = dmu_objset_from_ds(ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	} else {
		embedok = B_FALSE;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromzb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
		fromtxg = fromzb->zbm_creation_txg;
	}
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
	if (!dsl_dataset_is_snapshot(ds)) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (fromzb != NULL);
	dsp->dsa_featureflags = featureflags;

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(ds, FTAG);

	return (err);
}
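
/*
 * Putting dmu_send_impl() together, the substream it emits is laid out as:
 *
 *	DRR_BEGIN	magic, version info, feature flags, to/from guids,
 *			and the snapshot name
 *	body records	DRR_OBJECT, DRR_WRITE, DRR_FREE, DRR_FREEOBJECTS,
 *			DRR_SPILL, ... -- one per item backup_cb() visits,
 *			in increasing object,offset order
 *	DRR_END		the running fletcher-4 checksum of everything
 *			before it, plus the to-guid
 *
 * Each record is one fixed-size dmu_replay_record_t, optionally followed
 * by its payload (block data, a bonus buffer, or embedded-BP bytes).
 */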
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time = fromds->ds_phys->ds_creation_time;
		zb.zbm_creation_txg = fromds->ds_phys->ds_creation_txg;
		zb.zbm_guid = fromds->ds_phys->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, embedok,
		    outfd, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, embedok,
		    outfd, vp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    fromds->ds_phys->ds_creation_time;
				zb.zbm_creation_txg =
				    fromds->ds_phys->ds_creation_txg;
				zb.zbm_guid = fromds->ds_phys->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, embedok,
		    outfd, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, embedok,
		    outfd, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
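
/*
 * Worked example of the estimate above, assuming a 128K recordsize and
 * 1G of changed data: the indirect-block adjustment subtracts
 * (1G / 128K) * sizeof (blkptr_t) == 8192 * 128 bytes == 1M, and the
 * per-block overhead then adds one dmu_replay_record_t header per 128K
 * block (8192 of them), so the estimate stays within a fraction of a
 * percent of the raw size of the changed data.
 */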
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = ds->ds_phys->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (snap->ds_phys->ds_guid == fromguid)
				break;
			obj = snap->ds_phys->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, most recent snapshot must be $ORIGIN */
		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
			return (SET_ERROR(ENODEV));
		drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.
		 * We'll recheck snapshot limits again at the end (we create
		 * the filesystems and increment those counts during
		 * begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!dsl_dataset_is_snapshot(origin)) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (origin->ds_phys->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_cred = CRED();

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
}
struct restorearg {
	int err;
	boolean_t byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
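
/*
 * Illustrative sketch (not compiled) of the guid map life cycle: the tree
 * is keyed by snapshot guid via guid_compare(), populated by
 * add_ds_to_guidmap() after each substream completes, probed with
 * avl_find() when a DRR_WRITE_BYREF record references an earlier
 * substream, and torn down by free_guid_map_onexit() when the cleanup
 * file descriptor is closed.  some_refguid below is a hypothetical value.
 */
#if 0
	avl_tree_t map;
	guid_map_entry_t search, *found;
	avl_index_t where;

	avl_create(&map, guid_compare, sizeof (guid_map_entry_t),
	    offsetof(guid_map_entry_t, avlnode));
	search.guid = some_refguid;	/* hypothetical */
	found = avl_find(&map, &search, &where);
#endif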
static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (caddr_t)ra->buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	}
#undef DO64
#undef DO32
}
static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err;
	dmu_tx_t *tx;
	void *data = NULL;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (ra->err != 0)
			return (ra->err);
	}

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err != 0) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
		dmu_tx_commit(tx);
	} else {
		/* currently allocated, want to be allocated */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen);
	}
	if (err != 0) {
		return (SET_ERROR(EINVAL));
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, drro->drr_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_long_object(os, obj);
		if (err != 0)
			return (err);
	}
	return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
	}
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = os;
	}

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
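
/*
 * Worked example of DRR_WRITE_BYREF resolution: suppose an earlier
 * substream (guid G1) already wrote a block at (object 7, offset 0), and
 * the current substream (guid G2) carries a byref record with
 * drr_refguid == G1, drr_refobject == 7, drr_refoffset == 0.  Since
 * drr_toguid (G2) != drr_refguid (G1), the guid map is consulted to find
 * G1's dataset, the block is read from there with dmu_buf_hold(), and its
 * contents are written locally instead of being carried in the stream.
 */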
static int
restore_write_embedded(struct restorearg *ra, objset_t *os,
    struct drr_write_embedded *drrwnp)
{
	dmu_tx_t *tx;
	int err;
	void *data;

	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, P2ROUNDUP(drrwnp->drr_psize, 8));
	if (data == NULL)
		return (ra->err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwnp->drr_object,
	    drrwnp->drr_offset, drrwnp->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(os, drrwnp->drr_object,
	    drrwnp->drr_offset, data, drrwnp->drr_etype,
	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
	    ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	dmu_tx_commit(tx);
	return (0);
}

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
	dmu_tx_t *tx;
	void *data;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > SPA_MAXBLOCKSIZE)
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrs->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	char name[MAXNAMELEN];
	dsl_dataset_name(drc->drc_ds, name);
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	(void) dsl_destroy_head(name);
}
/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.vp = vp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

	ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err != 0)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = SET_ERROR(EINTR);
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_WRITE_EMBEDDED:
		{
			struct drr_write_embedded drrwe =
			    drr->drr_u.drr_write_embedded;
			ra.err = restore_write_embedded(&ra, os, &drrwe);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = SET_ERROR(ECKSUM);
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = SET_ERROR(EINVAL);
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	kmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}
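
/*
 * A note on the DRR_END comparison above, as a worked sequence:
 *
 *	sender:   dump_bytes() folds records 1..N into dsa_zc; the DRR_END
 *	          record carries that checksum (of records 1..N).
 *	receiver: restore_read() folds each record into ra.cksum as it is
 *	          read, so after reading record N+1 (the DRR_END itself)
 *	          ra.cksum covers 1..N+1.  pcksum, saved before that read,
 *	          still covers 1..N, which is why pcksum is what gets
 *	          compared against drre.drr_checksum.
 */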
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
			while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					return (error);
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}

static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
			while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		origin_head->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		origin_head->ds_prev->ds_phys->ds_guid =
		    drc->drc_drrb->drr_toguid;
		origin_head->ds_prev->ds_phys->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		ds->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
		ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
	}
	drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}

static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char name[MAXNAMELEN];

#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	drc->drc_owner = owner;

	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
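
/*
 * Illustrative sketch (not compiled): a hypothetical caller can use the
 * predicate above to refuse operations against a dataset that is
 * mid-receive, e.g.:
 */
#if 0
	if (dmu_objset_is_receiving(os))
		return (SET_ERROR(EBUSY));
#endif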