/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>

static char *dmu_recv_tag = "dmu_recv_tag";

struct backuparg {
	dmu_replay_record_t *drr;
	vnode_t *vp;
	offset_t *off;
	objset_t *os;
	zio_cksum_t zc;
	int err;
};

static int
dump_bytes(struct backuparg *ba, void *buf, int len)
{
	ssize_t resid; /* have to get resid to get detailed errno */
	ASSERT3U(len % 8, ==, 0);

	fletcher_4_incremental_native(buf, len, &ba->zc);
	ba->err = vn_rdwr(UIO_WRITE, ba->vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
	*ba->off += len;
	return (ba->err);
}

static int
dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
    uint64_t length)
{
	/* write a FREE record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_FREE;
	ba->drr->drr_u.drr_free.drr_object = object;
	ba->drr->drr_u.drr_free.drr_offset = offset;
	ba->drr->drr_u.drr_free.drr_length = length;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	return (0);
}

static int
dump_data(struct backuparg *ba, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, void *data)
{
	/* write a DATA record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_WRITE;
	ba->drr->drr_u.drr_write.drr_object = object;
	ba->drr->drr_u.drr_write.drr_type = type;
	ba->drr->drr_u.drr_write.drr_offset = offset;
	ba->drr->drr_u.drr_write.drr_length = blksz;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	if (dump_bytes(ba, data, blksz))
		return (EINTR);
	return (0);
}

static int
dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
{
	/* write a FREEOBJECTS record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_FREEOBJECTS;
	ba->drr->drr_u.drr_freeobjects.drr_firstobj = firstobj;
	ba->drr->drr_u.drr_freeobjects.drr_numobjs = numobjs;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	return (0);
}

static int
dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
{
	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(ba, object, 1));

	/* write an OBJECT record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_OBJECT;
	ba->drr->drr_u.drr_object.drr_object = object;
	ba->drr->drr_u.drr_object.drr_type = dnp->dn_type;
	ba->drr->drr_u.drr_object.drr_bonustype = dnp->dn_bonustype;
	ba->drr->drr_u.drr_object.drr_blksz =
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	ba->drr->drr_u.drr_object.drr_bonuslen = dnp->dn_bonuslen;
	ba->drr->drr_u.drr_object.drr_checksum = dnp->dn_checksum;
	ba->drr->drr_u.drr_object.drr_compress = dnp->dn_compress;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);

	if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)))
		return (EINTR);

	/* free anything past the end of the file */
	if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
		return (EINTR);
	if (ba->err)
		return (EINTR);
	return (0);
}

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))

static int
backup_cb(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
{
	struct backuparg *ba = arg;
	uint64_t object = bc->bc_bookmark.zb_object;
	int level = bc->bc_bookmark.zb_level;
	uint64_t blkid = bc->bc_bookmark.zb_blkid;
	blkptr_t *bp = bc->bc_blkptr.blk_birth ? &bc->bc_blkptr : NULL;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	void *data = bc->bc_data;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	ASSERT(data || bp == NULL);

	if (bp == NULL && object == 0) {
		uint64_t span = BP_SPAN(bc->bc_dnode, level);
		uint64_t dnobj = (blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
		uint64_t span = BP_SPAN(bc->bc_dnode, level);
		err = dump_free(ba, object, blkid * span, span);
	} else if (data && level == 0 && type == DMU_OT_DNODE) {
		dnode_phys_t *blk = data;
		int i;
		int blksz = BP_GET_LSIZE(bp);

		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj =
			    (blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(ba, dnobj, blk+i);
			if (err)
				break;
		}
	} else if (level == 0 &&
	    type != DMU_OT_DNODE && type != DMU_OT_OBJSET) {
		int blksz = BP_GET_LSIZE(bp);
		if (data == NULL) {
			uint32_t aflags = ARC_WAIT;
			arc_buf_t *abuf;
			zbookmark_t zb;

			zb.zb_objset = ba->os->os->os_dsl_dataset->ds_object;
			zb.zb_object = object;
			zb.zb_level = level;
			zb.zb_blkid = blkid;
			(void) arc_read(NULL, spa, bp,
			    dmu_ot[type].ot_byteswap, arc_getbuf_func, &abuf,
			    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_MUSTSUCCEED,
			    &aflags, &zb);

			if (abuf) {
				err = dump_data(ba, type, object, blkid * blksz,
				    blksz, abuf->b_data);
				(void) arc_buf_remove_ref(abuf, &abuf);
			}
		} else {
			err = dump_data(ba, type, object, blkid * blksz,
			    blksz, data);
		}
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

int
dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    vnode_t *vp, offset_t *off)
{
	dsl_dataset_t *ds = tosnap->os->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	struct backuparg ba;
	int err;
	uint64_t fromtxg = 0;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		if (fromsnap)
			return (EINVAL);

		if (ds->ds_dir->dd_phys->dd_origin_obj != NULL) {
			dsl_pool_t *dp = ds->ds_dir->dd_pool;
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_open_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, NULL,
			    DS_MODE_NONE, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			fromorigin = B_FALSE;
		}
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	drr->drr_u.drr_begin.drr_version = DMU_BACKUP_STREAM_VERSION;
	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os->os_phys->os_type;
	if (fromorigin)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds)
		fromtxg = fromds->ds_phys->ds_creation_txg;
	if (fromorigin)
		dsl_dataset_close(fromds, DS_MODE_NONE, FTAG);

	ba.drr = drr;
	ba.vp = vp;
	ba.os = tosnap;
	ba.off = off;
	ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);

	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) {
		kmem_free(drr, sizeof (dmu_replay_record_t));
		return (ba.err);
	}

	err = traverse_dsl_dataset(ds, fromtxg,
	    ADVANCE_PRE | ADVANCE_HOLES | ADVANCE_DATA | ADVANCE_NOLOCK,
	    backup_cb, &ba);

	if (err) {
		if (err == EINTR && ba.err)
			err = ba.err;
		kmem_free(drr, sizeof (dmu_replay_record_t));
		return (err);
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = ba.zc;

	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) {
		kmem_free(drr, sizeof (dmu_replay_record_t));
		return (ba.err);
	}

	kmem_free(drr, sizeof (dmu_replay_record_t));

	return (0);
}

struct recvbeginsyncarg {
	const char *tofs;
	const char *tosnap;
	dsl_dataset_t *origin;
	uint64_t fromguid;
	dmu_objset_type_t type;
	void *tag;
	boolean_t force;
	char clonelastname[MAXNAMELEN];
	dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
};

static dsl_dataset_t *
recv_full_sync_impl(dsl_pool_t *dp, uint64_t dsobj, dmu_objset_type_t type,
    cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds;

	VERIFY(0 == dsl_dataset_open_obj(dp, dsobj, NULL,
	    DS_MODE_EXCLUSIVE, dmu_recv_tag, &ds));

	if (type != DMU_OST_NONE) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    ds, &ds->ds_phys->ds_bp, type, tx);
	}

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_internal_log(LOG_DS_REPLAY_FULL_SYNC,
	    ds->ds_dir->dd_pool->dp_spa, tx, cr, "dataset = %lld",
	    ds->ds_phys->ds_dir_obj);

	return (ds);
}

/* ARGSUSED */
static int
recv_full_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val;
	int err;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);

	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (rbsa->origin) {
		/* make sure it's a snap in the same pool */
		if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);
		if (rbsa->origin->ds_phys->ds_num_children == 0)
			return (EINVAL);
		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
			return (ENODEV);
	}

	return (0);
}

static void
recv_full_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	uint64_t dsobj;

	dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
	    rbsa->origin, cr, tx);

	rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj,
	    rbsa->origin ? DMU_OST_NONE : rbsa->type, cr, tx);
}

static int
recv_full_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;

	/* must be a head ds */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		return (EINVAL);

	/* must not be a clone ds */
	if (ds->ds_prev != NULL)
		return (EINVAL);

	err = dsl_dataset_destroy_check(ds, rbsa->tag, tx);
	if (err)
		return (err);

	if (rbsa->origin) {
		/* make sure it's a snap in the same pool */
		if (rbsa->origin->ds_dir->dd_pool != ds->ds_dir->dd_pool)
			return (EXDEV);
		if (rbsa->origin->ds_phys->ds_num_children == 0)
			return (EINVAL);
		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
			return (ENODEV);
	}

	return (0);
}

static void
recv_full_existing_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	uint64_t dsobj;

	/*
	 * NB: caller must provide an extra hold on the dsl_dir_t, so it
	 * won't go away when dsl_dataset_destroy_sync() closes the
	 * dataset.
	 */
	dsl_dataset_destroy_sync(ds, rbsa->tag, cr, tx);

	dsobj = dsl_dataset_create_sync_impl(dd, rbsa->origin, tx);

	rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj,
	    rbsa->origin ? DMU_OST_NONE : rbsa->type, cr, tx);
}

/* ARGSUSED */
static int
recv_incremental_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;
	uint64_t val;

	/* must not have any changes since most recent snapshot */
	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
		return (ETXTBSY);

	/* must already be a snapshot of this fs */
	if (ds->ds_phys->ds_prev_snap_obj == 0)
		return (ENODEV);

	/* most recent snapshot must match fromguid */
	if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid)
		return (ENODEV);

	/* new snapshot name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);
	return (0);
}

/* ARGSUSED */
static void
recv_online_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ohds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	dsl_pool_t *dp = ohds->ds_dir->dd_pool;
	dsl_dataset_t *ods, *cds;
	uint64_t dsobj;

	/* create the temporary clone */
	VERIFY(0 == dsl_dataset_open_obj(dp, ohds->ds_phys->ds_prev_snap_obj,
	    NULL, DS_MODE_STANDARD, FTAG, &ods));
	dsobj = dsl_dataset_create_sync(ohds->ds_dir,
	    rbsa->clonelastname, ods, cr, tx);
	dsl_dataset_close(ods, DS_MODE_STANDARD, FTAG);

	/* open the temporary clone */
	VERIFY(0 == dsl_dataset_open_obj(dp, dsobj, NULL,
	    DS_MODE_EXCLUSIVE, dmu_recv_tag, &cds));

	dmu_buf_will_dirty(cds->ds_dbuf, tx);
	cds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	rbsa->ds = cds;

	spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC,
	    dp->dp_spa, tx, cr, "dataset = %lld",
	    cds->ds_phys->ds_dir_obj);
}

/* ARGSUSED */
static void
recv_offline_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC,
	    ds->ds_dir->dd_pool->dp_spa, tx, cr, "dataset = %lld",
	    ds->ds_phys->ds_dir_obj);
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, objset_t *origin, boolean_t online, dmu_recv_cookie_t *drc)
{
	int err = 0;
	boolean_t byteswap;
	struct recvbeginsyncarg rbsa;
	uint64_t version;
	int flags;
	dsl_dataset_t *ds;

	if (drrb->drr_magic == DMU_BACKUP_MAGIC)
		byteswap = FALSE;
	else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		byteswap = TRUE;
	else
		return (EINVAL);

	rbsa.tofs = tofs;
	rbsa.tosnap = tosnap;
	rbsa.origin = origin ? origin->os->os_dsl_dataset : NULL;
	rbsa.fromguid = drrb->drr_fromguid;
	rbsa.type = drrb->drr_type;
	rbsa.tag = FTAG;
	version = drrb->drr_version;
	flags = drrb->drr_flags;

	if (byteswap) {
		rbsa.type = BSWAP_32(rbsa.type);
		rbsa.fromguid = BSWAP_64(rbsa.fromguid);
		version = BSWAP_64(version);
		flags = BSWAP_32(flags);
	}

	if (version != DMU_BACKUP_STREAM_VERSION ||
	    rbsa.type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && origin == NULL))
		return (EINVAL);

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_force = force;

	/*
	 * Process the begin in syncing context.
	 */
	if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE) && !online) {
		/* offline incremental receive */
		err = dsl_dataset_open(tofs,
		    DS_MODE_EXCLUSIVE, dmu_recv_tag, &ds);
		if (err)
			return (err);

		/*
		 * Only do the rollback if the most recent snapshot
		 * matches the incremental source
		 */
		if (force) {
			if (ds->ds_prev == NULL ||
			    ds->ds_prev->ds_phys->ds_guid !=
			    rbsa.fromguid) {
				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE,
				    dmu_recv_tag);
				return (ENODEV);
			}
			(void) dsl_dataset_rollback(ds, DMU_OST_NONE);
		}
		rbsa.force = B_FALSE;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_incremental_check,
		    recv_offline_incremental_sync,
		    ds, &rbsa, 1);
		if (err) {
			dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, dmu_recv_tag);
			return (err);
		}
		drc->drc_logical_ds = drc->drc_real_ds = ds;
	} else if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE)) {
		/* online incremental receive */

		/* tmp clone name is: tofs/%tosnap */
		(void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
		    "%%%s", tosnap);

		/* open the dataset we are logically receiving into */
		err = dsl_dataset_open(tofs,
		    DS_MODE_STANDARD, dmu_recv_tag, &ds);
		if (err)
			return (err);

		rbsa.force = force;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_incremental_check,
		    recv_online_incremental_sync, ds, &rbsa, 5);
		if (err) {
			dsl_dataset_close(ds, DS_MODE_STANDARD, dmu_recv_tag);
			return (err);
		}
		drc->drc_logical_ds = ds;
		drc->drc_real_ds = rbsa.ds;
	} else {
		/* create new fs -- full backup or clone */
		dsl_dir_t *dd = NULL;
		const char *tail;

		err = dsl_dir_open(tofs, FTAG, &dd, &tail);
		if (err)
			return (err);
		if (tail == NULL) {
			if (!force) {
				dsl_dir_close(dd, FTAG);
				return (EEXIST);
			}

			rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
			err = dsl_dataset_open_obj(dd->dd_pool,
			    dd->dd_phys->dd_head_dataset_obj, NULL,
			    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT,
			    FTAG, &ds);
			rw_exit(&dd->dd_pool->dp_config_rwlock);
			if (err) {
				dsl_dir_close(dd, FTAG);
				return (err);
			}

			err = dsl_sync_task_do(dd->dd_pool,
			    recv_full_existing_check,
			    recv_full_existing_sync, ds, &rbsa, 5);
			/* if successful, sync task closes the ds for us */
			if (err)
				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
		} else {
			err = dsl_sync_task_do(dd->dd_pool, recv_full_check,
			    recv_full_sync, dd, &rbsa, 5);
		}
		dsl_dir_close(dd, FTAG);
		if (err)
			return (err);
		drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
		drc->drc_newfs = B_TRUE;
	}

	/* downgrade our hold on the ds from EXCLUSIVE to PRIMARY */
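	/*
	 * (A PRIMARY hold is sufficient while the stream is processed;
	 * dmu_recv_end() and the abort path attempt to upgrade it back
	 * to EXCLUSIVE with dsl_dataset_tryupgrade().)
	 */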
	dsl_dataset_downgrade(drc->drc_real_ds,
	    DS_MODE_EXCLUSIVE, DS_MODE_PRIMARY);

	return (0);
}

struct restorearg {
	int err;
	int byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
};

static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT3U(len % 8, ==, 0);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (caddr_t)ra->buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done)
			ra->err = EINVAL;
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_version);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		break;
	}
#undef DO64
#undef DO32
}

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err;
	dmu_tx_t *tx;

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (EINVAL);

	if (drro->drr_type == DMU_OT_NONE ||
	    drro->drr_type >= DMU_OT_NUMTYPES ||
	    drro->drr_bonustype >= DMU_OT_NUMTYPES ||
	    drro->drr_checksum >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (EINVAL);
	}

	tx = dmu_tx_create(os);

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
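			/*
			 * dmu_tx_assign() failed (e.g. ENOSPC), so the tx
			 * was aborted rather than committed; hand the
			 * error back to the caller.
			 */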
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	} else {
		/* currently allocated, want to be allocated */
		dmu_tx_hold_bonus(tx, drro->drr_object);
		/*
		 * We may change blocksize, so need to
		 * hold_write
		 */
		dmu_tx_hold_write(tx, drro->drr_object, 0, 1);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err) {
		dmu_tx_commit(tx);
		return (EINVAL);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksum, tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (drro->drr_bonuslen) {
		dmu_buf_t *db;
		void *data;
		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (data == NULL) {
			dmu_tx_commit(tx);
			return (ra->err);
		}
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (EINVAL);

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_free(os, obj, tx);
		dmu_tx_commit(tx);
		if (err && err != ENOENT)
			return (EINVAL);
	}
	return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    drrw->drr_type >= DMU_OT_NUMTYPES)
		return (EINVAL);

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap)
		dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	dmu_tx_t *tx;
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (EINVAL);

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);
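	/*
	 * Reserve the range being freed; drr_length may be -1ULL here,
	 * meaning "free to the end of the object".
	 */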
	dmu_tx_hold_free(tx, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_free_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length, tx);
	dmu_tx_commit(tx);
	return (err);
}

static void
recv_abort_cleanup(dmu_recv_cookie_t *drc)
{
	if (drc->drc_newfs || drc->drc_real_ds != drc->drc_logical_ds) {
		/*
		 * online incremental or new fs: destroy the fs (which
		 * may be a clone) that we created
		 */
		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag);
		if (drc->drc_real_ds != drc->drc_logical_ds) {
			dsl_dataset_close(drc->drc_logical_ds,
			    DS_MODE_STANDARD, dmu_recv_tag);
		}
	} else {
		/*
		 * offline incremental: rollback to most recent snapshot.
		 */
		int lmode = DS_MODE_PRIMARY;
		if (dsl_dataset_tryupgrade(drc->drc_real_ds,
		    DS_MODE_PRIMARY, DS_MODE_EXCLUSIVE)) {
			lmode = DS_MODE_EXCLUSIVE;
			(void) dsl_dataset_rollback(drc->drc_real_ds,
			    DMU_OST_NONE);
		}
		dsl_dataset_close(drc->drc_real_ds, lmode, dmu_recv_tag);
	}
}

/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		ra.byteswap = TRUE;

	{
		/* compute checksum of drr_begin record */
		dmu_replay_record_t *drr;
		drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);

		drr->drr_type = DRR_BEGIN;
		drr->drr_u.drr_begin = *drc->drc_drrb;
		if (ra.byteswap) {
			fletcher_4_incremental_byteswap(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		} else {
			fletcher_4_incremental_native(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		}
		kmem_free(drr, sizeof (dmu_replay_record_t));
	}

	if (ra.byteswap) {
		struct drr_begin *drrb = drc->drc_drrb;
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_version = BSWAP_64(drrb->drr_version);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	ra.vp = vp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT(drc->drc_drrb->drr_version == DMU_BACKUP_STREAM_VERSION);
	ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY(dmu_objset_open_ds(drc->drc_real_ds, DMU_OST_ANY, &os) == 0);

	ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = EINTR;
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (drre.drr_checksum.zc_word[0] != 0 &&
			    !ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum)) {
				ra.err = ECKSUM;
				goto out;
			}
			goto out;
		}
		default:
			ra.err = EINVAL;
			goto out;
		}
		pcksum = ra.cksum;
	}

out:
	dmu_objset_close(os);

	if (ra.err != 0) {
		/*
		 * rollback or destroy what we created, so we don't
		 * leave it in the restoring state.
		 */
		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
		recv_abort_cleanup(drc);
	}

	kmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}

struct recvendsyncarg {
	char *tosnap;
	uint64_t creation_time;
	uint64_t toguid;
};

static int
recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
}

static void
recv_end_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	dsl_dataset_snapshot_sync(ds, resa->tosnap, cr, tx);

	/* set snapshot's creation time and guid */
	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
}

int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
	int err = 0;
	int lmode;

	/*
	 * XXX hack; seems the ds is still dirty and
	 * dsl_pool_zil_clean() expects it to have a ds_user_ptr (and
	 * zil), but clone_swap() can close it.
	 */
	txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);

	if (dsl_dataset_tryupgrade(drc->drc_real_ds,
	    DS_MODE_PRIMARY, DS_MODE_EXCLUSIVE)) {
		lmode = DS_MODE_EXCLUSIVE;
	} else {
		recv_abort_cleanup(drc);
		return (EBUSY);
	}

	if (drc->drc_logical_ds != drc->drc_real_ds) {
		if (err == 0 && dsl_dataset_tryupgrade(drc->drc_logical_ds,
		    DS_MODE_STANDARD, DS_MODE_EXCLUSIVE)) {
			lmode = DS_MODE_EXCLUSIVE;
			err = dsl_dataset_clone_swap(drc->drc_real_ds,
			    drc->drc_logical_ds, drc->drc_force);
		} else {
			lmode = DS_MODE_STANDARD;
			err = EBUSY;
		}
	}

	if (err == 0) {
		struct recvendsyncarg resa;

		resa.creation_time = drc->drc_drrb->drr_creation_time;
		resa.toguid = drc->drc_drrb->drr_toguid;
		resa.tosnap = drc->drc_tosnap;

		err = dsl_sync_task_do(drc->drc_real_ds->ds_dir->dd_pool,
		    recv_end_check, recv_end_sync,
		    drc->drc_logical_ds, &resa, 3);
		if (err) {
			if (drc->drc_newfs) {
				ASSERT(drc->drc_logical_ds == drc->drc_real_ds);
				(void) dsl_dataset_destroy(drc->drc_real_ds,
				    dmu_recv_tag);
				return (err);
			} else {
				(void) dsl_dataset_rollback(drc->drc_logical_ds,
				    DMU_OST_NONE);
			}
		}
	}

	if (drc->drc_logical_ds != drc->drc_real_ds) {
		/* dsl_dataset_destroy() will close the ds */
		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag);
	}
	/* close the hold from dmu_recv_begin */
	dsl_dataset_close(drc->drc_logical_ds, lmode, dmu_recv_tag);
	return (err);
}