1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/dmu.h> 29 #include <sys/dmu_impl.h> 30 #include <sys/dmu_tx.h> 31 #include <sys/dbuf.h> 32 #include <sys/dnode.h> 33 #include <sys/zfs_context.h> 34 #include <sys/dmu_objset.h> 35 #include <sys/dmu_traverse.h> 36 #include <sys/dsl_dataset.h> 37 #include <sys/dsl_dir.h> 38 #include <sys/dsl_pool.h> 39 #include <sys/dsl_synctask.h> 40 #include <sys/zfs_ioctl.h> 41 #include <sys/zap.h> 42 #include <sys/zio_checksum.h> 43 44 static char *dmu_recv_tag = "dmu_recv_tag"; 45 46 struct backuparg { 47 dmu_replay_record_t *drr; 48 vnode_t *vp; 49 offset_t *off; 50 objset_t *os; 51 zio_cksum_t zc; 52 int err; 53 }; 54 55 static int 56 dump_bytes(struct backuparg *ba, void *buf, int len) 57 { 58 ssize_t resid; /* have to get resid to get detailed errno */ 59 ASSERT3U(len % 8, ==, 0); 60 61 fletcher_4_incremental_native(buf, len, &ba->zc); 62 ba->err = vn_rdwr(UIO_WRITE, ba->vp, 63 (caddr_t)buf, len, 64 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid); 65 *ba->off += len; 66 return (ba->err); 67 } 68 69 static int 70 dump_free(struct backuparg *ba, uint64_t object, uint64_t offset, 71 uint64_t length) 72 { 73 /* write a FREE record */ 74 bzero(ba->drr, sizeof (dmu_replay_record_t)); 75 ba->drr->drr_type = DRR_FREE; 76 ba->drr->drr_u.drr_free.drr_object = object; 77 ba->drr->drr_u.drr_free.drr_offset = offset; 78 ba->drr->drr_u.drr_free.drr_length = length; 79 80 if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) 81 return (EINTR); 82 return (0); 83 } 84 85 static int 86 dump_data(struct backuparg *ba, dmu_object_type_t type, 87 uint64_t object, uint64_t offset, int blksz, void *data) 88 { 89 /* write a DATA record */ 90 bzero(ba->drr, sizeof (dmu_replay_record_t)); 91 ba->drr->drr_type = DRR_WRITE; 92 ba->drr->drr_u.drr_write.drr_object = object; 93 ba->drr->drr_u.drr_write.drr_type = type; 94 ba->drr->drr_u.drr_write.drr_offset = offset; 95 ba->drr->drr_u.drr_write.drr_length = blksz; 96 97 if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) 98 return (EINTR); 99 if (dump_bytes(ba, data, blksz)) 100 return (EINTR); 101 return (0); 102 } 103 104 static int 105 dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs) 106 { 107 /* write a FREEOBJECTS record */ 108 bzero(ba->drr, sizeof (dmu_replay_record_t)); 109 ba->drr->drr_type = DRR_FREEOBJECTS; 110 ba->drr->drr_u.drr_freeobjects.drr_firstobj = firstobj; 111 ba->drr->drr_u.drr_freeobjects.drr_numobjs = numobjs; 112 113 if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) 114 return (EINTR); 115 return (0); 116 } 117 118 static int 119 dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp) 120 { 121 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE) 122 return (dump_freeobjects(ba, object, 1)); 123 124 /* write an OBJECT record */ 125 bzero(ba->drr, sizeof (dmu_replay_record_t)); 126 ba->drr->drr_type = DRR_OBJECT; 127 ba->drr->drr_u.drr_object.drr_object = object; 128 ba->drr->drr_u.drr_object.drr_type = dnp->dn_type; 129 ba->drr->drr_u.drr_object.drr_bonustype = dnp->dn_bonustype; 130 ba->drr->drr_u.drr_object.drr_blksz = 131 dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; 132 ba->drr->drr_u.drr_object.drr_bonuslen = dnp->dn_bonuslen; 133 ba->drr->drr_u.drr_object.drr_checksum = dnp->dn_checksum; 134 ba->drr->drr_u.drr_object.drr_compress = dnp->dn_compress; 135 136 if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t))) 137 return (EINTR); 138 139 if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8))) 140 return (EINTR); 141 142 /* free anything past the end of the file */ 143 if (dump_free(ba, object, (dnp->dn_maxblkid + 1) * 144 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL)) 145 return (EINTR); 146 if (ba->err) 147 return (EINTR); 148 return (0); 149 } 150 151 #define BP_SPAN(dnp, level) \ 152 (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \ 153 (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) 154 155 static int 156 backup_cb(traverse_blk_cache_t *bc, spa_t *spa, void *arg) 157 { 158 struct backuparg *ba = arg; 159 uint64_t object = bc->bc_bookmark.zb_object; 160 int level = bc->bc_bookmark.zb_level; 161 uint64_t blkid = bc->bc_bookmark.zb_blkid; 162 blkptr_t *bp = bc->bc_blkptr.blk_birth ? &bc->bc_blkptr : NULL; 163 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE; 164 void *data = bc->bc_data; 165 int err = 0; 166 167 if (issig(JUSTLOOKING) && issig(FORREAL)) 168 return (EINTR); 169 170 ASSERT(data || bp == NULL); 171 172 if (bp == NULL && object == 0) { 173 uint64_t span = BP_SPAN(bc->bc_dnode, level); 174 uint64_t dnobj = (blkid * span) >> DNODE_SHIFT; 175 err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT); 176 } else if (bp == NULL) { 177 uint64_t span = BP_SPAN(bc->bc_dnode, level); 178 err = dump_free(ba, object, blkid * span, span); 179 } else if (data && level == 0 && type == DMU_OT_DNODE) { 180 dnode_phys_t *blk = data; 181 int i; 182 int blksz = BP_GET_LSIZE(bp); 183 184 for (i = 0; i < blksz >> DNODE_SHIFT; i++) { 185 uint64_t dnobj = 186 (blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i; 187 err = dump_dnode(ba, dnobj, blk+i); 188 if (err) 189 break; 190 } 191 } else if (level == 0 && 192 type != DMU_OT_DNODE && type != DMU_OT_OBJSET) { 193 int blksz = BP_GET_LSIZE(bp); 194 if (data == NULL) { 195 uint32_t aflags = ARC_WAIT; 196 arc_buf_t *abuf; 197 zbookmark_t zb; 198 199 zb.zb_objset = ba->os->os->os_dsl_dataset->ds_object; 200 zb.zb_object = object; 201 zb.zb_level = level; 202 zb.zb_blkid = blkid; 203 (void) arc_read(NULL, spa, bp, 204 dmu_ot[type].ot_byteswap, arc_getbuf_func, &abuf, 205 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_MUSTSUCCEED, 206 &aflags, &zb); 207 208 if (abuf) { 209 err = dump_data(ba, type, object, blkid * blksz, 210 blksz, abuf->b_data); 211 (void) arc_buf_remove_ref(abuf, &abuf); 212 } 213 } else { 214 err = dump_data(ba, type, object, blkid * blksz, 215 blksz, data); 216 } 217 } 218 219 ASSERT(err == 0 || err == EINTR); 220 return (err); 221 } 222 223 int 224 dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin, 225 vnode_t *vp, offset_t *off) 226 { 227 dsl_dataset_t *ds = tosnap->os->os_dsl_dataset; 228 dsl_dataset_t *fromds = fromsnap ? fromsnap->os->os_dsl_dataset : NULL; 229 dmu_replay_record_t *drr; 230 struct backuparg ba; 231 int err; 232 uint64_t fromtxg = 0; 233 234 /* tosnap must be a snapshot */ 235 if (ds->ds_phys->ds_next_snap_obj == 0) 236 return (EINVAL); 237 238 /* fromsnap must be an earlier snapshot from the same fs as tosnap */ 239 if (fromds && (ds->ds_dir != fromds->ds_dir || 240 fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg)) 241 return (EXDEV); 242 243 if (fromorigin) { 244 if (fromsnap) 245 return (EINVAL); 246 247 if (ds->ds_dir->dd_phys->dd_origin_obj != NULL) { 248 dsl_pool_t *dp = ds->ds_dir->dd_pool; 249 rw_enter(&dp->dp_config_rwlock, RW_READER); 250 err = dsl_dataset_open_obj(dp, 251 ds->ds_dir->dd_phys->dd_origin_obj, NULL, 252 DS_MODE_NONE, FTAG, &fromds); 253 rw_exit(&dp->dp_config_rwlock); 254 if (err) 255 return (err); 256 } else { 257 fromorigin = B_FALSE; 258 } 259 } 260 261 262 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); 263 drr->drr_type = DRR_BEGIN; 264 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; 265 drr->drr_u.drr_begin.drr_version = DMU_BACKUP_STREAM_VERSION; 266 drr->drr_u.drr_begin.drr_creation_time = 267 ds->ds_phys->ds_creation_time; 268 drr->drr_u.drr_begin.drr_type = tosnap->os->os_phys->os_type; 269 if (fromorigin) 270 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE; 271 drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid; 272 if (fromds) 273 drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid; 274 dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname); 275 276 if (fromds) 277 fromtxg = fromds->ds_phys->ds_creation_txg; 278 if (fromorigin) 279 dsl_dataset_close(fromds, DS_MODE_NONE, FTAG); 280 281 ba.drr = drr; 282 ba.vp = vp; 283 ba.os = tosnap; 284 ba.off = off; 285 ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0); 286 287 if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) { 288 kmem_free(drr, sizeof (dmu_replay_record_t)); 289 return (ba.err); 290 } 291 292 err = traverse_dsl_dataset(ds, fromtxg, 293 ADVANCE_PRE | ADVANCE_HOLES | ADVANCE_DATA | ADVANCE_NOLOCK, 294 backup_cb, &ba); 295 296 if (err) { 297 if (err == EINTR && ba.err) 298 err = ba.err; 299 kmem_free(drr, sizeof (dmu_replay_record_t)); 300 return (err); 301 } 302 303 bzero(drr, sizeof (dmu_replay_record_t)); 304 drr->drr_type = DRR_END; 305 drr->drr_u.drr_end.drr_checksum = ba.zc; 306 307 if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) { 308 kmem_free(drr, sizeof (dmu_replay_record_t)); 309 return (ba.err); 310 } 311 312 kmem_free(drr, sizeof (dmu_replay_record_t)); 313 314 return (0); 315 } 316 317 struct recvbeginsyncarg { 318 const char *tofs; 319 const char *tosnap; 320 dsl_dataset_t *origin; 321 uint64_t fromguid; 322 dmu_objset_type_t type; 323 void *tag; 324 boolean_t force; 325 char clonelastname[MAXNAMELEN]; 326 dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */ 327 }; 328 329 static dsl_dataset_t * 330 recv_full_sync_impl(dsl_pool_t *dp, uint64_t dsobj, dmu_objset_type_t type, 331 cred_t *cr, dmu_tx_t *tx) 332 { 333 dsl_dataset_t *ds; 334 335 VERIFY(0 == dsl_dataset_open_obj(dp, dsobj, NULL, 336 DS_MODE_EXCLUSIVE, dmu_recv_tag, &ds)); 337 338 if (type != DMU_OST_NONE) { 339 (void) dmu_objset_create_impl(dp->dp_spa, 340 ds, &ds->ds_phys->ds_bp, type, tx); 341 } 342 343 dmu_buf_will_dirty(ds->ds_dbuf, tx); 344 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 345 346 spa_history_internal_log(LOG_DS_REPLAY_FULL_SYNC, 347 ds->ds_dir->dd_pool->dp_spa, tx, cr, "dataset = %lld", 348 ds->ds_phys->ds_dir_obj); 349 350 return (ds); 351 } 352 353 /* ARGSUSED */ 354 static int 355 recv_full_check(void *arg1, void *arg2, dmu_tx_t *tx) 356 { 357 dsl_dir_t *dd = arg1; 358 struct recvbeginsyncarg *rbsa = arg2; 359 objset_t *mos = dd->dd_pool->dp_meta_objset; 360 uint64_t val; 361 int err; 362 363 err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj, 364 strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val); 365 366 if (err != ENOENT) 367 return (err ? err : EEXIST); 368 369 if (rbsa->origin) { 370 /* make sure it's a snap in the same pool */ 371 if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool) 372 return (EXDEV); 373 if (rbsa->origin->ds_phys->ds_num_children == 0) 374 return (EINVAL); 375 if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid) 376 return (ENODEV); 377 } 378 379 return (0); 380 } 381 382 static void 383 recv_full_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 384 { 385 dsl_dir_t *dd = arg1; 386 struct recvbeginsyncarg *rbsa = arg2; 387 uint64_t dsobj; 388 389 dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1, 390 rbsa->origin, cr, tx); 391 392 rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj, 393 rbsa->origin ? DMU_OST_NONE : rbsa->type, cr, tx); 394 } 395 396 static int 397 recv_full_existing_check(void *arg1, void *arg2, dmu_tx_t *tx) 398 { 399 dsl_dataset_t *ds = arg1; 400 struct recvbeginsyncarg *rbsa = arg2; 401 int err; 402 403 /* must be a head ds */ 404 if (ds->ds_phys->ds_next_snap_obj != 0) 405 return (EINVAL); 406 407 /* must not be a clone ds */ 408 if (ds->ds_prev != NULL) 409 return (EINVAL); 410 411 err = dsl_dataset_destroy_check(ds, rbsa->tag, tx); 412 if (err) 413 return (err); 414 415 if (rbsa->origin) { 416 /* make sure it's a snap in the same pool */ 417 if (rbsa->origin->ds_dir->dd_pool != ds->ds_dir->dd_pool) 418 return (EXDEV); 419 if (rbsa->origin->ds_phys->ds_num_children == 0) 420 return (EINVAL); 421 if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid) 422 return (ENODEV); 423 } 424 425 return (0); 426 } 427 428 static void 429 recv_full_existing_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 430 { 431 dsl_dataset_t *ds = arg1; 432 struct recvbeginsyncarg *rbsa = arg2; 433 dsl_dir_t *dd = ds->ds_dir; 434 uint64_t dsobj; 435 436 /* 437 * NB: caller must provide an extra hold on the dsl_dir_t, so it 438 * won't go away when dsl_dataset_destroy_sync() closes the 439 * dataset. 440 */ 441 dsl_dataset_destroy_sync(ds, rbsa->tag, cr, tx); 442 443 dsobj = dsl_dataset_create_sync_impl(dd, rbsa->origin, tx); 444 445 rbsa->ds = recv_full_sync_impl(dd->dd_pool, dsobj, 446 rbsa->origin ? DMU_OST_NONE : rbsa->type, cr, tx); 447 } 448 449 /* ARGSUSED */ 450 static int 451 recv_incremental_check(void *arg1, void *arg2, dmu_tx_t *tx) 452 { 453 dsl_dataset_t *ds = arg1; 454 struct recvbeginsyncarg *rbsa = arg2; 455 int err; 456 uint64_t val; 457 458 /* must not have any changes since most recent snapshot */ 459 if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds)) 460 return (ETXTBSY); 461 462 /* must already be a snapshot of this fs */ 463 if (ds->ds_phys->ds_prev_snap_obj == 0) 464 return (ENODEV); 465 466 /* most recent snapshot must match fromguid */ 467 if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) 468 return (ENODEV); 469 470 /* temporary clone name must not exist */ 471 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, 472 ds->ds_dir->dd_phys->dd_child_dir_zapobj, 473 rbsa->clonelastname, 8, 1, &val); 474 if (err == 0) 475 return (EEXIST); 476 if (err != ENOENT) 477 return (err); 478 479 /* new snapshot name must not exist */ 480 err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, 481 ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val); 482 if (err == 0) 483 return (EEXIST); 484 if (err != ENOENT) 485 return (err); 486 return (0); 487 } 488 489 /* ARGSUSED */ 490 static void 491 recv_online_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 492 { 493 dsl_dataset_t *ohds = arg1; 494 struct recvbeginsyncarg *rbsa = arg2; 495 dsl_pool_t *dp = ohds->ds_dir->dd_pool; 496 dsl_dataset_t *ods, *cds; 497 uint64_t dsobj; 498 499 /* create the temporary clone */ 500 VERIFY(0 == dsl_dataset_open_obj(dp, ohds->ds_phys->ds_prev_snap_obj, 501 NULL, DS_MODE_STANDARD, FTAG, &ods)); 502 dsobj = dsl_dataset_create_sync(ohds->ds_dir, 503 rbsa->clonelastname, ods, cr, tx); 504 dsl_dataset_close(ods, DS_MODE_STANDARD, FTAG); 505 506 /* open the temporary clone */ 507 VERIFY(0 == dsl_dataset_open_obj(dp, dsobj, NULL, 508 DS_MODE_EXCLUSIVE, dmu_recv_tag, &cds)); 509 510 /* copy the refquota from the target fs to the clone */ 511 if (ohds->ds_quota > 0) 512 dsl_dataset_set_quota_sync(cds, &ohds->ds_quota, cr, tx); 513 514 dmu_buf_will_dirty(cds->ds_dbuf, tx); 515 cds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 516 517 rbsa->ds = cds; 518 519 spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC, 520 dp->dp_spa, tx, cr, "dataset = %lld", 521 cds->ds_phys->ds_dir_obj); 522 } 523 524 /* ARGSUSED */ 525 static void 526 recv_offline_incremental_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 527 { 528 dsl_dataset_t *ds = arg1; 529 530 dmu_buf_will_dirty(ds->ds_dbuf, tx); 531 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 532 533 spa_history_internal_log(LOG_DS_REPLAY_INC_SYNC, 534 ds->ds_dir->dd_pool->dp_spa, tx, cr, "dataset = %lld", 535 ds->ds_phys->ds_dir_obj); 536 } 537 538 /* 539 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() 540 * succeeds; otherwise we will leak the holds on the datasets. 541 */ 542 int 543 dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb, 544 boolean_t force, objset_t *origin, boolean_t online, dmu_recv_cookie_t *drc) 545 { 546 int err = 0; 547 boolean_t byteswap; 548 struct recvbeginsyncarg rbsa; 549 uint64_t version; 550 int flags; 551 dsl_dataset_t *ds; 552 553 if (drrb->drr_magic == DMU_BACKUP_MAGIC) 554 byteswap = FALSE; 555 else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) 556 byteswap = TRUE; 557 else 558 return (EINVAL); 559 560 rbsa.tofs = tofs; 561 rbsa.tosnap = tosnap; 562 rbsa.origin = origin ? origin->os->os_dsl_dataset : NULL; 563 rbsa.fromguid = drrb->drr_fromguid; 564 rbsa.type = drrb->drr_type; 565 rbsa.tag = FTAG; 566 version = drrb->drr_version; 567 flags = drrb->drr_flags; 568 569 if (byteswap) { 570 rbsa.type = BSWAP_32(rbsa.type); 571 rbsa.fromguid = BSWAP_64(rbsa.fromguid); 572 version = BSWAP_64(version); 573 flags = BSWAP_32(flags); 574 } 575 576 if (version != DMU_BACKUP_STREAM_VERSION || 577 rbsa.type >= DMU_OST_NUMTYPES || 578 ((flags & DRR_FLAG_CLONE) && origin == NULL)) 579 return (EINVAL); 580 581 bzero(drc, sizeof (dmu_recv_cookie_t)); 582 drc->drc_drrb = drrb; 583 drc->drc_tosnap = tosnap; 584 drc->drc_force = force; 585 586 /* 587 * Process the begin in syncing context. 588 */ 589 if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE) && !online) { 590 /* offline incremental receive */ 591 err = dsl_dataset_open(tofs, 592 DS_MODE_EXCLUSIVE, dmu_recv_tag, &ds); 593 if (err) 594 return (err); 595 596 /* 597 * Only do the rollback if the most recent snapshot 598 * matches the incremental source 599 */ 600 if (force) { 601 if (ds->ds_prev == NULL || 602 ds->ds_prev->ds_phys->ds_guid != 603 rbsa.fromguid) { 604 dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, 605 dmu_recv_tag); 606 return (ENODEV); 607 } 608 (void) dsl_dataset_rollback(ds, DMU_OST_NONE); 609 } 610 rbsa.force = B_FALSE; 611 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 612 recv_incremental_check, 613 recv_offline_incremental_sync, 614 ds, &rbsa, 1); 615 if (err) { 616 dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, dmu_recv_tag); 617 return (err); 618 } 619 drc->drc_logical_ds = drc->drc_real_ds = ds; 620 } else if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE)) { 621 /* online incremental receive */ 622 623 /* tmp clone name is: tofs/%tosnap" */ 624 (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname), 625 "%%%s", tosnap); 626 627 /* open the dataset we are logically receiving into */ 628 err = dsl_dataset_open(tofs, 629 DS_MODE_STANDARD, dmu_recv_tag, &ds); 630 if (err) 631 return (err); 632 633 rbsa.force = force; 634 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 635 recv_incremental_check, 636 recv_online_incremental_sync, ds, &rbsa, 5); 637 if (err) { 638 dsl_dataset_close(ds, DS_MODE_STANDARD, dmu_recv_tag); 639 return (err); 640 } 641 drc->drc_logical_ds = ds; 642 drc->drc_real_ds = rbsa.ds; 643 } else { 644 /* create new fs -- full backup or clone */ 645 dsl_dir_t *dd = NULL; 646 const char *tail; 647 648 err = dsl_dir_open(tofs, FTAG, &dd, &tail); 649 if (err) 650 return (err); 651 if (tail == NULL) { 652 if (!force) { 653 dsl_dir_close(dd, FTAG); 654 return (EEXIST); 655 } 656 657 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); 658 err = dsl_dataset_open_obj(dd->dd_pool, 659 dd->dd_phys->dd_head_dataset_obj, NULL, 660 DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT, 661 FTAG, &ds); 662 rw_exit(&dd->dd_pool->dp_config_rwlock); 663 if (err) { 664 dsl_dir_close(dd, FTAG); 665 return (err); 666 } 667 668 err = dsl_sync_task_do(dd->dd_pool, 669 recv_full_existing_check, 670 recv_full_existing_sync, ds, &rbsa, 5); 671 /* if successful, sync task closes the ds for us */ 672 if (err) 673 dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG); 674 } else { 675 err = dsl_sync_task_do(dd->dd_pool, recv_full_check, 676 recv_full_sync, dd, &rbsa, 5); 677 if (err) 678 return (err); 679 } 680 dsl_dir_close(dd, FTAG); 681 if (err) 682 return (err); 683 drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds; 684 drc->drc_newfs = B_TRUE; 685 } 686 687 /* downgrade our hold on the ds from EXCLUSIVE to PRIMARY */ 688 dsl_dataset_downgrade(drc->drc_real_ds, 689 DS_MODE_EXCLUSIVE, DS_MODE_PRIMARY); 690 691 return (0); 692 } 693 694 struct restorearg { 695 int err; 696 int byteswap; 697 vnode_t *vp; 698 char *buf; 699 uint64_t voff; 700 int bufsize; /* amount of memory allocated for buf */ 701 zio_cksum_t cksum; 702 }; 703 704 static void * 705 restore_read(struct restorearg *ra, int len) 706 { 707 void *rv; 708 int done = 0; 709 710 /* some things will require 8-byte alignment, so everything must */ 711 ASSERT3U(len % 8, ==, 0); 712 713 while (done < len) { 714 ssize_t resid; 715 716 ra->err = vn_rdwr(UIO_READ, ra->vp, 717 (caddr_t)ra->buf + done, len - done, 718 ra->voff, UIO_SYSSPACE, FAPPEND, 719 RLIM64_INFINITY, CRED(), &resid); 720 721 if (resid == len - done) 722 ra->err = EINVAL; 723 ra->voff += len - done - resid; 724 done = len - resid; 725 if (ra->err) 726 return (NULL); 727 } 728 729 ASSERT3U(done, ==, len); 730 rv = ra->buf; 731 if (ra->byteswap) 732 fletcher_4_incremental_byteswap(rv, len, &ra->cksum); 733 else 734 fletcher_4_incremental_native(rv, len, &ra->cksum); 735 return (rv); 736 } 737 738 static void 739 backup_byteswap(dmu_replay_record_t *drr) 740 { 741 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) 742 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) 743 drr->drr_type = BSWAP_32(drr->drr_type); 744 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); 745 switch (drr->drr_type) { 746 case DRR_BEGIN: 747 DO64(drr_begin.drr_magic); 748 DO64(drr_begin.drr_version); 749 DO64(drr_begin.drr_creation_time); 750 DO32(drr_begin.drr_type); 751 DO32(drr_begin.drr_flags); 752 DO64(drr_begin.drr_toguid); 753 DO64(drr_begin.drr_fromguid); 754 break; 755 case DRR_OBJECT: 756 DO64(drr_object.drr_object); 757 /* DO64(drr_object.drr_allocation_txg); */ 758 DO32(drr_object.drr_type); 759 DO32(drr_object.drr_bonustype); 760 DO32(drr_object.drr_blksz); 761 DO32(drr_object.drr_bonuslen); 762 break; 763 case DRR_FREEOBJECTS: 764 DO64(drr_freeobjects.drr_firstobj); 765 DO64(drr_freeobjects.drr_numobjs); 766 break; 767 case DRR_WRITE: 768 DO64(drr_write.drr_object); 769 DO32(drr_write.drr_type); 770 DO64(drr_write.drr_offset); 771 DO64(drr_write.drr_length); 772 break; 773 case DRR_FREE: 774 DO64(drr_free.drr_object); 775 DO64(drr_free.drr_offset); 776 DO64(drr_free.drr_length); 777 break; 778 case DRR_END: 779 DO64(drr_end.drr_checksum.zc_word[0]); 780 DO64(drr_end.drr_checksum.zc_word[1]); 781 DO64(drr_end.drr_checksum.zc_word[2]); 782 DO64(drr_end.drr_checksum.zc_word[3]); 783 break; 784 } 785 #undef DO64 786 #undef DO32 787 } 788 789 static int 790 restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro) 791 { 792 int err; 793 dmu_tx_t *tx; 794 795 err = dmu_object_info(os, drro->drr_object, NULL); 796 797 if (err != 0 && err != ENOENT) 798 return (EINVAL); 799 800 if (drro->drr_type == DMU_OT_NONE || 801 drro->drr_type >= DMU_OT_NUMTYPES || 802 drro->drr_bonustype >= DMU_OT_NUMTYPES || 803 drro->drr_checksum >= ZIO_CHECKSUM_FUNCTIONS || 804 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || 805 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || 806 drro->drr_blksz < SPA_MINBLOCKSIZE || 807 drro->drr_blksz > SPA_MAXBLOCKSIZE || 808 drro->drr_bonuslen > DN_MAX_BONUSLEN) { 809 return (EINVAL); 810 } 811 812 tx = dmu_tx_create(os); 813 814 if (err == ENOENT) { 815 /* currently free, want to be allocated */ 816 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 817 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1); 818 err = dmu_tx_assign(tx, TXG_WAIT); 819 if (err) { 820 dmu_tx_abort(tx); 821 return (err); 822 } 823 err = dmu_object_claim(os, drro->drr_object, 824 drro->drr_type, drro->drr_blksz, 825 drro->drr_bonustype, drro->drr_bonuslen, tx); 826 } else { 827 /* currently allocated, want to be allocated */ 828 dmu_tx_hold_bonus(tx, drro->drr_object); 829 /* 830 * We may change blocksize, so need to 831 * hold_write 832 */ 833 dmu_tx_hold_write(tx, drro->drr_object, 0, 1); 834 err = dmu_tx_assign(tx, TXG_WAIT); 835 if (err) { 836 dmu_tx_abort(tx); 837 return (err); 838 } 839 840 err = dmu_object_reclaim(os, drro->drr_object, 841 drro->drr_type, drro->drr_blksz, 842 drro->drr_bonustype, drro->drr_bonuslen, tx); 843 } 844 if (err) { 845 dmu_tx_commit(tx); 846 return (EINVAL); 847 } 848 849 dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksum, tx); 850 dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx); 851 852 if (drro->drr_bonuslen) { 853 dmu_buf_t *db; 854 void *data; 855 VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db)); 856 dmu_buf_will_dirty(db, tx); 857 858 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 859 data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8)); 860 if (data == NULL) { 861 dmu_tx_commit(tx); 862 return (ra->err); 863 } 864 bcopy(data, db->db_data, drro->drr_bonuslen); 865 if (ra->byteswap) { 866 dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data, 867 drro->drr_bonuslen); 868 } 869 dmu_buf_rele(db, FTAG); 870 } 871 dmu_tx_commit(tx); 872 return (0); 873 } 874 875 /* ARGSUSED */ 876 static int 877 restore_freeobjects(struct restorearg *ra, objset_t *os, 878 struct drr_freeobjects *drrfo) 879 { 880 uint64_t obj; 881 882 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) 883 return (EINVAL); 884 885 for (obj = drrfo->drr_firstobj; 886 obj < drrfo->drr_firstobj + drrfo->drr_numobjs; 887 (void) dmu_object_next(os, &obj, FALSE, 0)) { 888 dmu_tx_t *tx; 889 int err; 890 891 if (dmu_object_info(os, obj, NULL) != 0) 892 continue; 893 894 tx = dmu_tx_create(os); 895 dmu_tx_hold_bonus(tx, obj); 896 err = dmu_tx_assign(tx, TXG_WAIT); 897 if (err) { 898 dmu_tx_abort(tx); 899 return (err); 900 } 901 err = dmu_object_free(os, obj, tx); 902 dmu_tx_commit(tx); 903 if (err && err != ENOENT) 904 return (EINVAL); 905 } 906 return (0); 907 } 908 909 static int 910 restore_write(struct restorearg *ra, objset_t *os, 911 struct drr_write *drrw) 912 { 913 dmu_tx_t *tx; 914 void *data; 915 int err; 916 917 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset || 918 drrw->drr_type >= DMU_OT_NUMTYPES) 919 return (EINVAL); 920 921 data = restore_read(ra, drrw->drr_length); 922 if (data == NULL) 923 return (ra->err); 924 925 if (dmu_object_info(os, drrw->drr_object, NULL) != 0) 926 return (EINVAL); 927 928 tx = dmu_tx_create(os); 929 930 dmu_tx_hold_write(tx, drrw->drr_object, 931 drrw->drr_offset, drrw->drr_length); 932 err = dmu_tx_assign(tx, TXG_WAIT); 933 if (err) { 934 dmu_tx_abort(tx); 935 return (err); 936 } 937 if (ra->byteswap) 938 dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length); 939 dmu_write(os, drrw->drr_object, 940 drrw->drr_offset, drrw->drr_length, data, tx); 941 dmu_tx_commit(tx); 942 return (0); 943 } 944 945 /* ARGSUSED */ 946 static int 947 restore_free(struct restorearg *ra, objset_t *os, 948 struct drr_free *drrf) 949 { 950 dmu_tx_t *tx; 951 int err; 952 953 if (drrf->drr_length != -1ULL && 954 drrf->drr_offset + drrf->drr_length < drrf->drr_offset) 955 return (EINVAL); 956 957 if (dmu_object_info(os, drrf->drr_object, NULL) != 0) 958 return (EINVAL); 959 960 tx = dmu_tx_create(os); 961 962 dmu_tx_hold_free(tx, drrf->drr_object, 963 drrf->drr_offset, drrf->drr_length); 964 err = dmu_tx_assign(tx, TXG_WAIT); 965 if (err) { 966 dmu_tx_abort(tx); 967 return (err); 968 } 969 err = dmu_free_range(os, drrf->drr_object, 970 drrf->drr_offset, drrf->drr_length, tx); 971 dmu_tx_commit(tx); 972 return (err); 973 } 974 975 void 976 dmu_recv_abort_cleanup(dmu_recv_cookie_t *drc) 977 { 978 if (drc->drc_newfs || drc->drc_real_ds != drc->drc_logical_ds) { 979 /* 980 * online incremental or new fs: destroy the fs (which 981 * may be a clone) that we created 982 */ 983 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag); 984 if (drc->drc_real_ds != drc->drc_logical_ds) { 985 dsl_dataset_close(drc->drc_logical_ds, 986 DS_MODE_STANDARD, dmu_recv_tag); 987 } 988 } else { 989 /* 990 * offline incremental: rollback to most recent snapshot. 991 */ 992 int lmode = DS_MODE_PRIMARY; 993 if (dsl_dataset_tryupgrade(drc->drc_real_ds, 994 DS_MODE_PRIMARY, DS_MODE_EXCLUSIVE)) { 995 lmode = DS_MODE_EXCLUSIVE; 996 (void) dsl_dataset_rollback(drc->drc_real_ds, 997 DMU_OST_NONE); 998 } 999 dsl_dataset_close(drc->drc_real_ds, lmode, FTAG); 1000 } 1001 } 1002 1003 /* 1004 * NB: callers *must* call dmu_recv_end() if this succeeds. 1005 */ 1006 int 1007 dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp) 1008 { 1009 struct restorearg ra = { 0 }; 1010 dmu_replay_record_t *drr; 1011 objset_t *os; 1012 zio_cksum_t pcksum; 1013 1014 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) 1015 ra.byteswap = TRUE; 1016 1017 { 1018 /* compute checksum of drr_begin record */ 1019 dmu_replay_record_t *drr; 1020 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); 1021 1022 drr->drr_type = DRR_BEGIN; 1023 drr->drr_u.drr_begin = *drc->drc_drrb; 1024 if (ra.byteswap) { 1025 fletcher_4_incremental_byteswap(drr, 1026 sizeof (dmu_replay_record_t), &ra.cksum); 1027 } else { 1028 fletcher_4_incremental_native(drr, 1029 sizeof (dmu_replay_record_t), &ra.cksum); 1030 } 1031 kmem_free(drr, sizeof (dmu_replay_record_t)); 1032 } 1033 1034 if (ra.byteswap) { 1035 struct drr_begin *drrb = drc->drc_drrb; 1036 drrb->drr_magic = BSWAP_64(drrb->drr_magic); 1037 drrb->drr_version = BSWAP_64(drrb->drr_version); 1038 drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time); 1039 drrb->drr_type = BSWAP_32(drrb->drr_type); 1040 drrb->drr_toguid = BSWAP_64(drrb->drr_toguid); 1041 drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid); 1042 } 1043 1044 ra.vp = vp; 1045 ra.voff = *voffp; 1046 ra.bufsize = 1<<20; 1047 ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP); 1048 1049 /* these were verified in dmu_recv_begin */ 1050 ASSERT(drc->drc_drrb->drr_version == DMU_BACKUP_STREAM_VERSION); 1051 ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES); 1052 1053 /* 1054 * Open the objset we are modifying. 1055 */ 1056 VERIFY(dmu_objset_open_ds(drc->drc_real_ds, DMU_OST_ANY, &os) == 0); 1057 1058 ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT); 1059 1060 /* 1061 * Read records and process them. 1062 */ 1063 pcksum = ra.cksum; 1064 while (ra.err == 0 && 1065 NULL != (drr = restore_read(&ra, sizeof (*drr)))) { 1066 if (issig(JUSTLOOKING) && issig(FORREAL)) { 1067 ra.err = EINTR; 1068 goto out; 1069 } 1070 1071 if (ra.byteswap) 1072 backup_byteswap(drr); 1073 1074 switch (drr->drr_type) { 1075 case DRR_OBJECT: 1076 { 1077 /* 1078 * We need to make a copy of the record header, 1079 * because restore_{object,write} may need to 1080 * restore_read(), which will invalidate drr. 1081 */ 1082 struct drr_object drro = drr->drr_u.drr_object; 1083 ra.err = restore_object(&ra, os, &drro); 1084 break; 1085 } 1086 case DRR_FREEOBJECTS: 1087 { 1088 struct drr_freeobjects drrfo = 1089 drr->drr_u.drr_freeobjects; 1090 ra.err = restore_freeobjects(&ra, os, &drrfo); 1091 break; 1092 } 1093 case DRR_WRITE: 1094 { 1095 struct drr_write drrw = drr->drr_u.drr_write; 1096 ra.err = restore_write(&ra, os, &drrw); 1097 break; 1098 } 1099 case DRR_FREE: 1100 { 1101 struct drr_free drrf = drr->drr_u.drr_free; 1102 ra.err = restore_free(&ra, os, &drrf); 1103 break; 1104 } 1105 case DRR_END: 1106 { 1107 struct drr_end drre = drr->drr_u.drr_end; 1108 /* 1109 * We compare against the *previous* checksum 1110 * value, because the stored checksum is of 1111 * everything before the DRR_END record. 1112 */ 1113 if (drre.drr_checksum.zc_word[0] != 0 && 1114 !ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum)) { 1115 ra.err = ECKSUM; 1116 goto out; 1117 } 1118 goto out; 1119 } 1120 default: 1121 ra.err = EINVAL; 1122 goto out; 1123 } 1124 pcksum = ra.cksum; 1125 } 1126 1127 out: 1128 dmu_objset_close(os); 1129 1130 if (ra.err != 0) { 1131 /* 1132 * rollback or destroy what we created, so we don't 1133 * leave it in the restoring state. 1134 */ 1135 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0); 1136 dmu_recv_abort_cleanup(drc); 1137 } 1138 1139 kmem_free(ra.buf, ra.bufsize); 1140 *voffp = ra.voff; 1141 return (ra.err); 1142 } 1143 1144 struct recvendsyncarg { 1145 char *tosnap; 1146 uint64_t creation_time; 1147 uint64_t toguid; 1148 }; 1149 1150 static int 1151 recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx) 1152 { 1153 dsl_dataset_t *ds = arg1; 1154 struct recvendsyncarg *resa = arg2; 1155 1156 return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx)); 1157 } 1158 1159 static void 1160 recv_end_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 1161 { 1162 dsl_dataset_t *ds = arg1; 1163 struct recvendsyncarg *resa = arg2; 1164 1165 dsl_dataset_snapshot_sync(ds, resa->tosnap, cr, tx); 1166 1167 /* set snapshot's creation time and guid */ 1168 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 1169 ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time; 1170 ds->ds_prev->ds_phys->ds_guid = resa->toguid; 1171 ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; 1172 1173 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1174 ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT; 1175 } 1176 1177 int 1178 dmu_recv_end(dmu_recv_cookie_t *drc) 1179 { 1180 int err = 0; 1181 int lmode; 1182 1183 /* 1184 * XXX hack; seems the ds is still dirty and 1185 * dsl_pool_zil_clean() expects it to have a ds_user_ptr (and 1186 * zil), but clone_swap() can close it. 1187 */ 1188 txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0); 1189 1190 if (dsl_dataset_tryupgrade(drc->drc_real_ds, 1191 DS_MODE_PRIMARY, DS_MODE_EXCLUSIVE)) { 1192 lmode = DS_MODE_EXCLUSIVE; 1193 } else { 1194 dmu_recv_abort_cleanup(drc); 1195 return (EBUSY); 1196 } 1197 1198 if (drc->drc_logical_ds != drc->drc_real_ds) { 1199 if (err == 0 && dsl_dataset_tryupgrade(drc->drc_logical_ds, 1200 DS_MODE_STANDARD, DS_MODE_EXCLUSIVE)) { 1201 lmode = DS_MODE_EXCLUSIVE; 1202 err = dsl_dataset_clone_swap(drc->drc_real_ds, 1203 drc->drc_logical_ds, drc->drc_force); 1204 } else { 1205 lmode = DS_MODE_STANDARD; 1206 err = EBUSY; 1207 } 1208 } 1209 1210 if (err == 0) { 1211 struct recvendsyncarg resa; 1212 1213 resa.creation_time = drc->drc_drrb->drr_creation_time; 1214 resa.toguid = drc->drc_drrb->drr_toguid; 1215 resa.tosnap = drc->drc_tosnap; 1216 1217 err = dsl_sync_task_do(drc->drc_real_ds->ds_dir->dd_pool, 1218 recv_end_check, recv_end_sync, 1219 drc->drc_logical_ds, &resa, 3); 1220 if (err) { 1221 if (drc->drc_newfs) { 1222 ASSERT(drc->drc_logical_ds == drc->drc_real_ds); 1223 (void) dsl_dataset_destroy(drc->drc_real_ds, 1224 dmu_recv_tag); 1225 return (err); 1226 } else { 1227 (void) dsl_dataset_rollback(drc->drc_logical_ds, 1228 DMU_OST_NONE); 1229 } 1230 } 1231 } 1232 1233 if (drc->drc_logical_ds != drc->drc_real_ds) { 1234 /* dsl_dataset_destroy() will close the ds */ 1235 (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag); 1236 } 1237 /* close the hold from dmu_recv_begin */ 1238 dsl_dataset_close(drc->drc_logical_ds, lmode, dmu_recv_tag); 1239 return (err); 1240 } 1241