/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{ byteswap_uint8_array,		TRUE,	"unallocated" },
	{ zap_byteswap,			TRUE,	"object directory" },
	{ byteswap_uint64_array,	TRUE,	"object array" },
	{ byteswap_uint8_array,		TRUE,	"packed nvlist" },
	{ byteswap_uint64_array,	TRUE,	"packed nvlist size" },
	{ byteswap_uint64_array,	TRUE,	"bplist" },
	{ byteswap_uint64_array,	TRUE,	"bplist header" },
	{ byteswap_uint64_array,	TRUE,	"SPA space map header" },
	{ byteswap_uint64_array,	TRUE,	"SPA space map" },
	{ byteswap_uint64_array,	TRUE,	"ZIL intent log" },
	{ dnode_buf_byteswap,		TRUE,	"DMU dnode" },
	{ dmu_objset_byteswap,		TRUE,	"DMU objset" },
	{ byteswap_uint64_array,	TRUE,	"DSL directory" },
	{ zap_byteswap,			TRUE,	"DSL directory child map" },
	{ zap_byteswap,			TRUE,	"DSL dataset snap map" },
	{ zap_byteswap,			TRUE,	"DSL props" },
	{ byteswap_uint64_array,	TRUE,	"DSL dataset" },
	{ zfs_znode_byteswap,		TRUE,	"ZFS znode" },
	{ zfs_acl_byteswap,		TRUE,	"ZFS ACL" },
	{ byteswap_uint8_array,		FALSE,	"ZFS plain file" },
	{ zap_byteswap,			TRUE,	"ZFS directory" },
	{ zap_byteswap,			TRUE,	"ZFS master node" },
	{ zap_byteswap,			TRUE,	"ZFS delete queue" },
	{ byteswap_uint8_array,		FALSE,	"zvol object" },
	{ zap_byteswap,			TRUE,	"zvol prop" },
	{ byteswap_uint8_array,		FALSE,	"other uint8[]" },
	{ byteswap_uint64_array,	FALSE,	"other uint64[]" },
	{ zap_byteswap,			TRUE,	"other ZAP" },
	{ zap_byteswap,			TRUE,	"persistent error log" },
};

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	blkid = dbuf_whichblock(dn, offset);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL) {
		err = EIO;
	} else {
		err = dbuf_read(db, NULL, DB_RF_CANFAIL);
		if (err) {
			dbuf_rele(db, tag);
			db = NULL;
		}
	}

	dnode_rele(dn, FTAG);
	*dbp = &db->db;
	return (err);
}
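
/*
 * Example usage (illustrative sketch only, not part of the original
 * interface contract): a typical consumer holds a buffer, reads through
 * db->db_data, and releases it with the same tag, e.g.:
 *
 *	dmu_buf_t *db;
 *	if (dmu_buf_hold(os, object, offset, FTAG, &db) == 0) {
 *		... use db->db_data / db->db_size ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 */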

int
dmu_bonus_max(void)
{
	return (DN_MAX_BONUSLEN);
}

/*
 * returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int err, count;
	dmu_buf_impl_t *db;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		rw_exit(&dn->dn_struct_rwlock);
		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		if (dn->dn_bonus == NULL)
			dn->dn_bonus = dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;
	rw_exit(&dn->dn_struct_rwlock);
	mutex_enter(&db->db_mtx);
	count = refcount_add(&db->db_holds, tag);
	mutex_exit(&db->db_mtx);
	if (count == 1)
		dnode_add_ref(dn, db);
	dnode_rele(dn, FTAG);

	VERIFY(0 == dbuf_read(db, NULL, DB_RF_MUST_SUCCEED));

	*dbp = &db->db;
	return (0);
}

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dmu_buf_t **dbp;
	uint64_t blkid, nblks, i;
	uint32_t flags;
	int err;
	zio_t *zio;

	ASSERT(length <= DMU_MAX_ACCESS);

	flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
	if (length > zfetch_array_rd_sz)
		flags |= DB_RF_NOPREFETCH;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
		    P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
	} else {
		ASSERT3U(offset + length, <=, dn->dn_datablksz);
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, TRUE);
	blkid = dbuf_whichblock(dn, offset);
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
		if (db == NULL) {
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			zio_nowait(zio);
			return (EIO);
		}
		/* initiate async i/o */
		if (read) {
			rw_exit(&dn->dn_struct_rwlock);
			(void) dbuf_read(db, zio, flags);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
		}
		dbp[i] = &db->db;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* wait for async i/o */
	err = zio_wait(zio);
	if (err) {
		dmu_buf_rele_array(dbp, nblks, tag);
		return (err);
	}

	/* wait for other io to complete */
	if (read) {
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = EIO;
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
	int err;

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}

void
dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, i, err;

	if (len == 0) { /* they're interested in the bonus buffer */
		dn = os->os->os_meta_dnode;

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, blkid);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err != 0)
		return;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset+len, 1<<blkshift) -
		    P2ALIGN(offset, 1<<blkshift)) >> blkshift;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, offset);
		for (i = 0; i < nblks; i++)
			dbuf_prefetch(dn, blkid+i);
	}

	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == -1ULL || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf)
{
	dnode_t *dn;
	dmu_buf_t **dbp;
	int numbufs, i, err;

	/*
	 * Deal with odd block sizes, where there can't be data past the
	 * first block.
	 */
	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	if (dn->dn_datablkshift == 0) {
		int newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		bzero((char *)buf + newsz, size - newsz);
		size = newsz;
	}
	dnode_rele(dn, FTAG);

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int err;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array(os, object, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp);
		if (err)
			return (err);

		for (i = 0; i < numbufs; i++) {
			int tocpy;
			int bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = (int)MIN(db->db_size - bufoff, size);

			bcopy((char *)db->db_data + bufoff, buf, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (0);
}

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		bcopy(buf, (char *)db->db_data + bufoff, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

#ifdef _KERNEL
int
dmu_write_uio(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    uio_t *uio, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;
	int err = 0;

	if (size == 0)
		return (0);

	err = dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp);
	if (err)
		return (err);

	for (i = 0; i < numbufs; i++) {
		int tocpy;
		int bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = (int)MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		/*
		 * XXX uiomove could block forever (eg. nfs-backed
		 * pages).  There needs to be a uiolockdown() function
		 * to lock the pages in memory, so that uiomove won't
		 * block.
		 */
		err = uiomove((char *)db->db_data + bufoff, tocpy,
		    UIO_WRITE, uio);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		if (err)
			break;

		offset += tocpy;
		size -= tocpy;
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (err);
}
#endif
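
/*
 * Example usage (illustrative sketch only): callers of dmu_write() are
 * expected to wrap it in an assigned transaction, along the lines of what
 * restore_write() later in this file does:
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */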

/*
 * XXX move send/recv stuff to its own new file!
 */

struct backuparg {
	dmu_replay_record_t *drr;
	vnode_t *vp;
	objset_t *os;
	zio_cksum_t zc;
	int err;
};

static int
dump_bytes(struct backuparg *ba, void *buf, int len)
{
	ssize_t resid; /* have to get resid to get detailed errno */
	ASSERT3U(len % 8, ==, 0);

	fletcher_4_incremental_native(buf, len, &ba->zc);
	ba->err = vn_rdwr(UIO_WRITE, ba->vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
	return (ba->err);
}

static int
dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
    uint64_t length)
{
	/* write a FREE record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_FREE;
	ba->drr->drr_u.drr_free.drr_object = object;
	ba->drr->drr_u.drr_free.drr_offset = offset;
	ba->drr->drr_u.drr_free.drr_length = length;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	return (0);
}

static int
dump_data(struct backuparg *ba, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, void *data)
{
	/* write a DATA record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_WRITE;
	ba->drr->drr_u.drr_write.drr_object = object;
	ba->drr->drr_u.drr_write.drr_type = type;
	ba->drr->drr_u.drr_write.drr_offset = offset;
	ba->drr->drr_u.drr_write.drr_length = blksz;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	if (dump_bytes(ba, data, blksz))
		return (EINTR);
	return (0);
}

static int
dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
{
	/* write a FREEOBJECTS record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_FREEOBJECTS;
	ba->drr->drr_u.drr_freeobjects.drr_firstobj = firstobj;
	ba->drr->drr_u.drr_freeobjects.drr_numobjs = numobjs;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	return (0);
}

static int
dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
{
	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(ba, object, 1));

	/* write an OBJECT record */
	bzero(ba->drr, sizeof (dmu_replay_record_t));
	ba->drr->drr_type = DRR_OBJECT;
	ba->drr->drr_u.drr_object.drr_object = object;
	ba->drr->drr_u.drr_object.drr_type = dnp->dn_type;
	ba->drr->drr_u.drr_object.drr_bonustype = dnp->dn_bonustype;
	ba->drr->drr_u.drr_object.drr_blksz =
	    dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	ba->drr->drr_u.drr_object.drr_bonuslen = dnp->dn_bonuslen;
	ba->drr->drr_u.drr_object.drr_checksum = dnp->dn_checksum;
	ba->drr->drr_u.drr_object.drr_compress = dnp->dn_compress;

	if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
		return (EINTR);

	if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)))
		return (EINTR);

	/* free anything past the end of the file */
	if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
		return (EINTR);
	if (ba->err)
		return (EINTR);
	return (0);
}

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))

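/*
 * For illustration (example values assumed, not taken from this file):
 * BP_SPAN() is the number of bytes of object data covered by one block
 * pointer at the given indirection level.  At level 0 it is just the data
 * block size (dn_datablkszsec is in 512-byte sectors, hence the
 * SPA_MINBLOCKSHIFT).  Each additional level multiplies the span by the
 * number of block pointers per indirect block,
 * 2^(dn_indblkshift - SPA_BLKPTRSHIFT).  E.g. with 128K data blocks and
 * 16K indirect blocks (128-byte blkptrs), level 1 spans 128K * 128 = 16M.
 */
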
static int
backup_cb(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
{
	struct backuparg *ba = arg;
	uint64_t object = bc->bc_bookmark.zb_object;
	int level = bc->bc_bookmark.zb_level;
	uint64_t blkid = bc->bc_bookmark.zb_blkid;
	blkptr_t *bp = bc->bc_blkptr.blk_birth ? &bc->bc_blkptr : NULL;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	void *data = bc->bc_data;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	ASSERT(data || bp == NULL);

	if (bp == NULL && object == 0) {
		uint64_t span = BP_SPAN(bc->bc_dnode, level);
		uint64_t dnobj = (blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
		uint64_t span = BP_SPAN(bc->bc_dnode, level);
		err = dump_free(ba, object, blkid * span, span);
	} else if (data && level == 0 && type == DMU_OT_DNODE) {
		dnode_phys_t *blk = data;
		int i;
		int blksz = BP_GET_LSIZE(bp);

		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj =
			    (blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(ba, dnobj, blk+i);
			if (err)
				break;
		}
	} else if (level == 0 &&
	    type != DMU_OT_DNODE && type != DMU_OT_OBJSET) {
		int blksz = BP_GET_LSIZE(bp);
		if (data == NULL) {
			uint32_t aflags = ARC_WAIT;
			arc_buf_t *abuf;
			zbookmark_t zb;

			zb.zb_objset = ba->os->os->os_dsl_dataset->ds_object;
			zb.zb_object = object;
			zb.zb_level = level;
			zb.zb_blkid = blkid;
			(void) arc_read(NULL, spa, bp,
			    dmu_ot[type].ot_byteswap, arc_getbuf_func, &abuf,
			    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_MUSTSUCCEED,
			    &aflags, &zb);

			if (abuf) {
				err = dump_data(ba, type, object, blkid * blksz,
				    blksz, abuf->b_data);
				(void) arc_buf_remove_ref(abuf, &abuf);
			}
		} else {
			err = dump_data(ba, type, object, blkid * blksz,
			    blksz, data);
		}
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

int
dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, vnode_t *vp)
{
	dsl_dataset_t *ds = tosnap->os->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	struct backuparg ba;
	int err;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >=
	    ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	drr->drr_u.drr_begin.drr_version = DMU_BACKUP_VERSION;
	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os->os_phys->os_type;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	ba.drr = drr;
	ba.vp = vp;
	ba.os = tosnap;
	ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);

	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t))) {
		kmem_free(drr, sizeof (dmu_replay_record_t));
		return (ba.err);
	}

	err = traverse_dsl_dataset(ds,
	    fromds ? fromds->ds_phys->ds_creation_txg : 0,
	    ADVANCE_PRE | ADVANCE_HOLES | ADVANCE_DATA | ADVANCE_NOLOCK,
	    backup_cb, &ba);

	if (err) {
		if (err == EINTR && ba.err)
			err = ba.err;
		return (err);
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = ba.zc;

	if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)))
		return (ba.err);

	kmem_free(drr, sizeof (dmu_replay_record_t));

	return (0);
}

struct restorearg {
	int err;
	int byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int buflen; /* number of valid bytes in buf */
	int bufoff; /* next offset to read */
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t zc;
};

/* ARGSUSED */
static int
replay_incremental_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct drr_begin *drrb = arg2;
	const char *snapname;
	int err;
	uint64_t val;

	/* must already be a snapshot of this fs */
	if (ds->ds_phys->ds_prev_snap_obj == 0)
		return (ENODEV);

	/* most recent snapshot must match fromguid */
	if (ds->ds_prev->ds_phys->ds_guid != drrb->drr_fromguid)
		return (ENODEV);
	/* must not have any changes since most recent snapshot */
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg)
		return (ETXTBSY);

	/* new snapshot name must not exist */
	snapname = strrchr(drrb->drr_toname, '@');
	if (snapname == NULL)
		return (EEXIST);

	snapname++;
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, snapname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	return (0);
}

/* ARGSUSED */
static void
replay_incremental_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;
}

/* ARGSUSED */
static int
replay_full_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct drr_begin *drrb = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	char *cp;
	uint64_t val;
	int err;

	cp = strchr(drrb->drr_toname, '@');
	*cp = '\0';
	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    strrchr(drrb->drr_toname, '/') + 1,
	    sizeof (uint64_t), 1, &val);
	*cp = '@';

	if (err != ENOENT)
		return (err ? err : EEXIST);

	return (0);
}

static void
replay_full_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct drr_begin *drrb = arg2;
	char *cp;
	dsl_dataset_t *ds;
	uint64_t dsobj;

	cp = strchr(drrb->drr_toname, '@');
	*cp = '\0';
	dsobj = dsl_dataset_create_sync(dd, strrchr(drrb->drr_toname, '/') + 1,
	    NULL, tx);
	*cp = '@';

	VERIFY(0 == dsl_dataset_open_obj(dd->dd_pool, dsobj, NULL,
	    DS_MODE_EXCLUSIVE, FTAG, &ds));

	(void) dmu_objset_create_impl(dsl_dataset_get_spa(ds),
	    ds, drrb->drr_type, tx);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
}

static int
replay_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	struct drr_begin *drrb = arg2;
	char *snapname;

	/* XXX verify that drr_toname is in dd */

	snapname = strchr(drrb->drr_toname, '@');
	if (snapname == NULL)
		return (EINVAL);
	snapname++;

	return (dsl_dataset_snapshot_check(os, snapname, tx));
}

static void
replay_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	objset_t *os = arg1;
	struct drr_begin *drrb = arg2;
	char *snapname;
	dsl_dataset_t *ds, *hds;

	snapname = strchr(drrb->drr_toname, '@') + 1;

	dsl_dataset_snapshot_sync(os, snapname, tx);

	/* set snapshot's creation time and guid */
	hds = os->os->os_dsl_dataset;
	VERIFY(0 == dsl_dataset_open_obj(hds->ds_dir->dd_pool,
	    hds->ds_phys->ds_prev_snap_obj, NULL,
	    DS_MODE_PRIMARY | DS_MODE_READONLY | DS_MODE_INCONSISTENT,
	    FTAG, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_creation_time = drrb->drr_creation_time;
	ds->ds_phys->ds_guid = drrb->drr_toguid;
	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

	dsl_dataset_close(ds, DS_MODE_PRIMARY, FTAG);

	dmu_buf_will_dirty(hds->ds_dbuf, tx);
	hds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
}

void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT3U(len % 8, ==, 0);

	while (ra->buflen - ra->bufoff < len) {
		ssize_t resid;
		int leftover = ra->buflen - ra->bufoff;

		(void) memmove(ra->buf, ra->buf + ra->bufoff, leftover);
		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (caddr_t)ra->buf + leftover, ra->bufsize - leftover,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		ra->voff += ra->bufsize - leftover - resid;
		ra->buflen = ra->bufsize - resid;
		ra->bufoff = 0;
		if (resid == ra->bufsize - leftover)
			ra->err = EINVAL;
		if (ra->err)
			return (NULL);
		/* Could compute checksum here? */
	}

	ASSERT3U(ra->bufoff % 8, ==, 0);
	ASSERT3U(ra->buflen - ra->bufoff, >=, len);
	rv = ra->buf + ra->bufoff;
	ra->bufoff += len;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->zc);
	else
		fletcher_4_incremental_native(rv, len, &ra->zc);
	return (rv);
}

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_version);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		break;
	}
#undef DO64
#undef DO32
}

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err;
	dmu_tx_t *tx;

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (EINVAL);

	if (drro->drr_type == DMU_OT_NONE ||
	    drro->drr_type >= DMU_OT_NUMTYPES ||
	    drro->drr_bonustype >= DMU_OT_NUMTYPES ||
	    drro->drr_checksum >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (EINVAL);
	}

	tx = dmu_tx_create(os);

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	} else {
		/* currently allocated, want to be allocated */
		dmu_tx_hold_bonus(tx, drro->drr_object);
		/*
		 * We may change blocksize, so need to
		 * hold_write
		 */
		dmu_tx_hold_write(tx, drro->drr_object, 0, 1);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err) {
		dmu_tx_commit(tx);
		return (EINVAL);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksum, tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (drro->drr_bonuslen) {
		dmu_buf_t *db;
		void *data;
		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, ==, drro->drr_bonuslen);
		data = restore_read(ra, P2ROUNDUP(db->db_size, 8));
		if (data == NULL) {
			dmu_tx_commit(tx);
			return (ra->err);
		}
		bcopy(data, db->db_data, db->db_size);
		if (ra->byteswap) {
			dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (EINVAL);

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs; obj++) {
		dmu_tx_t *tx;
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_free(os, obj, tx);
		dmu_tx_commit(tx);
		if (err && err != ENOENT)
			return (EINVAL);
	}
	return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    drrw->drr_type >= DMU_OT_NUMTYPES)
		return (EINVAL);

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap)
		dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	dmu_tx_t *tx;
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (EINVAL);

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);

	dmu_tx_hold_free(tx, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	err = dmu_free_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length, tx);
	dmu_tx_commit(tx);
	return (err);
}

int
dmu_recvbackup(char *tosnap, struct drr_begin *drrb, uint64_t *sizep,
    boolean_t force, vnode_t *vp, uint64_t voffset)
{
	struct restorearg ra;
	dmu_replay_record_t *drr;
	char *cp;
	objset_t *os = NULL;
	zio_cksum_t pzc;

	bzero(&ra, sizeof (ra));
	ra.vp = vp;
	ra.voff = voffset;
	ra.bufsize = 1<<20;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	if (drrb->drr_magic == DMU_BACKUP_MAGIC) {
		ra.byteswap = FALSE;
	} else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		ra.byteswap = TRUE;
	} else {
		ra.err = EINVAL;
		goto out;
	}

	/*
	 * NB: this assumes that struct drr_begin will be the largest in
	 * dmu_replay_record_t's drr_u, and thus we don't need to pad it
	 * with zeros to make it the same length as we wrote out.
	 */
	((dmu_replay_record_t *)ra.buf)->drr_type = DRR_BEGIN;
	((dmu_replay_record_t *)ra.buf)->drr_pad = 0;
	((dmu_replay_record_t *)ra.buf)->drr_u.drr_begin = *drrb;
	if (ra.byteswap) {
		fletcher_4_incremental_byteswap(ra.buf,
		    sizeof (dmu_replay_record_t), &ra.zc);
	} else {
		fletcher_4_incremental_native(ra.buf,
		    sizeof (dmu_replay_record_t), &ra.zc);
	}
	(void) strcpy(drrb->drr_toname, tosnap); /* for the sync funcs */

	if (ra.byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_version = BSWAP_64(drrb->drr_version);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (drrb->drr_version != DMU_BACKUP_VERSION ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    strchr(drrb->drr_toname, '@') == NULL) {
		ra.err = EINVAL;
		goto out;
	}

	/*
	 * Process the begin in syncing context.
	 */
	if (drrb->drr_fromguid) {
		/* incremental backup */
		dsl_dataset_t *ds = NULL;

		cp = strchr(tosnap, '@');
		*cp = '\0';
		ra.err = dsl_dataset_open(tosnap, DS_MODE_EXCLUSIVE, FTAG, &ds);
		*cp = '@';
		if (ra.err)
			goto out;

		/*
		 * Only do the rollback if the most recent snapshot
		 * matches the incremental source
		 */
		if (force) {
			if (ds->ds_prev->ds_phys->ds_guid !=
			    drrb->drr_fromguid) {
				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
				return (ENODEV);
			}
			(void) dsl_dataset_rollback(ds);
		}
		ra.err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    replay_incremental_check, replay_incremental_sync,
		    ds, drrb, 1);
		dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
	} else {
		/* full backup */
		dsl_dir_t *dd = NULL;
		const char *tail;

		/* can't restore full backup into topmost fs, for now */
		if (strrchr(drrb->drr_toname, '/') == NULL) {
			ra.err = EINVAL;
			goto out;
		}

		cp = strchr(tosnap, '@');
		*cp = '\0';
		ra.err = dsl_dir_open(tosnap, FTAG, &dd, &tail);
		*cp = '@';
		if (ra.err)
			goto out;
		if (tail == NULL) {
			ra.err = EEXIST;
			goto out;
		}

		ra.err = dsl_sync_task_do(dd->dd_pool, replay_full_check,
		    replay_full_sync, dd, drrb, 5);
		dsl_dir_close(dd, FTAG);
	}
	if (ra.err)
		goto out;

	/*
	 * Open the objset we are modifying.
	 */

	cp = strchr(tosnap, '@');
	*cp = '\0';
	ra.err = dmu_objset_open(tosnap, DMU_OST_ANY,
	    DS_MODE_PRIMARY | DS_MODE_INCONSISTENT, &os);
	*cp = '@';
	ASSERT3U(ra.err, ==, 0);

	/*
	 * Read records and process them.
	 */
	pzc = ra.zc;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = EINTR;
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (drre.drr_checksum.zc_word[0] != 0 &&
			    ((drre.drr_checksum.zc_word[0] - pzc.zc_word[0]) |
			    (drre.drr_checksum.zc_word[1] - pzc.zc_word[1]) |
			    (drre.drr_checksum.zc_word[2] - pzc.zc_word[2]) |
			    (drre.drr_checksum.zc_word[3] - pzc.zc_word[3]))) {
				ra.err = ECKSUM;
				goto out;
			}

			ra.err = dsl_sync_task_do(dmu_objset_ds(os)->
			    ds_dir->dd_pool, replay_end_check, replay_end_sync,
			    os, drrb, 3);
			goto out;
		}
		default:
			ra.err = EINVAL;
			goto out;
		}
		pzc = ra.zc;
	}

out:
	if (os)
		dmu_objset_close(os);

	/*
	 * Make sure we don't rollback/destroy unless we actually
	 * processed the begin properly.  'os' will only be set if this
	 * is the case.
	 */
	if (ra.err && os && tosnap && strchr(tosnap, '@')) {
		/*
		 * rollback or destroy what we created, so we don't
		 * leave it in the restoring state.
		 */
		dsl_dataset_t *ds;
		int err;

		cp = strchr(tosnap, '@');
		*cp = '\0';
		err = dsl_dataset_open(tosnap,
		    DS_MODE_EXCLUSIVE | DS_MODE_INCONSISTENT,
		    FTAG, &ds);
		if (err == 0) {
			txg_wait_synced(ds->ds_dir->dd_pool, 0);
			if (drrb->drr_fromguid) {
				/* incremental: rollback to most recent snap */
				(void) dsl_dataset_rollback(ds);
				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
			} else {
				/* full: destroy whole fs */
				dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
				(void) dsl_dataset_destroy(tosnap);
			}
		}
		*cp = '@';
	}

	kmem_free(ra.buf, ra.bufsize);
	if (sizep)
		*sizep = ra.voff;
	return (ra.err);
}

typedef struct {
	uint64_t txg;
	dmu_buf_impl_t *db;
	dmu_sync_cb_t *done;
	void *arg;
} dmu_sync_cbin_t;

typedef union {
	dmu_sync_cbin_t data;
	blkptr_t blk;
} dmu_sync_cbarg_t;

/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
	dmu_sync_cbin_t *in = (dmu_sync_cbin_t *)varg;
	dmu_buf_impl_t *db = in->db;
	uint64_t txg = in->txg;
	dmu_sync_cb_t *done = in->done;
	void *arg = in->arg;
	blkptr_t *blk = (blkptr_t *)varg;

	if (!BP_IS_HOLE(zio->io_bp)) {
		zio->io_bp->blk_fill = 1;
		BP_SET_TYPE(zio->io_bp, db->db_dnode->dn_type);
		BP_SET_LEVEL(zio->io_bp, 0);
	}

	*blk = *zio->io_bp; /* structure assignment */

	mutex_enter(&db->db_mtx);
	ASSERT(db->db_d.db_overridden_by[txg&TXG_MASK] == IN_DMU_SYNC);
	db->db_d.db_overridden_by[txg&TXG_MASK] = blk;
	cv_broadcast(&db->db_changed);
	mutex_exit(&db->db_mtx);

	if (done)
		done(&(db->db), arg);
}

/*
 * Intent log support: sync the block associated with db to disk.
 * N.B. and XXX: the caller is responsible for making sure that the
 * data isn't changing while dmu_sync() is writing it.
 *
 * Return values:
 *
 * EEXIST: this txg has already been synced, so there's nothing to do.
 *	The caller should not log the write.
 *
 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
 *	The caller should not log the write.
 *
 * EALREADY: this block is already in the process of being synced.
 *	The caller should track its progress (somehow).
 *
 * EINPROGRESS: the IO has been initiated.
 *	The caller should log this blkptr in the callback.
 *
 * 0: completed.  Sets *bp to the blkptr just written.
 *	The caller should log this blkptr immediately.
 */
int
dmu_sync(zio_t *pio, dmu_buf_t *db_fake,
    blkptr_t *bp, uint64_t txg, dmu_sync_cb_t *done, void *arg)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	objset_impl_t *os = db->db_objset;
	dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
	tx_state_t *tx = &dp->dp_tx;
	dmu_sync_cbin_t *in;
	blkptr_t *blk;
	zbookmark_t zb;
	uint32_t arc_flag;
	int err;

	ASSERT(BP_IS_HOLE(bp));
	ASSERT(txg != 0);

	dprintf("dmu_sync txg=%llu, s,o,q %llu %llu %llu\n",
	    txg, tx->tx_synced_txg, tx->tx_open_txg, tx->tx_quiesced_txg);

	/*
	 * XXX - would be nice if we could do this without suspending...
	 */
	txg_suspend(dp);

	/*
	 * If this txg already synced, there's nothing to do.
	 */
	if (txg <= tx->tx_synced_txg) {
		txg_resume(dp);
		/*
		 * If we're running ziltest, we need the blkptr regardless.
		 */
		if (txg > spa_freeze_txg(dp->dp_spa)) {
			/* if db_blkptr == NULL, this was an empty write */
			if (db->db_blkptr)
				*bp = *db->db_blkptr; /* structure assignment */
			return (0);
		}
		return (EEXIST);
	}

	mutex_enter(&db->db_mtx);

	blk = db->db_d.db_overridden_by[txg&TXG_MASK];
	if (blk == IN_DMU_SYNC) {
		/*
		 * We have already issued a sync write for this buffer.
		 */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (EALREADY);
	} else if (blk != NULL) {
		/*
		 * This buffer had already been synced.  It could not
		 * have been dirtied since, or we would have cleared blk.
		 */
		*bp = *blk; /* structure assignment */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (0);
	}

	if (txg == tx->tx_syncing_txg) {
		while (db->db_data_pending) {
			/*
			 * IO is in-progress.  Wait for it to finish.
			 * XXX - would be nice to be able to somehow "attach"
			 * this zio to the parent zio passed in.
			 */
			cv_wait(&db->db_changed, &db->db_mtx);
			if (!db->db_data_pending &&
			    db->db_blkptr && BP_IS_HOLE(db->db_blkptr)) {
				/*
				 * IO was compressed away
				 */
				*bp = *db->db_blkptr; /* structure assignment */
				mutex_exit(&db->db_mtx);
				txg_resume(dp);
				return (0);
			}
			ASSERT(db->db_data_pending ||
			    (db->db_blkptr && db->db_blkptr->blk_birth == txg));
		}

		if (db->db_blkptr && db->db_blkptr->blk_birth == txg) {
			/*
			 * IO is already completed.
			 */
			*bp = *db->db_blkptr; /* structure assignment */
			mutex_exit(&db->db_mtx);
			txg_resume(dp);
			return (0);
		}
	}

	if (db->db_d.db_data_old[txg&TXG_MASK] == NULL) {
		/*
		 * This dbuf isn't dirty, must have been free_range'd.
		 * There's no need to log writes to freed blocks, so we're done.
		 */
		mutex_exit(&db->db_mtx);
		txg_resume(dp);
		return (ENOENT);
	}

	ASSERT(db->db_d.db_overridden_by[txg&TXG_MASK] == NULL);
	db->db_d.db_overridden_by[txg&TXG_MASK] = IN_DMU_SYNC;
	/*
	 * XXX - a little ugly to stash the blkptr in the callback
	 * buffer.  We always need to make sure the following is true:
	 * ASSERT(sizeof(blkptr_t) >= sizeof(dmu_sync_cbin_t));
	 */
	in = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	in->db = db;
	in->txg = txg;
	in->done = done;
	in->arg = arg;
	mutex_exit(&db->db_mtx);
	txg_resume(dp);

	arc_flag = pio == NULL ? ARC_WAIT : ARC_NOWAIT;
	zb.zb_objset = os->os_dsl_dataset->ds_object;
	zb.zb_object = db->db.db_object;
	zb.zb_level = db->db_level;
	zb.zb_blkid = db->db_blkid;
	err = arc_write(pio, os->os_spa,
	    zio_checksum_select(db->db_dnode->dn_checksum, os->os_checksum),
	    zio_compress_select(db->db_dnode->dn_compress, os->os_compress),
	    dmu_get_replication_level(os->os_spa, &zb, db->db_dnode->dn_type),
	    txg, bp, db->db_d.db_data_old[txg&TXG_MASK], dmu_sync_done, in,
	    ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, arc_flag, &zb);
	ASSERT(err == 0);

	return (arc_flag == ARC_NOWAIT ? EINPROGRESS : 0);
}

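/*
 * Illustrative sketch of how an intent-log caller might dispatch on the
 * dmu_sync() return values documented above (zil_write_done and lwb are
 * hypothetical names, not taken from this file):
 *
 *	err = dmu_sync(pio, db, &blk, txg, zil_write_done, lwb);
 *	switch (err) {
 *	case 0:		 log blk immediately
 *	case EINPROGRESS: log blk from the callback when the write completes
 *	case EEXIST:
 *	case ENOENT:	 do not log the write
 *	case EALREADY:	 track the already in-flight sync write
 *	}
 */
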
uint64_t
dmu_object_max_nonzero_offset(objset_t *os, uint64_t object)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	uint64_t rv = dnode_max_nonzero_offset(dn);
	dnode_rele(dn, FTAG);
	return (rv);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(checksum < ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/* XXX assumes dnode_hold will not get an i/o error */
	(void) dnode_hold(os->os, object, FTAG, &dn);
	ASSERT(compress < ZIO_COMPRESS_FUNCTIONS);
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

/*
 * XXX - eventually, this should take into account per-dataset (or
 * even per-object?) user requests for higher levels of replication.
 */
int
dmu_get_replication_level(spa_t *spa, zbookmark_t *zb, dmu_object_type_t ot)
{
	int ncopies = 1;

	if (dmu_ot[ot].ot_metadata)
		ncopies++;
	if (zb->zb_level != 0)
		ncopies++;
	if (zb->zb_objset == 0 && zb->zb_object == 0)
		ncopies++;
	return (MIN(ncopies, spa_max_replication(spa)));
}

int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int i, err;

	err = dnode_hold(os->os, object, FTAG, &dn);
	if (err)
		return (err);
	/*
	 * Sync any current changes before
	 * we go trundling through the block pointers.
	 */
	for (i = 0; i < TXG_SIZE; i++) {
		if (list_link_active(&dn->dn_dirty_link[i]))
			break;
	}
	if (i != TXG_SIZE) {
		dnode_rele(dn, FTAG);
		txg_wait_synced(dmu_objset_pool(os), 0);
		err = dnode_hold(os->os, object, FTAG, &dn);
		if (err)
			return (err);
	}

	err = dnode_next_offset(dn, hole, off, 1, 1);
	dnode_rele(dn, FTAG);

	return (err);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_physical_blks = (DN_USED_BYTES(dn->dn_phys) +
	    SPA_MINBLOCKSIZE/2) >> SPA_MINBLOCKSHIFT;
	doi->doi_max_block_offset = dn->dn_phys->dn_maxblkid;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_bonus_type = dn->dn_bonustype;

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os->os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi)
{
	dmu_object_info_from_dnode(((dmu_buf_impl_t *)db)->db_dnode, doi);
}

/*
 * Faster still when you only care about the size.
 * This is specifically optimized for zfs_getattr().
 */
void
dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize, u_longlong_t *nblk512)
{
	dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;

	*blksize = dn->dn_datablksz;
	/* add 1 for dnode space */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + 1;
}

/*
 * Given a bookmark, return the name of the dataset, object, and range in
 * human-readable format.
 */
int
spa_bookmark_name(spa_t *spa, zbookmark_t *zb, char *dsname, size_t dslen,
    char *objname, size_t objlen, char *range, size_t rangelen)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds = NULL;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int err, shift;

	if (dslen < MAXNAMELEN || objlen < 32 || rangelen < 64)
		return (ENOSPC);

	dp = spa_get_dsl(spa);
	if (zb->zb_objset != 0) {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		err = dsl_dataset_open_obj(dp, zb->zb_objset,
		    NULL, DS_MODE_NONE, FTAG, &ds);
		if (err) {
			rw_exit(&dp->dp_config_rwlock);
			return (err);
		}
		dsl_dataset_name(ds, dsname);
		dsl_dataset_close(ds, DS_MODE_NONE, FTAG);
		rw_exit(&dp->dp_config_rwlock);

		err = dmu_objset_open(dsname, DMU_OST_ANY, DS_MODE_NONE, &os);
		if (err)
			goto out;

	} else {
		dsl_dataset_name(NULL, dsname);
		os = dp->dp_meta_objset;
	}

	if (zb->zb_object == DMU_META_DNODE_OBJECT) {
		(void) strncpy(objname, "mdn", objlen);
	} else {
		(void) snprintf(objname, objlen, "%lld",
		    (longlong_t)zb->zb_object);
	}

	err = dnode_hold(os->os, zb->zb_object, FTAG, &dn);
	if (err)
		goto out;

	shift = (dn->dn_datablkshift?dn->dn_datablkshift:SPA_MAXBLOCKSHIFT) +
	    zb->zb_level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
	(void) snprintf(range, rangelen, "%llu-%llu",
	    (u_longlong_t)(zb->zb_blkid << shift),
	    (u_longlong_t)((zb->zb_blkid+1) << shift));

out:
	if (dn)
		dnode_rele(dn, FTAG);
	if (os && os != dp->dp_meta_objset)
		dmu_objset_close(os);
	return (err);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}

void
dmu_init(void)
{
	dbuf_init();
	dnode_init();
	arc_init();
}

void
dmu_fini(void)
{
	arc_fini();
	dnode_fini();
	dbuf_fini();
}