/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/sunddi.h>

static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_checkfunc_t dsl_dataset_rollback_check;
static dsl_syncfunc_t dsl_dataset_rollback_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)


/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}
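
/*
 * Editorial note (not in the original source): a quick worked example of the
 * accounting above, assuming ds_reserved acts as a floor on the space
 * charged to our ancestors.
 *
 *	ds_reserved = 100, ds_unique_bytes = 60, delta = +30:
 *		old = MAX(60, 100) = 100, new = MAX(90, 100) = 100 -> 0
 *	(the write is absorbed by the refreservation, so nothing extra is
 *	propagated to the dsl_dir layer)
 *
 *	ds_reserved = 100, ds_unique_bytes = 90, delta = +30:
 *		old = MAX(90, 100) = 100, new = MAX(120, 100) = 120 -> +20
 *	(only the 20 bytes beyond the reservation are charged upward)
 */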

void
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "born, ds=%p\n", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
		 */
		ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */
		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir,
		    used, compressed, uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return;
	}
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	mutex_enter(&ds->ds_lock);
	delta = parent_delta(ds, used);
	ds->ds_phys->ds_used_bytes += used;
	ds->ds_phys->ds_compressed_bytes += compressed;
	ds->ds_phys->ds_uncompressed_bytes += uncompressed;
	ds->ds_phys->ds_unique_bytes += used;
	mutex_exit(&ds->ds_lock);
	dsl_dir_diduse_space(ds->ds_dir, delta, compressed, uncompressed, tx);
}

int
dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio,
    dmu_tx_t *tx)
{
	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);

	ASSERT(dmu_tx_is_syncing(tx));
	/* No block pointer => nothing to free */
	if (BP_IS_HOLE(bp))
		return (0);

	ASSERT(used > 0);
	if (ds == NULL) {
		int err;
		/*
		 * Account for the meta-objset space in its placeholder
		 * dataset.
		 */
		err = dsl_free(pio, tx->tx_pool,
		    tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT : ARC_WAIT);
		ASSERT(err == 0);

		dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir,
		    -used, -compressed, -uncompressed, tx);
		dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx);
		return (used);
	}
	ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) {
		int err;
		int64_t delta;

		dprintf_bp(bp, "freeing: %s", "");
		err = dsl_free(pio, tx->tx_pool,
		    tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT : ARC_WAIT);
		ASSERT(err == 0);

		mutex_enter(&ds->ds_lock);
		ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
		    !DS_UNIQUE_IS_ACCURATE(ds));
		delta = parent_delta(ds, -used);
		ds->ds_phys->ds_unique_bytes -= used;
		mutex_exit(&ds->ds_lock);
		dsl_dir_diduse_space(ds->ds_dir,
		    delta, -compressed, -uncompressed, tx);
	} else {
		dprintf_bp(bp, "putting on dead list: %s", "");
		VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx));
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0);
		/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
		if (ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object && bp->blk_birth >
		    ds->ds_prev->ds_phys->ds_prev_snap_txg) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			mutex_enter(&ds->ds_prev->ds_lock);
			ds->ds_prev->ds_phys->ds_unique_bytes += used;
			mutex_exit(&ds->ds_prev->ds_lock);
		}
	}
	mutex_enter(&ds->ds_lock);
	ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used);
	ds->ds_phys->ds_used_bytes -= used;
	ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed);
	ds->ds_phys->ds_compressed_bytes -= compressed;
	ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed);
	ds->ds_phys->ds_uncompressed_bytes -= uncompressed;
	mutex_exit(&ds->ds_lock);

	return (used);
}
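
/*
 * Editorial note (not in the original source): a sketch of the bookkeeping
 * done by dsl_dataset_block_born()/dsl_dataset_block_kill() above, with
 * made-up numbers.  For a 128K logical block that compresses to 32K:
 *
 *	born:  ds_used_bytes         += dasize (allocated size, including
 *	                                        mirror/raidz overhead)
 *	       ds_compressed_bytes   += 32K  (BP_GET_PSIZE)
 *	       ds_uncompressed_bytes += 128K (BP_GET_UCSIZE)
 *	       ds_unique_bytes       += dasize
 *
 *	kill:  if the block was born after the most recent snapshot it is
 *	       freed immediately and the same amounts are subtracted;
 *	       otherwise it is still referenced by that snapshot, so it is
 *	       queued on ds_deadlist instead and only this dataset's totals
 *	       are reduced.
 */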

uint64_t
dsl_dataset_prev_snap_txg(dsl_dataset_t *ds)
{
	uint64_t trysnap = 0;

	if (ds == NULL)
		return (0);
	/*
	 * The snapshot creation could fail, but that would cause an
	 * incorrect FALSE return, which would only result in an
	 * overestimation of the amount of space that an operation would
	 * consume, which is OK.
	 *
	 * There's also a small window where we could miss a pending
	 * snapshot, because we could set the sync task in the quiescing
	 * phase.  So this should only be used as a guess.
	 */
	if (ds->ds_trysnap_txg >
	    spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa))
		trysnap = ds->ds_trysnap_txg;
	return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap));
}

int
dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth)
{
	return (blk_birth > dsl_dataset_prev_snap_txg(ds));
}

/* ARGSUSED */
static void
dsl_dataset_evict(dmu_buf_t *db, void *dsv)
{
	dsl_dataset_t *ds = dsv;

	ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds));

	dprintf_ds(ds, "evicting %s\n", "");

	unique_remove(ds->ds_fsid_guid);

	if (ds->ds_user_ptr != NULL)
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);

	if (ds->ds_prev) {
		dsl_dataset_drop_ref(ds->ds_prev, ds);
		ds->ds_prev = NULL;
	}

	bplist_close(&ds->ds_deadlist);
	if (ds->ds_dir)
		dsl_dir_close(ds->ds_dir, ds);

	ASSERT(!list_link_active(&ds->ds_synced_link));

	mutex_destroy(&ds->ds_lock);
	mutex_destroy(&ds->ds_opening_lock);
	mutex_destroy(&ds->ds_deadlist.bpl_lock);
	rw_destroy(&ds->ds_rwlock);
	cv_destroy(&ds->ds_exclusive_cv);

	kmem_free(ds, sizeof (dsl_dataset_t));
}

static int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
	dsl_dataset_phys_t *headphys;
	int err;
	dmu_buf_t *headdbuf;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;

	if (ds->ds_snapname[0])
		return (0);
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (0);

	err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj,
	    FTAG, &headdbuf);
	if (err)
		return (err);
	headphys = headdbuf->db_data;
	err = zap_value_search(dp->dp_meta_objset,
	    headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
	dmu_buf_rele(headdbuf, FTAG);
	return (err);
}

static int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_lookup_norm(mos, snapobj, name, 8, 1,
	    value, mt, NULL, 0, NULL);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_lookup(mos, snapobj, name, 8, 1, value);
	return (err);
}

static int
dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx)
{
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj;
	matchtype_t mt;
	int err;

	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		mt = MT_FIRST;
	else
		mt = MT_EXACT;

	err = zap_remove_norm(mos, snapobj, name, mt, tx);
	if (err == ENOTSUP && mt == MT_FIRST)
		err = zap_remove(mos, snapobj, name, tx);
	return (err);
}

static int
dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	objset_t *mos = dp->dp_meta_objset;
	dmu_buf_t *dbuf;
	dsl_dataset_t *ds;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
	if (err)
		return (err);
	ds = dmu_buf_get_user(dbuf);
	if (ds == NULL) {
		dsl_dataset_t *winner;

		ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
		ds->ds_dbuf = dbuf;
		ds->ds_object = dsobj;
		ds->ds_phys = dbuf->db_data;

		mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		rw_init(&ds->ds_rwlock, 0, 0, 0);
		cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL);

		err = bplist_open(&ds->ds_deadlist,
		    mos, ds->ds_phys->ds_deadlist_obj);
		if (err == 0) {
			err = dsl_dir_open_obj(dp,
			    ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir);
		}
		if (err) {
			/*
			 * we don't really need to close the bplist if we
			 * just opened it.
			 */
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_deadlist.bpl_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			dmu_buf_rele(dbuf, tag);
			return (err);
		}

		if (ds->ds_dir->dd_phys->dd_head_dataset_obj == dsobj) {
			ds->ds_snapname[0] = '\0';
			if (ds->ds_phys->ds_prev_snap_obj) {
				err = dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds, &ds->ds_prev);
			}
		} else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) {
			err = dsl_dataset_get_snapname(ds);
		}

		if (!dsl_dataset_is_snapshot(ds)) {
			/*
			 * In sync context, we're called with either no lock
			 * or with the write lock.  If we're not syncing,
			 * we're always called with the read lock held.
			 */
			boolean_t need_lock =
			    !RW_WRITE_HELD(&dp->dp_config_rwlock) &&
			    dsl_pool_sync_context(dp);

			if (need_lock)
				rw_enter(&dp->dp_config_rwlock, RW_READER);

			err = dsl_prop_get_ds_locked(ds->ds_dir,
			    "refreservation", sizeof (uint64_t), 1,
			    &ds->ds_reserved, NULL);
			if (err == 0) {
				err = dsl_prop_get_ds_locked(ds->ds_dir,
				    "refquota", sizeof (uint64_t), 1,
				    &ds->ds_quota, NULL);
			}

			if (need_lock)
				rw_exit(&dp->dp_config_rwlock);
		} else {
			ds->ds_reserved = ds->ds_quota = 0;
		}

		if (err == 0) {
			winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys,
			    dsl_dataset_evict);
		}
		if (err || winner) {
			bplist_close(&ds->ds_deadlist);
			if (ds->ds_prev)
				dsl_dataset_drop_ref(ds->ds_prev, ds);
			dsl_dir_close(ds->ds_dir, ds);
			mutex_destroy(&ds->ds_lock);
			mutex_destroy(&ds->ds_opening_lock);
			mutex_destroy(&ds->ds_deadlist.bpl_lock);
			rw_destroy(&ds->ds_rwlock);
			cv_destroy(&ds->ds_exclusive_cv);
			kmem_free(ds, sizeof (dsl_dataset_t));
			if (err) {
				dmu_buf_rele(dbuf, tag);
				return (err);
			}
			ds = winner;
		} else {
			ds->ds_fsid_guid =
			    unique_insert(ds->ds_phys->ds_fsid_guid);
		}
	}
	ASSERT3P(ds->ds_dbuf, ==, dbuf);
	ASSERT3P(ds->ds_phys, ==, dbuf->db_data);
	ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 ||
	    spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
	    dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
	mutex_enter(&ds->ds_lock);
	if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) {
		mutex_exit(&ds->ds_lock);
		dmu_buf_rele(ds->ds_dbuf, tag);
		return (ENOENT);
	}
	mutex_exit(&ds->ds_lock);
	*dsp = ds;
	return (0);
}

static int
dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/*
	 * In syncing context we don't want the rwlock lock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructability" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		rw_enter(&dp->dp_config_rwlock, RW_READER);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}

int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);

	ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);

	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
		dsl_dataset_rele(*dsp, owner);
		return (EBUSY);
	}
	return (0);
}

int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ? NULL : ds;
		}
	}
out:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dir_close(dd, FTAG);
	return (err);
}
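
/*
 * Editorial note (not in the original source): a minimal usage sketch of the
 * hold interfaces above.  The dataset name "pool/fs@snap" and the use of
 * FTAG are illustrative only.
 *
 *	dsl_dataset_t *ds;
 *	if (dsl_dataset_hold("pool/fs@snap", FTAG, &ds) == 0) {
 *		... read-only use of ds; ds_rwlock is held as READER ...
 *		dsl_dataset_rele(ds, FTAG);
 *	}
 *
 * Long-lived or exclusive users call dsl_dataset_own()/dsl_dataset_own_obj()
 * instead, which sets ds_owner and must be balanced by dsl_dataset_disown().
 */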

int
dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold(name, owner, dsp);
	if (err)
		return (err);
	if ((*dsp)->ds_phys->ds_num_children > 0 &&
	    !DS_MODE_IS_READONLY(flags)) {
		dsl_dataset_rele(*dsp, owner);
		return (EROFS);
	}
	if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
		dsl_dataset_rele(*dsp, owner);
		return (EBUSY);
	}
	return (0);
}

void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
	if (ds == NULL) {
		(void) strcpy(name, "mos");
	} else {
		dsl_dir_name(ds->ds_dir, name);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			(void) strcat(name, "@");
			/*
			 * We use a "recursive" mutex so that we
			 * can call dprintf_ds() with ds_lock held.
			 */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				(void) strcat(name, ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				(void) strcat(name, ds->ds_snapname);
			}
		}
	}
}

static int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
	int result;

	if (ds == NULL) {
		result = 3;	/* "mos" */
	} else {
		result = dsl_dir_namelen(ds->ds_dir);
		VERIFY(0 == dsl_dataset_get_snapname(ds));
		if (ds->ds_snapname[0]) {
			++result;	/* adding one for the @-sign */
			if (!MUTEX_HELD(&ds->ds_lock)) {
				mutex_enter(&ds->ds_lock);
				result += strlen(ds->ds_snapname);
				mutex_exit(&ds->ds_lock);
			} else {
				result += strlen(ds->ds_snapname);
			}
		}
	}

	return (result);
}

void
dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag)
{
	dmu_buf_rele(ds->ds_dbuf, tag);
}

void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
	if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) {
		rw_exit(&ds->ds_rwlock);
	}
	dsl_dataset_drop_ref(ds, tag);
}

void
dsl_dataset_disown(dsl_dataset_t *ds, void *owner)
{
	ASSERT((ds->ds_owner == owner && ds->ds_dbuf) ||
	    (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL));

	mutex_enter(&ds->ds_lock);
	ds->ds_owner = NULL;
	if (RW_WRITE_HELD(&ds->ds_rwlock)) {
		rw_exit(&ds->ds_rwlock);
		cv_broadcast(&ds->ds_exclusive_cv);
	}
	mutex_exit(&ds->ds_lock);
	if (ds->ds_dbuf)
		dsl_dataset_drop_ref(ds, owner);
	else
		dsl_dataset_evict(ds->ds_dbuf, ds);
}

boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner)
{
	boolean_t gotit = FALSE;

	mutex_enter(&ds->ds_lock);
	if (ds->ds_owner == NULL &&
	    (!DS_IS_INCONSISTENT(ds) || inconsistentok)) {
		ds->ds_owner = owner;
		if (!dsl_pool_sync_context(ds->ds_dir->dd_pool))
			rw_exit(&ds->ds_rwlock);
		gotit = TRUE;
	}
	mutex_exit(&ds->ds_lock);
	return (gotit);
}

void
dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner)
{
	ASSERT3P(owner, ==, ds->ds_owner);
	if (!RW_WRITE_HELD(&ds->ds_rwlock))
		rw_enter(&ds->ds_rwlock, RW_WRITER);
}

uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
    uint64_t flags, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj;
	objset_t *mos = dp->dp_meta_objset;

	if (origin == NULL)
		origin = dp->dp_origin_snap;

	ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
	ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0);
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = dd->dd_object;
	dsphys->ds_flags = flags;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_snapnames_zapobj =
	    zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
	    DMU_OT_NONE, 0, tx);
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
	dsphys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);

	if (origin) {
		dsphys->ds_prev_snap_obj = origin->ds_object;
		dsphys->ds_prev_snap_txg =
		    origin->ds_phys->ds_creation_txg;
		dsphys->ds_used_bytes =
		    origin->ds_phys->ds_used_bytes;
		dsphys->ds_compressed_bytes =
		    origin->ds_phys->ds_compressed_bytes;
		dsphys->ds_uncompressed_bytes =
		    origin->ds_phys->ds_uncompressed_bytes;
		dsphys->ds_bp = origin->ds_phys->ds_bp;
		dsphys->ds_flags |= origin->ds_phys->ds_flags;

		dmu_buf_will_dirty(origin->ds_dbuf, tx);
		origin->ds_phys->ds_num_children++;

		if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
			if (origin->ds_phys->ds_next_clones_obj == 0) {
				origin->ds_phys->ds_next_clones_obj =
				    zap_create(mos,
				    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
			}
			VERIFY(0 == zap_add_int(mos,
			    origin->ds_phys->ds_next_clones_obj,
			    dsobj, tx));
		}

		dmu_buf_will_dirty(dd->dd_dbuf, tx);
		dd->dd_phys->dd_origin_obj = origin->ds_object;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;

	dmu_buf_rele(dbuf, FTAG);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	dd->dd_phys->dd_head_dataset_obj = dsobj;

	return (dsobj);
}

uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
    dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx)
{
	dsl_pool_t *dp = pdd->dd_pool;
	uint64_t dsobj, ddobj;
	dsl_dir_t *dd;

	ASSERT(lastname[0] != '@');

	ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd));

	dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx);

	dsl_deleg_set_create_perms(dd, tx, cr);

	dsl_dir_close(dd, FTAG);

	return (dsobj);
}

struct destroyarg {
	dsl_sync_task_group_t *dstg;
	char *snapname;
	char *failed;
};

static int
dsl_snapshot_destroy_one(char *name, void *arg)
{
	struct destroyarg *da = arg;
	dsl_dataset_t *ds;
	char *cp;
	int err;

	(void) strcat(name, "@");
	(void) strcat(name, da->snapname);
	err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT,
	    da->dstg, &ds);
	cp = strchr(name, '@');
	*cp = '\0';
	if (err == 0) {
		dsl_dataset_make_exclusive(ds, da->dstg);
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
		    dsl_dataset_destroy_sync, ds, da->dstg, 0);
	} else if (err == ENOENT) {
		err = 0;
	} else {
		(void) strcpy(da->failed, name);
	}
	return (err);
}

/*
 * Destroy 'snapname' in all descendants of 'fsname'.
 */
#pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy
int
dsl_snapshots_destroy(char *fsname, char *snapname)
{
	int err;
	struct destroyarg da;
	dsl_sync_task_t *dst;
	spa_t *spa;

	err = spa_open(fsname, &spa, FTAG);
	if (err)
		return (err);
	da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	da.snapname = snapname;
	da.failed = fsname;

	err = dmu_objset_find(fsname,
	    dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN);

	if (err == 0)
		err = dsl_sync_task_group_wait(da.dstg);

	for (dst = list_head(&da.dstg->dstg_tasks); dst;
	    dst = list_next(&da.dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		/*
		 * Return the file system name that triggered the error
		 */
		if (dst->dst_err) {
			dsl_dataset_name(ds, fsname);
			*strchr(fsname, '@') = '\0';
		}
		dsl_dataset_disown(ds, da.dstg);
	}

	dsl_sync_task_group_destroy(da.dstg);
	spa_close(spa, FTAG);
	return (err);
}

/*
 * ds must be opened as OWNER.  On return (whether successful or not),
 * ds will be closed and caller can no longer dereference it.
 */
int
dsl_dataset_destroy(dsl_dataset_t *ds, void *tag)
{
	int err;
	dsl_sync_task_group_t *dstg;
	objset_t *os;
	dsl_dir_t *dd;
	uint64_t obj;

	if (dsl_dataset_is_snapshot(ds)) {
		/* Destroying a snapshot is simpler */
		dsl_dataset_make_exclusive(ds, tag);

		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_destroy_check, dsl_dataset_destroy_sync,
		    ds, tag, 0);
		goto out;
	}

	dd = ds->ds_dir;

	/*
	 * Check for errors and mark this ds as inconsistent, in
	 * case we crash while freeing the objects.
	 */
	err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check,
	    dsl_dataset_destroy_begin_sync, ds, NULL, 0);
	if (err)
		goto out;

	err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os);
	if (err)
		goto out;

	/*
	 * remove the objects in open context, so that we won't
	 * have too much to do in syncing context.
	 */
	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE,
	    ds->ds_phys->ds_prev_snap_txg)) {
		/*
		 * Ignore errors, if there is not enough disk space
		 * we will deal with it in dsl_dataset_destroy_sync().
		 */
		(void) dmu_free_object(os, obj);
	}

	dmu_objset_close(os);
	if (err != ESRCH)
		goto out;

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd);
	rw_exit(&dd->dd_pool->dp_config_rwlock);

	if (err)
		goto out;

	if (ds->ds_user_ptr) {
		/*
		 * We need to sync out all in-flight IO before we try
		 * to evict (the dataset evict func is trying to clear
		 * the cached entries for this dataset in the ARC).
		 */
		txg_wait_synced(dd->dd_pool, 0);
	}

	/*
	 * Blow away the dsl_dir + head dataset.
	 */
	dsl_dataset_make_exclusive(ds, tag);
	if (ds->ds_user_ptr) {
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}
	dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool);
	dsl_sync_task_create(dstg, dsl_dataset_destroy_check,
	    dsl_dataset_destroy_sync, ds, tag, 0);
	dsl_sync_task_create(dstg, dsl_dir_destroy_check,
	    dsl_dir_destroy_sync, dd, FTAG, 0);
	err = dsl_sync_task_group_wait(dstg);
	dsl_sync_task_group_destroy(dstg);
	/* if it is successful, dsl_dir_destroy_sync will close the dd */
	if (err)
		dsl_dir_close(dd, FTAG);
out:
	dsl_dataset_disown(ds, tag);
	return (err);
}
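
/*
 * Editorial note (not in the original source): a rough usage sketch for the
 * function above, with an illustrative dataset name and the same mode flags
 * used by dsl_snapshot_destroy_one():
 *
 *	dsl_dataset_t *ds;
 *	int err = dsl_dataset_own("pool/fs", DS_MODE_READONLY |
 *	    DS_MODE_INCONSISTENT, FTAG, &ds);
 *	if (err == 0)
 *		err = dsl_dataset_destroy(ds, FTAG);
 *
 * Per the comment above dsl_dataset_destroy(), ds is consumed whether the
 * destroy succeeds or not and must not be dereferenced afterward.
 */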

int
dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost)
{
	ASSERT(ds->ds_owner);

	return (dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_rollback_check, dsl_dataset_rollback_sync,
	    ds, &ost, 0));
}

void *
dsl_dataset_set_user_ptr(dsl_dataset_t *ds,
    void *p, dsl_dataset_evict_func_t func)
{
	void *old;

	mutex_enter(&ds->ds_lock);
	old = ds->ds_user_ptr;
	if (old == NULL) {
		ds->ds_user_ptr = p;
		ds->ds_user_evict_func = func;
	}
	mutex_exit(&ds->ds_lock);
	return (old);
}

void *
dsl_dataset_get_user_ptr(dsl_dataset_t *ds)
{
	return (ds->ds_user_ptr);
}


blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
	return (&ds->ds_phys->ds_bp);
}

void
dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	/* If it's the meta-objset, set dp_meta_rootbp */
	if (ds == NULL) {
		tx->tx_pool->dp_meta_rootbp = *bp;
	} else {
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_bp = *bp;
	}
}

spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
	return (ds->ds_dir->dd_pool->dp_spa);
}

void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp;

	if (ds == NULL) /* this is the meta-objset */
		return;

	ASSERT(ds->ds_user_ptr != NULL);

	if (ds->ds_phys->ds_next_snap_obj != 0)
		panic("dirtying snapshot!");

	dp = ds->ds_dir->dd_pool;

	if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(ds->ds_dbuf, ds);
	}
}

/*
 * The unique space in the head dataset can be calculated by subtracting
 * the space used in the most recent snapshot, that is still being used
 * in this file system, from the space currently in use.  To figure out
 * the space in the most recent snapshot still in use, we need to take
 * the total space used in the snapshot and subtract out the space that
 * has been freed up since the snapshot was taken.
 */
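/*
 * Editorial note (not in the original source): in symbols, the computation
 * described above (and implemented below) is roughly
 *
 *	unique = used - (mrs_used - dlused)
 *
 * where "used" is this dataset's ds_used_bytes, "mrs_used" is the most
 * recent snapshot's ds_used_bytes, and "dlused" is the space on our
 * deadlist (blocks freed here since that snapshot was taken).
 */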
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
	    &dluncomp));

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (!DS_UNIQUE_IS_ACCURATE(ds) &&
	    spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}

static uint64_t
dsl_dataset_unique(dsl_dataset_t *ds)
{
	if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
		dsl_dataset_recalc_head_uniq(ds);

	return (ds->ds_phys->ds_unique_bytes);
}

struct killarg {
	int64_t *usedp;
	int64_t *compressedp;
	int64_t *uncompressedp;
	zio_t *zio;
	dmu_tx_t *tx;
};

static int
kill_blkptr(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
{
	struct killarg *ka = arg;
	blkptr_t *bp = &bc->bc_blkptr;

	ASSERT3U(bc->bc_errno, ==, 0);

	/*
	 * Since this callback is not called concurrently, no lock is
	 * needed on the accounting values.
	 */
	*ka->usedp += bp_get_dasize(spa, bp);
	*ka->compressedp += BP_GET_PSIZE(bp);
	*ka->uncompressedp += BP_GET_UCSIZE(bp);
	/* XXX check for EIO? */
	(void) dsl_free(ka->zio, spa_get_dsl(spa), ka->tx->tx_txg,
	    bp, NULL, NULL, ARC_NOWAIT);
	return (0);
}

/* ARGSUSED */
static int
dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;

	/*
	 * We can only roll back to emptiness if it is a ZPL objset.
	 */
	if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
		return (EINVAL);

	/*
	 * This must not be a snapshot.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/*
	 * Before the rollback, destroy the zil.
	 */
	if (ds->ds_user_ptr != NULL) {
		zil_rollback_destroy(
		    ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx);

		/*
		 * We need to make sure that the objset_impl_t is reopened
		 * after we do the rollback, otherwise it will have the
		 * wrong objset_phys_t.  Normally this would happen when
		 * this dataset-open is closed, thus causing the dataset to
		 * be immediately evicted.  But when doing "zfs recv -F",
		 * we reopen the objset before that, so that there is no
		 * window where the dataset is closed and inconsistent.
		 */
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}

	/* Zero out the deadlist. */
	bplist_close(&ds->ds_deadlist);
	bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
	ds->ds_phys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
	    ds->ds_phys->ds_deadlist_obj));

	{
		/* Free blkptrs that we gave birth to */
		zio_t *zio;
		int64_t used = 0, compressed = 0, uncompressed = 0;
		struct killarg ka;
		int64_t delta;

		zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL,
		    ZIO_FLAG_MUSTSUCCEED);
		ka.usedp = &used;
		ka.compressedp = &compressed;
		ka.uncompressedp = &uncompressed;
		ka.zio = zio;
		ka.tx = tx;
		(void) traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    ADVANCE_POST, kill_blkptr, &ka);
		(void) zio_wait(zio);

		/* only deduct space beyond any refreservation */
		delta = parent_delta(ds, -used);
		dsl_dir_diduse_space(ds->ds_dir,
		    delta, -compressed, -uncompressed, tx);
	}

	if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) {
		/* Change our contents to that of the prev snapshot */
		ASSERT3U(ds->ds_prev->ds_object, ==,
		    ds->ds_phys->ds_prev_snap_obj);
		ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp;
		ds->ds_phys->ds_used_bytes =
		    ds->ds_prev->ds_phys->ds_used_bytes;
		ds->ds_phys->ds_compressed_bytes =
		    ds->ds_prev->ds_phys->ds_compressed_bytes;
		ds->ds_phys->ds_uncompressed_bytes =
		    ds->ds_prev->ds_phys->ds_uncompressed_bytes;
		ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags;
		ds->ds_phys->ds_unique_bytes = 0;

		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ds->ds_prev->ds_phys->ds_unique_bytes = 0;
		}
	} else {
		objset_impl_t *osi;

		/* Zero out our contents, recreate objset */
		bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t));
		ds->ds_phys->ds_used_bytes = 0;
		ds->ds_phys->ds_compressed_bytes = 0;
		ds->ds_phys->ds_uncompressed_bytes = 0;
		ds->ds_phys->ds_flags = 0;
		ds->ds_phys->ds_unique_bytes = 0;
		osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds,
		    &ds->ds_phys->ds_bp, *ost, tx);
#ifdef _KERNEL
		zfs_create_fs(&osi->os, kcred, NULL, tx);
#endif
	}

	spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa,
	    tx, cr, "dataset = %llu", ds->ds_object);
}

/* ARGSUSED */
static int
dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
	uint64_t count;
	int err;

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EINVAL);

	/*
	 * This is really a dsl_dir thing, but check it here so that
	 * we'll be less likely to leave this dataset inconsistent &
	 * nearly destroyed.
	 */
	err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Mark it as inconsistent on-disk, in case we crash */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);
}

/* ARGSUSED */
int
dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;

	/* we have an owner hold, so no one else can destroy us */
	ASSERT(!DSL_DATASET_IS_DESTROYED(ds));

	/* Can't delete a branch point. */
	if (ds->ds_phys->ds_num_children > 1)
		return (EEXIST);

	/*
	 * Can't delete a head dataset if there are snapshots of it.
	 * (Except if the only snapshots are from the branch we cloned
	 * from.)
	 */
	if (ds->ds_prev != NULL &&
	    ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	/* XXX we should do some i/o error checking... */
	return (0);
}

struct refsarg {
	kmutex_t lock;
	boolean_t gone;
	kcondvar_t cv;
};

/* ARGSUSED */
static void
dsl_dataset_refs_gone(dmu_buf_t *db, void *argv)
{
	struct refsarg *arg = argv;

	mutex_enter(&arg->lock);
	arg->gone = TRUE;
	cv_signal(&arg->cv);
	mutex_exit(&arg->lock);
}

static void
dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag)
{
	struct refsarg arg;

	mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arg.cv, NULL, CV_DEFAULT, NULL);
	arg.gone = FALSE;
	(void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys,
	    dsl_dataset_refs_gone);
	dmu_buf_rele(ds->ds_dbuf, tag);
	mutex_enter(&arg.lock);
	while (!arg.gone)
		cv_wait(&arg.cv, &arg.lock);
	ASSERT(arg.gone);
	mutex_exit(&arg.lock);
	ds->ds_dbuf = NULL;
	ds->ds_phys = NULL;
	mutex_destroy(&arg.lock);
	cv_destroy(&arg.cv);
}

void
dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	int64_t used = 0, compressed = 0, uncompressed = 0;
	zio_t *zio;
	int err;
	int after_branch_point = FALSE;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	dsl_dataset_t *ds_prev = NULL;
	uint64_t obj;

	ASSERT(ds->ds_owner);
	ASSERT3U(ds->ds_phys->ds_num_children, <=, 1);
	ASSERT(ds->ds_prev == NULL ||
	    ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object);
	ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg);

	/* signal any waiters that this dataset is going away */
	mutex_enter(&ds->ds_lock);
	ds->ds_owner = dsl_reaper;
	cv_broadcast(&ds->ds_exclusive_cv);
	mutex_exit(&ds->ds_lock);

	/* Remove our reservation */
	if (ds->ds_reserved != 0) {
		uint64_t val = 0;
		dsl_dataset_set_reservation_sync(ds, &val, cr, tx);
		ASSERT3U(ds->ds_reserved, ==, 0);
	}

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	dsl_pool_ds_destroyed(ds, tx);

	obj = ds->ds_object;

	if (ds->ds_phys->ds_prev_snap_obj != 0) {
		if (ds->ds_prev) {
			ds_prev = ds->ds_prev;
		} else {
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev));
		}
		after_branch_point =
		    (ds_prev->ds_phys->ds_next_snap_obj != obj);

		dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
		if (after_branch_point &&
		    ds_prev->ds_phys->ds_next_clones_obj != 0) {
			VERIFY(0 == zap_remove_int(mos,
			    ds_prev->ds_phys->ds_next_clones_obj, obj, tx));
			if (ds->ds_phys->ds_next_snap_obj != 0) {
				VERIFY(0 == zap_add_int(mos,
				    ds_prev->ds_phys->ds_next_clones_obj,
				    ds->ds_phys->ds_next_snap_obj, tx));
			}
		}
		if (after_branch_point &&
		    ds->ds_phys->ds_next_snap_obj == 0) {
			/* This clone is toast. */
			ASSERT(ds_prev->ds_phys->ds_num_children > 1);
			ds_prev->ds_phys->ds_num_children--;
		} else if (!after_branch_point) {
			ds_prev->ds_phys->ds_next_snap_obj =
			    ds->ds_phys->ds_next_snap_obj;
		}
	}

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);

	if (ds->ds_phys->ds_next_snap_obj != 0) {
		blkptr_t bp;
		dsl_dataset_t *ds_next;
		uint64_t itor = 0;
		uint64_t old_unique;

		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next));
		ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj);

		old_unique = dsl_dataset_unique(ds_next);

		dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
		ds_next->ds_phys->ds_prev_snap_obj =
		    ds->ds_phys->ds_prev_snap_obj;
		ds_next->ds_phys->ds_prev_snap_txg =
		    ds->ds_phys->ds_prev_snap_txg;
		ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
		    ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0);

		/*
		 * Transfer to our deadlist (which will become next's
		 * new deadlist) any entries from next's current
		 * deadlist which were born before prev, and free the
		 * other entries.
		 *
		 * XXX we're doing this long task with the config lock held
		 */
		while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) {
			if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) {
				VERIFY(0 == bplist_enqueue(&ds->ds_deadlist,
				    &bp, tx));
				if (ds_prev && !after_branch_point &&
				    bp.blk_birth >
				    ds_prev->ds_phys->ds_prev_snap_txg) {
					ds_prev->ds_phys->ds_unique_bytes +=
					    bp_get_dasize(dp->dp_spa, &bp);
				}
			} else {
				used += bp_get_dasize(dp->dp_spa, &bp);
				compressed += BP_GET_PSIZE(&bp);
				uncompressed += BP_GET_UCSIZE(&bp);
				/* XXX check return value? */
				(void) dsl_free(zio, dp, tx->tx_txg,
				    &bp, NULL, NULL, ARC_NOWAIT);
			}
		}

		/* free next's deadlist */
		bplist_close(&ds_next->ds_deadlist);
		bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);

		/* set next's deadlist to our deadlist */
		bplist_close(&ds->ds_deadlist);
		ds_next->ds_phys->ds_deadlist_obj =
		    ds->ds_phys->ds_deadlist_obj;
		VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
		    ds_next->ds_phys->ds_deadlist_obj));
		ds->ds_phys->ds_deadlist_obj = 0;

		if (ds_next->ds_phys->ds_next_snap_obj != 0) {
			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (ie. be on the snap after next's
			 * deadlist).
			 *
			 * XXX we're doing this long task with the
			 * config lock held
			 */
			dsl_dataset_t *ds_after_next;

			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_after_next));
			itor = 0;
			while (bplist_iterate(&ds_after_next->ds_deadlist,
			    &itor, &bp) == 0) {
				if (bp.blk_birth >
				    ds->ds_phys->ds_prev_snap_txg &&
				    bp.blk_birth <=
				    ds->ds_phys->ds_creation_txg) {
					ds_next->ds_phys->ds_unique_bytes +=
					    bp_get_dasize(dp->dp_spa, &bp);
				}
			}

			dsl_dataset_rele(ds_after_next, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir, -mrsdelta,
				    0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);

		/*
		 * NB: unique_bytes might not be accurate for the head objset.
		 * Before SPA_VERSION 9, we didn't update its value when we
		 * deleted the most recent snapshot.
		 */
		ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
		bplist_close(&ds->ds_deadlist);
		bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * XXX we're doing this long task with the config lock held
		 */
		ka.usedp = &used;
		ka.compressedp = &compressed;
		ka.uncompressedp = &uncompressed;
		ka.zio = zio;
		ka.tx = tx;
		err = traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    ADVANCE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(spa_version(dp->dp_spa) <
		    SPA_VERSION_UNIQUE_ACCURATE ||
		    used == ds->ds_phys->ds_unique_bytes);
	}

	err = zio_wait(zio);
	ASSERT3U(err, ==, 0);

	dsl_dir_diduse_space(ds->ds_dir, -used, -compressed, -uncompressed, tx);

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}
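
/*
 * Editorial note (not in the original source): a small worked example of the
 * check above.  If the dataset has refreservation = 10G and currently holds
 * 4G of unique data, then asize = MIN(4G, 10G) = 4G: taking a snapshot would
 * move those 4G under the snapshot, so 4G of free space outside the
 * reservation must be available (and is then accounted for via
 * dsl_dir_willuse_space() for the rest of this sync group).
 */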
1703 */ 1704 if (asize > 0) 1705 dsl_dir_willuse_space(ds->ds_dir, asize, tx); 1706 1707 return (0); 1708 } 1709 1710 /* ARGSUSED */ 1711 int 1712 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx) 1713 { 1714 dsl_dataset_t *ds = arg1; 1715 const char *snapname = arg2; 1716 int err; 1717 uint64_t value; 1718 1719 /* 1720 * We don't allow multiple snapshots of the same txg. If there 1721 * is already one, try again. 1722 */ 1723 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg) 1724 return (EAGAIN); 1725 1726 /* 1727 * Check for conflicting name snapshot name. 1728 */ 1729 err = dsl_dataset_snap_lookup(ds, snapname, &value); 1730 if (err == 0) 1731 return (EEXIST); 1732 if (err != ENOENT) 1733 return (err); 1734 1735 /* 1736 * Check that the dataset's name is not too long. Name consists 1737 * of the dataset's length + 1 for the @-sign + snapshot name's length 1738 */ 1739 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN) 1740 return (ENAMETOOLONG); 1741 1742 err = dsl_dataset_snapshot_reserve_space(ds, tx); 1743 if (err) 1744 return (err); 1745 1746 ds->ds_trysnap_txg = tx->tx_txg; 1747 return (0); 1748 } 1749 1750 void 1751 dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 1752 { 1753 dsl_dataset_t *ds = arg1; 1754 const char *snapname = arg2; 1755 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1756 dmu_buf_t *dbuf; 1757 dsl_dataset_phys_t *dsphys; 1758 uint64_t dsobj, crtxg; 1759 objset_t *mos = dp->dp_meta_objset; 1760 int err; 1761 1762 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 1763 1764 /* 1765 * The origin's ds_creation_txg has to be < TXG_INITIAL 1766 */ 1767 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0) 1768 crtxg = 1; 1769 else 1770 crtxg = tx->tx_txg; 1771 1772 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 1773 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 1774 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 1775 dmu_buf_will_dirty(dbuf, tx); 1776 dsphys = dbuf->db_data; 1777 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 1778 dsphys->ds_dir_obj = ds->ds_dir->dd_object; 1779 dsphys->ds_fsid_guid = unique_create(); 1780 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 1781 sizeof (dsphys->ds_guid)); 1782 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; 1783 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; 1784 dsphys->ds_next_snap_obj = ds->ds_object; 1785 dsphys->ds_num_children = 1; 1786 dsphys->ds_creation_time = gethrestime_sec(); 1787 dsphys->ds_creation_txg = crtxg; 1788 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj; 1789 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes; 1790 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes; 1791 dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes; 1792 dsphys->ds_flags = ds->ds_phys->ds_flags; 1793 dsphys->ds_bp = ds->ds_phys->ds_bp; 1794 dmu_buf_rele(dbuf, FTAG); 1795 1796 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0); 1797 if (ds->ds_prev) { 1798 uint64_t next_clones_obj = 1799 ds->ds_prev->ds_phys->ds_next_clones_obj; 1800 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj == 1801 ds->ds_object || 1802 ds->ds_prev->ds_phys->ds_num_children > 1); 1803 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { 1804 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 1805 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 1806 ds->ds_prev->ds_phys->ds_creation_txg); 1807 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj; 1808 } else if (next_clones_obj != 0) { 1809 VERIFY3U(0, ==, zap_remove_int(mos, 1810 
			    next_clones_obj, dsphys->ds_next_snap_obj, tx));
			VERIFY3U(0, ==, zap_add_int(mos,
			    next_clones_obj, dsobj, tx));
		}
	}

	/*
	 * If we have a reference-reservation on this dataset, we will
	 * need to increase the amount of refreservation being charged
	 * since our unique space is going to zero.
	 */
	if (ds->ds_reserved) {
		int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
		dsl_dir_diduse_space(ds->ds_dir, add, 0, 0, tx);
	}

	bplist_close(&ds->ds_deadlist);
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg);
	ds->ds_phys->ds_prev_snap_obj = dsobj;
	ds->ds_phys->ds_prev_snap_txg = crtxg;
	ds->ds_phys->ds_unique_bytes = 0;
	if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
	ds->ds_phys->ds_deadlist_obj =
	    bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx);
	VERIFY(0 == bplist_open(&ds->ds_deadlist, mos,
	    ds->ds_phys->ds_deadlist_obj));

	dprintf("snap '%s' -> obj %llu\n", snapname, dsobj);
	err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj,
	    snapname, 8, 1, &dsobj, tx);
	ASSERT(err == 0);

	if (ds->ds_prev)
		dsl_dataset_drop_ref(ds->ds_prev, ds);
	VERIFY(0 == dsl_dataset_get_ref(dp,
	    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));

	dsl_pool_ds_snapshotted(ds, tx);

	spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr,
	    "dataset = %llu", dsobj);
}

void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(ds->ds_user_ptr != NULL);
	ASSERT(ds->ds_phys->ds_next_snap_obj == 0);

	/*
	 * in case we had to change ds_fsid_guid when we opened it,
	 * sync it out now.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid;

	dsl_dir_dirty(ds->ds_dir, tx);
	dmu_objset_sync(ds->ds_user_ptr, zio, tx);
}

void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
	uint64_t refd, avail, uobjs, aobjs;

	dsl_dir_stats(ds->ds_dir, nv);

	dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
	    ds->ds_phys->ds_creation_time);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
	    ds->ds_phys->ds_creation_txg);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
	    ds->ds_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
	    ds->ds_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
	    ds->ds_phys->ds_guid);

	if (ds->ds_phys->ds_next_snap_obj) {
		/*
		 * This is a snapshot; override the dd's space used with
		 * our unique space and compression ratio.
		 */
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
		    ds->ds_phys->ds_unique_bytes);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
		    ds->ds_phys->ds_compressed_bytes == 0 ? 100 :
		    (ds->ds_phys->ds_uncompressed_bytes * 100 /
		    ds->ds_phys->ds_compressed_bytes));
	}
}
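
/*
 * Editorial note (not in the original source): the compressratio computed
 * above is expressed in hundredths.  For example, a snapshot with 300M
 * uncompressed and 100M compressed reports 300 (i.e. a 3.00x ratio); a
 * fully incompressible snapshot reports 100.
 */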

void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
	stat->dds_creation_txg = ds->ds_phys->ds_creation_txg;
	stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT;
	stat->dds_guid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_next_snap_obj) {
		stat->dds_is_snapshot = B_TRUE;
		stat->dds_num_clones = ds->ds_phys->ds_num_children - 1;
	}

	/* clone origin is really a dsl_dir thing... */
	rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(ds->ds_dir)) {
		dsl_dataset_t *ods;

		VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods));
		dsl_dataset_name(ods, stat->dds_origin);
		dsl_dataset_drop_ref(ods, FTAG);
	}
	rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock);
}

uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
	return (ds->ds_fsid_guid);
}

void
dsl_dataset_space(dsl_dataset_t *ds,
    uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp)
{
	*refdbytesp = ds->ds_phys->ds_used_bytes;
	*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes)
		*availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes;
	if (ds->ds_quota != 0) {
		/*
		 * Adjust available bytes according to refquota
		 */
		if (*refdbytesp < ds->ds_quota)
			*availbytesp = MIN(*availbytesp,
			    ds->ds_quota - *refdbytesp);
		else
			*availbytesp = 0;
	}
	*usedobjsp = ds->ds_phys->ds_bp.blk_fill;
	*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}

boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));
	if (ds->ds_prev == NULL)
		return (B_FALSE);
	if (ds->ds_phys->ds_bp.blk_birth >
	    ds->ds_prev->ds_phys->ds_creation_txg)
		return (B_TRUE);
	return (B_FALSE);
}

/* ARGSUSED */
static int
dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_dataset_t *hds;
	uint64_t val;
	int err;

	err = dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds);
	if (err)
		return (err);

	/* new name better not be in use */
	err = dsl_dataset_snap_lookup(hds, newsnapname, &val);
	dsl_dataset_rele(hds, FTAG);

	if (err == 0)
		err = EEXIST;
	else if (err == ENOENT)
		err = 0;

	/* dataset name + 1 for the "@" + the new snapshot name must fit */
	if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
		err = ENAMETOOLONG;

	return (err);
}

static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
static void
dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2,
    cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *newsnapname = arg2;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_dataset_t *hds;
	int err;

	ASSERT(ds->ds_phys->ds_next_snap_obj != 0);

	VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
	    dd->dd_phys->dd_head_dataset_obj, FTAG, &hds));

	VERIFY(0 == dsl_dataset_get_snapname(ds));
	err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx);
	ASSERT3U(err, ==, 0);
	mutex_enter(&ds->ds_lock);
	(void) strcpy(ds->ds_snapname, newsnapname);
	mutex_exit(&ds->ds_lock);
	err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj,
	    ds->ds_snapname, 8, 1, &ds->ds_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);
	dsl_dataset_rele(hds, FTAG);
}

struct renamesnaparg {
	dsl_sync_task_group_t *dstg;
	char failed[MAXPATHLEN];
	char *oldsnap;
	char *newsnap;
};

static int
dsl_snapshot_rename_one(char *name, void *arg)
{
	struct renamesnaparg *ra = arg;
	dsl_dataset_t *ds = NULL;
	char *cp;
	int err;

	cp = name + strlen(name);
	*cp = '@';
	(void) strcpy(cp + 1, ra->oldsnap);

	/*
	 * For recursive snapshot renames the parent won't be changing
	 * so we just pass name for both the to/from argument.
	 */
	if (err = zfs_secpolicy_rename_perms(name, name, CRED())) {
		(void) strcpy(ra->failed, name);
		return (err);
	}

#ifdef _KERNEL
	/*
	 * For each filesystem undergoing the rename, we need to unmount
	 * the snapshot first.
	 */
	(void) zfs_unmount_snap(name, NULL);
#endif
	err = dsl_dataset_hold(name, ra->dstg, &ds);
	*cp = '\0';
	if (err == ENOENT) {
		return (0);
	} else if (err) {
		(void) strcpy(ra->failed, name);
		return (err);
	}

	dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
	    dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);

	return (0);
}

static int
dsl_recursive_rename(char *oldname, const char *newname)
{
	int err;
	struct renamesnaparg *ra;
	dsl_sync_task_t *dst;
	spa_t *spa;
	char *cp, *fsname = spa_strdup(oldname);
	int len = strlen(oldname);

	/* truncate the snapshot name to get the fsname */
	cp = strchr(fsname, '@');
	*cp = '\0';

	err = spa_open(fsname, &spa, FTAG);
	if (err) {
		kmem_free(fsname, len + 1);
		return (err);
	}
	ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP);
	ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));

	ra->oldsnap = strchr(oldname, '@') + 1;
	ra->newsnap = strchr(newname, '@') + 1;
	*ra->failed = '\0';

	err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
	    DS_FIND_CHILDREN);
	kmem_free(fsname, len + 1);

	if (err == 0) {
		err = dsl_sync_task_group_wait(ra->dstg);
	}

	for (dst = list_head(&ra->dstg->dstg_tasks); dst;
	    dst = list_next(&ra->dstg->dstg_tasks, dst)) {
		dsl_dataset_t *ds = dst->dst_arg1;
		if (dst->dst_err) {
			dsl_dir_name(ds->ds_dir, ra->failed);
			(void) strcat(ra->failed, "@");
			(void) strcat(ra->failed, ra->newsnap);
		}
		dsl_dataset_rele(ds, ra->dstg);
	}

	if (err)
		(void) strcpy(oldname, ra->failed);

	dsl_sync_task_group_destroy(ra->dstg);
	kmem_free(ra, sizeof (struct renamesnaparg));
	spa_close(spa, FTAG);
	return (err);
}

static int
dsl_valid_rename(char *oldname, void *arg)
{
	int delta = *(int *)arg;

	if (strlen(oldname) + delta >= MAXNAMELEN)
		return (ENAMETOOLONG);

	return (0);
}

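/*
 * Rename a dataset.  If the name does not contain an "@", the entire
 * dsl_dir is renamed (after validating that no child name would exceed
 * MAXNAMELEN).  Otherwise a single snapshot is renamed, or, if
 * 'recursive' is set, the same snapshot is renamed in every descendant
 * filesystem.  The new name must be a snapshot of the same filesystem.
 */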
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	const char *tail;
	int err;

	err = dsl_dir_open(oldname, FTAG, &dd, &tail);
	if (err)
		return (err);
	if (tail == NULL) {
		int delta = strlen(newname) - strlen(oldname);

		/* if we're growing, validate child name lengths */
		if (delta > 0)
			err = dmu_objset_find(oldname, dsl_valid_rename,
			    &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

		if (!err)
			err = dsl_dir_rename(dd, newname);
		dsl_dir_close(dd, FTAG);
		return (err);
	}
	if (tail[0] != '@') {
		/* the name ended in a nonexistent component */
		dsl_dir_close(dd, FTAG);
		return (ENOENT);
	}

	dsl_dir_close(dd, FTAG);

	/* the new name must be a snapshot in the same filesystem */
	tail = strchr(newname, '@');
	if (tail == NULL)
		return (EINVAL);
	tail++;
	if (strncmp(oldname, newname, tail - newname) != 0)
		return (EXDEV);

	if (recursive) {
		err = dsl_recursive_rename(oldname, newname);
	} else {
		err = dsl_dataset_hold(oldname, FTAG, &ds);
		if (err)
			return (err);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_snapshot_rename_check,
		    dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);

		dsl_dataset_rele(ds, FTAG);
	}

	return (err);
}

struct promotenode {
	list_node_t link;
	dsl_dataset_t *ds;
};

struct promotearg {
	list_t snap_list;
	dsl_dataset_t *clone_origin, *old_head;
	uint64_t used, comp, uncomp, unique;
	uint64_t newnext_obj;
};

/* ARGSUSED */
static int
dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->snap_list);
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dataset_t *newnext_ds;
	char *name;
	uint64_t itor = 0;
	blkptr_t bp;
	int err;

	/* Check that it is a real clone */
	if (!dsl_dir_is_clone(hds->ds_dir))
		return (EINVAL);

	/* Since this is so expensive, don't do the preliminary check */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)
		return (EXDEV);

	/* find origin's new next ds */
	newnext_ds = hds;
	while (newnext_ds->ds_phys->ds_prev_snap_obj != origin_ds->ds_object) {
		dsl_dataset_t *prev;

		err = dsl_dataset_hold_obj(dp,
		    newnext_ds->ds_phys->ds_prev_snap_obj, FTAG, &prev);
		if (newnext_ds != hds)
			dsl_dataset_rele(newnext_ds, FTAG);
		if (err)
			return (err);
		newnext_ds = prev;
	}
	pa->newnext_obj = newnext_ds->ds_object;

	/* compute origin's new unique space */
	pa->unique = 0;
	while ((err = bplist_iterate(&newnext_ds->ds_deadlist,
	    &itor, &bp)) == 0) {
		if (bp.blk_birth > origin_ds->ds_phys->ds_prev_snap_txg)
			pa->unique += bp_get_dasize(dp->dp_spa, &bp);
	}
	if (newnext_ds != hds)
		dsl_dataset_rele(newnext_ds, FTAG);
	if (err != ENOENT)
		return (err);

	name = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	/*
	 * Walk the snapshots that we are moving
	 *
	 * Compute space to transfer.  Each snapshot gave birth to:
	 * (my used) - (prev's used) + (deadlist's used)
	 * So a sequence would look like:
	 * uN - u(N-1) + dN + ... + u1 - u0 + d1 + u0 - 0 + d0
	 * Which simplifies to:
	 * uN + dN + ... + d1 + d0
	 * Note however, if we stop before we reach the ORIGIN we get:
	 * uN + dN + ... + dM - u(M-1)
	 */
	pa->used = origin_ds->ds_phys->ds_used_bytes;
	pa->comp = origin_ds->ds_phys->ds_compressed_bytes;
	pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes;
	do {
		uint64_t val, dlused, dlcomp, dluncomp;
		dsl_dataset_t *ds = snap->ds;

		/* Check that the snapshot name does not conflict */
		dsl_dataset_name(ds, name);
		err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
		if (err == 0)
			err = EEXIST;
		if (err != ENOENT)
			break;
		err = 0;

		/* The very first snapshot does not have a deadlist */
		if (ds->ds_phys->ds_prev_snap_obj != 0) {
			if (err = bplist_space(&ds->ds_deadlist,
			    &dlused, &dlcomp, &dluncomp))
				break;
			pa->used += dlused;
			pa->comp += dlcomp;
			pa->uncomp += dluncomp;
		}
	} while (snap = list_next(&pa->snap_list, snap));

	/*
	 * If we are a clone of a clone then we never reached ORIGIN,
	 * so we need to subtract out the clone origin's used space.
	 */
	if (pa->clone_origin) {
		pa->used -= pa->clone_origin->ds_phys->ds_used_bytes;
		pa->comp -= pa->clone_origin->ds_phys->ds_compressed_bytes;
		pa->uncomp -= pa->clone_origin->ds_phys->ds_uncompressed_bytes;
	}

	kmem_free(name, MAXPATHLEN);

	/* Check that there is enough space here */
	if (err == 0) {
		dsl_dir_t *odd = origin_ds->ds_dir;
		err = dsl_dir_transfer_possible(odd, hds->ds_dir, pa->used);
	}

	return (err);
}

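/*
 * Sync task that performs the promotion: the origin's snapshots are
 * renamed into the promoted dataset's namespace, the dsl_dir origins are
 * exchanged, and the space those snapshots account for is moved from the
 * old head's dsl_dir to the promoted one.
 */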
static void
dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *hds = arg1;
	struct promotearg *pa = arg2;
	struct promotenode *snap = list_head(&pa->snap_list);
	dsl_dataset_t *origin_ds = snap->ds;
	dsl_dir_t *dd = hds->ds_dir;
	dsl_pool_t *dp = hds->ds_dir->dd_pool;
	dsl_dir_t *odd = NULL;
	char *name;
	uint64_t oldnext_obj;

	ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE));

	/*
	 * We need to explicitly open odd, since origin_ds's dd will be
	 * changing.
	 */
	VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object,
	    NULL, FTAG, &odd));

	/* change origin's next snap */
	dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
	oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj;
	origin_ds->ds_phys->ds_next_snap_obj = pa->newnext_obj;

	/* change the origin's next clone */
	if (origin_ds->ds_phys->ds_next_clones_obj) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    pa->newnext_obj, tx));
		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin_ds->ds_phys->ds_next_clones_obj,
		    oldnext_obj, tx));
	}

	/* change origin */
	dmu_buf_will_dirty(dd->dd_dbuf, tx);
	ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object);
	dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj;
	dmu_buf_will_dirty(odd->dd_dbuf, tx);
	odd->dd_phys->dd_origin_obj = origin_ds->ds_object;

	/* move snapshots to this dir */
	name = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	do {
		dsl_dataset_t *ds = snap->ds;

		/* unregister props as dsl_dir is changing */
		if (ds->ds_user_ptr) {
			ds->ds_user_evict_func(ds, ds->ds_user_ptr);
			ds->ds_user_ptr = NULL;
		}
		/* move snap name entry */
		dsl_dataset_name(ds, name);
		VERIFY(0 == dsl_dataset_snap_remove(pa->old_head,
		    ds->ds_snapname, tx));
		VERIFY(0 == zap_add(dp->dp_meta_objset,
		    hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname,
		    8, 1, &ds->ds_object, tx));
		/* change containing dsl_dir */
		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object);
		ds->ds_phys->ds_dir_obj = dd->dd_object;
		ASSERT3P(ds->ds_dir, ==, odd);
		dsl_dir_close(ds->ds_dir, ds);
		VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object,
		    NULL, ds, &ds->ds_dir));

		ASSERT3U(dsl_prop_numcb(ds), ==, 0);
	} while (snap = list_next(&pa->snap_list, snap));

	/* change space accounting */
	dsl_dir_diduse_space(odd, -pa->used, -pa->comp, -pa->uncomp, tx);
	dsl_dir_diduse_space(dd, pa->used, pa->comp, pa->uncomp, tx);
	origin_ds->ds_phys->ds_unique_bytes = pa->unique;

	/* log history record */
	spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx,
	    cr, "dataset = %llu", hds->ds_object);

	dsl_dir_close(odd, FTAG);
	kmem_free(name, MAXPATHLEN);
}

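/*
 * Promote a clone: take ownership of the snapshots that will move to the
 * promoted dataset (those at or before its origin, back to the parent's
 * branch point or $ORIGIN), then run the check/sync tasks above to
 * transfer them and the space they account for.
 */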
int
dsl_dataset_promote(const char *name)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	dmu_object_info_t doi;
	struct promotearg pa;
	struct promotenode *snap;
	uint64_t snap_obj;
	uint64_t last_snap = 0;
	int err;

	err = dsl_dataset_hold(name, FTAG, &ds);
	if (err)
		return (err);
	dd = ds->ds_dir;
	dp = dd->dd_pool;

	err = dmu_object_info(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, &doi);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	/*
	 * We are going to inherit all the snapshots taken before our
	 * origin (i.e., our new origin will be our parent's origin).
	 * Take ownership of them so that we can rename them into our
	 * namespace.
	 */
	pa.clone_origin = NULL;
	list_create(&pa.snap_list,
	    sizeof (struct promotenode), offsetof(struct promotenode, link));
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	ASSERT(dd->dd_phys->dd_origin_obj != 0);
	snap_obj = dd->dd_phys->dd_origin_obj;
	while (snap_obj) {
		dsl_dataset_t *snapds;

		/*
		 * NB: this would be handled by the below check for
		 * clone of a clone, but then we'd always own_obj() the
		 * $ORIGIN, thus causing unnecessary EBUSYs.  We don't
		 * need to set pa.clone_origin because the $ORIGIN has
		 * no data to account for.
		 */
		if (dp->dp_origin_snap &&
		    snap_obj == dp->dp_origin_snap->ds_object)
			break;

		err = dsl_dataset_own_obj(dp, snap_obj, 0, FTAG, &snapds);
		if (err == ENOENT) {
			/* lost race with snapshot destroy */
			struct promotenode *last = list_tail(&pa.snap_list);
			ASSERT(snap_obj != last->ds->ds_phys->ds_prev_snap_obj);
			snap_obj = last->ds->ds_phys->ds_prev_snap_obj;
			continue;
		} else if (err) {
			rw_exit(&dp->dp_config_rwlock);
			goto out;
		}

		/*
		 * We could be a clone of a clone.  If we reach our
		 * parent's branch point, we're done.
		 */
		if (last_snap &&
		    snapds->ds_phys->ds_next_snap_obj != last_snap) {
			pa.clone_origin = snapds;
			break;
		}

		snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP);
		snap->ds = snapds;
		list_insert_tail(&pa.snap_list, snap);
		last_snap = snap_obj;
		snap_obj = snap->ds->ds_phys->ds_prev_snap_obj;
	}
	snap = list_head(&pa.snap_list);
	ASSERT(snap != NULL);
	err = dsl_dataset_hold_obj(dp,
	    snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &pa.old_head);
	rw_exit(&dp->dp_config_rwlock);

	if (err)
		goto out;

	/*
	 * Add in 128x the snapnames zapobj size, since we will be moving
	 * a bunch of snapnames to the promoted ds, and dirtying their
	 * bonus buffers.
	 */
	err = dsl_sync_task_do(dp, dsl_dataset_promote_check,
	    dsl_dataset_promote_sync, ds, &pa, 2 + 2 * doi.doi_physical_blks);

	dsl_dataset_rele(pa.old_head, FTAG);
out:
	while ((snap = list_tail(&pa.snap_list)) != NULL) {
		list_remove(&pa.snap_list, snap);
		dsl_dataset_disown(snap->ds, FTAG);
		kmem_free(snap, sizeof (struct promotenode));
	}
	list_destroy(&pa.snap_list);
	if (pa.clone_origin)
		dsl_dataset_disown(pa.clone_origin, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

struct cloneswaparg {
	dsl_dataset_t *cds; /* clone dataset */
	dsl_dataset_t *ohds; /* origin's head dataset */
	boolean_t force;
	int64_t unused_refres_delta; /* change in unconsumed refreservation */
};

/* ARGSUSED */
static int
dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;

	/* they should both be heads */
	if (dsl_dataset_is_snapshot(csa->cds) ||
	    dsl_dataset_is_snapshot(csa->ohds))
		return (EINVAL);

	/* the branch point should be just before them */
	if (csa->cds->ds_prev != csa->ohds->ds_prev)
		return (EINVAL);

	/* cds should be the clone */
	if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj !=
	    csa->ohds->ds_object)
		return (EINVAL);

	/* the clone should be a child of the origin */
	if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir)
		return (EINVAL);

	/* ohds shouldn't be modified unless 'force' */
	if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds))
		return (ETXTBSY);

	/* adjust amount of any unconsumed refreservation */
	csa->unused_refres_delta =
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->ohds->ds_phys->ds_unique_bytes) -
	    (int64_t)MIN(csa->ohds->ds_reserved,
	    csa->cds->ds_phys->ds_unique_bytes);

	if (csa->unused_refres_delta > 0 &&
	    csa->unused_refres_delta >
	    dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);

	return (0);
}

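/*
 * Sync task that swaps the contents of the clone and its origin head:
 * the objset blkptrs, deadlists, and ds_*_bytes accounting are exchanged,
 * and the dsl_dir space usage is adjusted to match.
 */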
/* ARGSUSED */
static void
dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	struct cloneswaparg *csa = arg1;
	dsl_pool_t *dp = csa->cds->ds_dir->dd_pool;
	uint64_t itor = 0;
	blkptr_t bp;
	uint64_t unique = 0;
	int err;

	ASSERT(csa->cds->ds_reserved == 0);
	ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota);

	dmu_buf_will_dirty(csa->cds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx);
	dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx);

	if (csa->cds->ds_user_ptr != NULL) {
		csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr);
		csa->cds->ds_user_ptr = NULL;
	}

	if (csa->ohds->ds_user_ptr != NULL) {
		csa->ohds->ds_user_evict_func(csa->ohds,
		    csa->ohds->ds_user_ptr);
		csa->ohds->ds_user_ptr = NULL;
	}

	/* compute unique space */
	while ((err = bplist_iterate(&csa->cds->ds_deadlist,
	    &itor, &bp)) == 0) {
		if (bp.blk_birth > csa->cds->ds_prev->ds_phys->ds_prev_snap_txg)
			unique += bp_get_dasize(dp->dp_spa, &bp);
	}
	VERIFY(err == ENOENT);

	/* reset origin's unique bytes */
	csa->cds->ds_prev->ds_phys->ds_unique_bytes = unique;

	/* swap blkptrs */
	{
		blkptr_t tmp;
		tmp = csa->ohds->ds_phys->ds_bp;
		csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp;
		csa->cds->ds_phys->ds_bp = tmp;
	}

	/* set dd_*_bytes */
	{
		int64_t dused, dcomp, duncomp;
		uint64_t cdl_used, cdl_comp, cdl_uncomp;
		uint64_t odl_used, odl_comp, odl_uncomp;

		VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used,
		    &cdl_comp, &cdl_uncomp));
		VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used,
		    &odl_comp, &odl_uncomp));
		dused = csa->cds->ds_phys->ds_used_bytes + cdl_used -
		    (csa->ohds->ds_phys->ds_used_bytes + odl_used);
		dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp -
		    (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp);
		duncomp = csa->cds->ds_phys->ds_uncompressed_bytes +
		    cdl_uncomp -
		    (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp);

		dsl_dir_diduse_space(csa->ohds->ds_dir,
		    dused, dcomp, duncomp, tx);
		dsl_dir_diduse_space(csa->cds->ds_dir,
		    -dused, -dcomp, -duncomp, tx);
	}

#define	SWITCH64(x, y) \
	{ \
		uint64_t __tmp = (x); \
		(x) = (y); \
		(y) = __tmp; \
	}

	/* swap ds_*_bytes */
	SWITCH64(csa->ohds->ds_phys->ds_used_bytes,
	    csa->cds->ds_phys->ds_used_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes,
	    csa->cds->ds_phys->ds_compressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes,
	    csa->cds->ds_phys->ds_uncompressed_bytes);
	SWITCH64(csa->ohds->ds_phys->ds_unique_bytes,
	    csa->cds->ds_phys->ds_unique_bytes);

	/* apply any parent delta for change in unconsumed refreservation */
	dsl_dir_diduse_space(csa->ohds->ds_dir, csa->unused_refres_delta,
	    0, 0, tx);

	/* swap deadlists */
	bplist_close(&csa->cds->ds_deadlist);
	bplist_close(&csa->ohds->ds_deadlist);
	SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj,
	    csa->cds->ds_phys->ds_deadlist_obj);
	VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset,
	    csa->cds->ds_phys->ds_deadlist_obj));
	VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset,
	    csa->ohds->ds_phys->ds_deadlist_obj));
}

/*
 * Swap 'clone' with its origin head file system.  Used at the end
 * of "online recv" to swizzle the file system to the new version.
 */
int
dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head,
    boolean_t force)
{
	struct cloneswaparg csa;
	int error;

	ASSERT(clone->ds_owner);
	ASSERT(origin_head->ds_owner);
retry:
	/* Need exclusive access for the swap */
	rw_enter(&clone->ds_rwlock, RW_WRITER);
	if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) {
		rw_exit(&clone->ds_rwlock);
		rw_enter(&origin_head->ds_rwlock, RW_WRITER);
		if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) {
			rw_exit(&origin_head->ds_rwlock);
			goto retry;
		}
	}
	csa.cds = clone;
	csa.ohds = origin_head;
	csa.force = force;
	error = dsl_sync_task_do(clone->ds_dir->dd_pool,
	    dsl_dataset_clone_swap_check,
	    dsl_dataset_clone_swap_sync, &csa, NULL, 9);
	return (error);
}

/*
 * Given a pool name and a dataset object number in that pool,
 * return the name of that dataset.
 */
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
	spa_t *spa;
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = spa_open(pname, &spa, FTAG)) != 0)
		return (error);
	dp = spa_get_dsl(spa);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) {
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
	}
	rw_exit(&dp->dp_config_rwlock);
	spa_close(spa, FTAG);

	return (error);
}

int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
    uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
	int error = 0;

	ASSERT3S(asize, >, 0);

	/*
	 * *ref_rsrv is the portion of asize that will come from any
	 * unconsumed refreservation space.
	 */
	*ref_rsrv = 0;

	mutex_enter(&ds->ds_lock);
	/*
	 * Make a space adjustment for reserved bytes.
	 */
	if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) {
		ASSERT3U(*used, >=,
		    ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes);
		*ref_rsrv =
		    asize - MIN(asize, parent_delta(ds, asize + inflight));
	}

	if (!check_quota || ds->ds_quota == 0) {
		mutex_exit(&ds->ds_lock);
		return (0);
	}
	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk usage is over quota and there are no pending changes
	 * (which may free up space for us).
	 */
	if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) {
		if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota)
			error = ERESTART;
		else
			error = EDQUOT;
	}
	mutex_exit(&ds->ds_lock);

	return (error);
}

/* ARGSUSED */
static int
dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA)
		return (ENOTSUP);

	if (new_quota == 0)
		return (0);

	if (new_quota < ds->ds_phys->ds_used_bytes ||
	    new_quota < ds->ds_reserved)
		return (ENOSPC);

	return (0);
}

/* ARGSUSED */
void
dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *quotap = arg2;
	uint64_t new_quota = *quotap;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	ds->ds_quota = new_quota;

	dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx);

	spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa,
	    tx, cr, "%lld dataset = %llu ",
	    (longlong_t)new_quota, ds->ds_object);
}

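/*
 * Set the refquota property on the named dataset.  A nonzero quota must
 * cover both the space already referenced and any refreservation.
 */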
int
dsl_dataset_set_quota(const char *dsname, uint64_t quota)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	if (quota != ds->ds_quota) {
		/*
		 * If someone removes a file, then tries to set the quota, we
		 * want to make sure the file freeing takes effect.
		 */
		txg_wait_open(ds->ds_dir->dd_pool, 0);

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync,
		    ds, &quota, 0);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

static int
dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	int64_t delta;
	uint64_t unique;

	if (new_reservation > INT64_MAX)
		return (EOVERFLOW);

	if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
	    SPA_VERSION_REFRESERVATION)
		return (ENOTSUP);

	if (dsl_dataset_is_snapshot(ds))
		return (EINVAL);

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&ds->ds_lock);
	unique = dsl_dataset_unique(ds);
	delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved);
	mutex_exit(&ds->ds_lock);

	if (delta > 0 &&
	    delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
		return (ENOSPC);
	if (delta > 0 && ds->ds_quota > 0 &&
	    new_reservation > ds->ds_quota)
		return (ENOSPC);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr,
    dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	uint64_t *reservationp = arg2;
	uint64_t new_reservation = *reservationp;
	uint64_t unique;
	int64_t delta;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	mutex_enter(&ds->ds_lock);
	unique = dsl_dataset_unique(ds);
	delta = MAX(0, (int64_t)(new_reservation - unique)) -
	    MAX(0, (int64_t)(ds->ds_reserved - unique));
	ds->ds_reserved = new_reservation;
	mutex_exit(&ds->ds_lock);

	dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation",
	    new_reservation, cr, tx);

	dsl_dir_diduse_space(ds->ds_dir, delta, 0, 0, tx);

	spa_history_internal_log(LOG_DS_REFRESERV,
	    ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu",
	    (longlong_t)new_reservation,
	    ds->ds_dir->dd_phys->dd_head_dataset_obj);
}

int
dsl_dataset_set_reservation(const char *dsname, uint64_t reservation)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    dsl_dataset_set_reservation_check,
	    dsl_dataset_set_reservation_sync, ds, &reservation, 0);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}