1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 #include <sys/dmu_objset.h> 26 #include <sys/dsl_dataset.h> 27 #include <sys/dsl_dir.h> 28 #include <sys/dsl_prop.h> 29 #include <sys/dsl_synctask.h> 30 #include <sys/dmu_traverse.h> 31 #include <sys/dmu_tx.h> 32 #include <sys/arc.h> 33 #include <sys/zio.h> 34 #include <sys/zap.h> 35 #include <sys/unique.h> 36 #include <sys/zfs_context.h> 37 #include <sys/zfs_ioctl.h> 38 #include <sys/spa.h> 39 #include <sys/zfs_znode.h> 40 #include <sys/zfs_onexit.h> 41 #include <sys/zvol.h> 42 #include <sys/dsl_scan.h> 43 #include <sys/dsl_deadlist.h> 44 45 static char *dsl_reaper = "the grim reaper"; 46 47 static dsl_checkfunc_t dsl_dataset_destroy_begin_check; 48 static dsl_syncfunc_t dsl_dataset_destroy_begin_sync; 49 static dsl_syncfunc_t dsl_dataset_set_reservation_sync; 50 51 #define SWITCH64(x, y) \ 52 { \ 53 uint64_t __tmp = (x); \ 54 (x) = (y); \ 55 (y) = __tmp; \ 56 } 57 58 #define DS_REF_MAX (1ULL << 62) 59 60 #define DSL_DEADLIST_BLOCKSIZE SPA_MAXBLOCKSIZE 61 62 #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper) 63 64 65 /* 66 * Figure out how much of this delta should be propogated to the dsl_dir 67 * layer. If there's a refreservation, that space has already been 68 * partially accounted for in our ancestors. 69 */ 70 static int64_t 71 parent_delta(dsl_dataset_t *ds, int64_t delta) 72 { 73 uint64_t old_bytes, new_bytes; 74 75 if (ds->ds_reserved == 0) 76 return (delta); 77 78 old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 79 new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved); 80 81 ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta)); 82 return (new_bytes - old_bytes); 83 } 84 85 void 86 dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) 87 { 88 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); 89 int compressed = BP_GET_PSIZE(bp); 90 int uncompressed = BP_GET_UCSIZE(bp); 91 int64_t delta; 92 93 dprintf_bp(bp, "ds=%p", ds); 94 95 ASSERT(dmu_tx_is_syncing(tx)); 96 /* It could have been compressed away to nothing */ 97 if (BP_IS_HOLE(bp)) 98 return; 99 ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE); 100 ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES); 101 if (ds == NULL) { 102 /* 103 * Account for the meta-objset space in its placeholder 104 * dsl_dir. 
105 */ 106 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */ 107 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, 108 used, compressed, uncompressed, tx); 109 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 110 return; 111 } 112 dmu_buf_will_dirty(ds->ds_dbuf, tx); 113 114 mutex_enter(&ds->ds_dir->dd_lock); 115 mutex_enter(&ds->ds_lock); 116 delta = parent_delta(ds, used); 117 ds->ds_phys->ds_used_bytes += used; 118 ds->ds_phys->ds_compressed_bytes += compressed; 119 ds->ds_phys->ds_uncompressed_bytes += uncompressed; 120 ds->ds_phys->ds_unique_bytes += used; 121 mutex_exit(&ds->ds_lock); 122 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta, 123 compressed, uncompressed, tx); 124 dsl_dir_transfer_space(ds->ds_dir, used - delta, 125 DD_USED_REFRSRV, DD_USED_HEAD, tx); 126 mutex_exit(&ds->ds_dir->dd_lock); 127 } 128 129 int 130 dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, 131 boolean_t async) 132 { 133 if (BP_IS_HOLE(bp)) 134 return (0); 135 136 ASSERT(dmu_tx_is_syncing(tx)); 137 ASSERT(bp->blk_birth <= tx->tx_txg); 138 139 int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); 140 int compressed = BP_GET_PSIZE(bp); 141 int uncompressed = BP_GET_UCSIZE(bp); 142 143 ASSERT(used > 0); 144 if (ds == NULL) { 145 /* 146 * Account for the meta-objset space in its placeholder 147 * dataset. 148 */ 149 dsl_free(tx->tx_pool, tx->tx_txg, bp); 150 151 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, DD_USED_HEAD, 152 -used, -compressed, -uncompressed, tx); 153 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 154 return (used); 155 } 156 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); 157 158 ASSERT(!dsl_dataset_is_snapshot(ds)); 159 dmu_buf_will_dirty(ds->ds_dbuf, tx); 160 161 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) { 162 int64_t delta; 163 164 dprintf_bp(bp, "freeing ds=%llu", ds->ds_object); 165 dsl_free(tx->tx_pool, tx->tx_txg, bp); 166 167 mutex_enter(&ds->ds_dir->dd_lock); 168 mutex_enter(&ds->ds_lock); 169 ASSERT(ds->ds_phys->ds_unique_bytes >= used || 170 !DS_UNIQUE_IS_ACCURATE(ds)); 171 delta = parent_delta(ds, -used); 172 ds->ds_phys->ds_unique_bytes -= used; 173 mutex_exit(&ds->ds_lock); 174 dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, 175 delta, -compressed, -uncompressed, tx); 176 dsl_dir_transfer_space(ds->ds_dir, -used - delta, 177 DD_USED_REFRSRV, DD_USED_HEAD, tx); 178 mutex_exit(&ds->ds_dir->dd_lock); 179 } else { 180 dprintf_bp(bp, "putting on dead list: %s", ""); 181 if (async) { 182 /* 183 * We are here as part of zio's write done callback, 184 * which means we're a zio interrupt thread. We can't 185 * call dsl_deadlist_insert() now because it may block 186 * waiting for I/O. Instead, put bp on the deferred 187 * queue and let dsl_pool_sync() finish the job. 
188 */ 189 bplist_append(&ds->ds_pending_deadlist, bp); 190 } else { 191 dsl_deadlist_insert(&ds->ds_deadlist, bp, tx); 192 } 193 ASSERT3U(ds->ds_prev->ds_object, ==, 194 ds->ds_phys->ds_prev_snap_obj); 195 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0); 196 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */ 197 if (ds->ds_prev->ds_phys->ds_next_snap_obj == 198 ds->ds_object && bp->blk_birth > 199 ds->ds_prev->ds_phys->ds_prev_snap_txg) { 200 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 201 mutex_enter(&ds->ds_prev->ds_lock); 202 ds->ds_prev->ds_phys->ds_unique_bytes += used; 203 mutex_exit(&ds->ds_prev->ds_lock); 204 } 205 if (bp->blk_birth > ds->ds_dir->dd_origin_txg) { 206 dsl_dir_transfer_space(ds->ds_dir, used, 207 DD_USED_HEAD, DD_USED_SNAP, tx); 208 } 209 } 210 mutex_enter(&ds->ds_lock); 211 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used); 212 ds->ds_phys->ds_used_bytes -= used; 213 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed); 214 ds->ds_phys->ds_compressed_bytes -= compressed; 215 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed); 216 ds->ds_phys->ds_uncompressed_bytes -= uncompressed; 217 mutex_exit(&ds->ds_lock); 218 219 return (used); 220 } 221 222 uint64_t 223 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds) 224 { 225 uint64_t trysnap = 0; 226 227 if (ds == NULL) 228 return (0); 229 /* 230 * The snapshot creation could fail, but that would cause an 231 * incorrect FALSE return, which would only result in an 232 * overestimation of the amount of space that an operation would 233 * consume, which is OK. 234 * 235 * There's also a small window where we could miss a pending 236 * snapshot, because we could set the sync task in the quiescing 237 * phase. So this should only be used as a guess. 238 */ 239 if (ds->ds_trysnap_txg > 240 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa)) 241 trysnap = ds->ds_trysnap_txg; 242 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap)); 243 } 244 245 boolean_t 246 dsl_dataset_block_freeable(dsl_dataset_t *ds, const blkptr_t *bp, 247 uint64_t blk_birth) 248 { 249 if (blk_birth <= dsl_dataset_prev_snap_txg(ds)) 250 return (B_FALSE); 251 252 ddt_prefetch(dsl_dataset_get_spa(ds), bp); 253 254 return (B_TRUE); 255 } 256 257 /* ARGSUSED */ 258 static void 259 dsl_dataset_evict(dmu_buf_t *db, void *dsv) 260 { 261 dsl_dataset_t *ds = dsv; 262 263 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds)); 264 265 unique_remove(ds->ds_fsid_guid); 266 267 if (ds->ds_objset != NULL) 268 dmu_objset_evict(ds->ds_objset); 269 270 if (ds->ds_prev) { 271 dsl_dataset_drop_ref(ds->ds_prev, ds); 272 ds->ds_prev = NULL; 273 } 274 275 bplist_destroy(&ds->ds_pending_deadlist); 276 if (db != NULL) { 277 dsl_deadlist_close(&ds->ds_deadlist); 278 } else { 279 ASSERT(ds->ds_deadlist.dl_dbuf == NULL); 280 ASSERT(!ds->ds_deadlist.dl_oldfmt); 281 } 282 if (ds->ds_dir) 283 dsl_dir_close(ds->ds_dir, ds); 284 285 ASSERT(!list_link_active(&ds->ds_synced_link)); 286 287 mutex_destroy(&ds->ds_lock); 288 mutex_destroy(&ds->ds_recvlock); 289 mutex_destroy(&ds->ds_opening_lock); 290 rw_destroy(&ds->ds_rwlock); 291 cv_destroy(&ds->ds_exclusive_cv); 292 293 kmem_free(ds, sizeof (dsl_dataset_t)); 294 } 295 296 static int 297 dsl_dataset_get_snapname(dsl_dataset_t *ds) 298 { 299 dsl_dataset_phys_t *headphys; 300 int err; 301 dmu_buf_t *headdbuf; 302 dsl_pool_t *dp = ds->ds_dir->dd_pool; 303 objset_t *mos = dp->dp_meta_objset; 304 305 if (ds->ds_snapname[0]) 306 return (0); 307 if (ds->ds_phys->ds_next_snap_obj == 0) 308 return (0); 309 310 err = 
dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj, 311 FTAG, &headdbuf); 312 if (err) 313 return (err); 314 headphys = headdbuf->db_data; 315 err = zap_value_search(dp->dp_meta_objset, 316 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname); 317 dmu_buf_rele(headdbuf, FTAG); 318 return (err); 319 } 320 321 static int 322 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value) 323 { 324 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 325 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 326 matchtype_t mt; 327 int err; 328 329 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 330 mt = MT_FIRST; 331 else 332 mt = MT_EXACT; 333 334 err = zap_lookup_norm(mos, snapobj, name, 8, 1, 335 value, mt, NULL, 0, NULL); 336 if (err == ENOTSUP && mt == MT_FIRST) 337 err = zap_lookup(mos, snapobj, name, 8, 1, value); 338 return (err); 339 } 340 341 static int 342 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx) 343 { 344 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 345 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 346 matchtype_t mt; 347 int err; 348 349 dsl_dir_snap_cmtime_update(ds->ds_dir); 350 351 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 352 mt = MT_FIRST; 353 else 354 mt = MT_EXACT; 355 356 err = zap_remove_norm(mos, snapobj, name, mt, tx); 357 if (err == ENOTSUP && mt == MT_FIRST) 358 err = zap_remove(mos, snapobj, name, tx); 359 return (err); 360 } 361 362 static int 363 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag, 364 dsl_dataset_t **dsp) 365 { 366 objset_t *mos = dp->dp_meta_objset; 367 dmu_buf_t *dbuf; 368 dsl_dataset_t *ds; 369 int err; 370 dmu_object_info_t doi; 371 372 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 373 dsl_pool_sync_context(dp)); 374 375 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf); 376 if (err) 377 return (err); 378 379 /* Make sure dsobj has the correct object type. 
*/ 380 dmu_object_info_from_db(dbuf, &doi); 381 if (doi.doi_type != DMU_OT_DSL_DATASET) 382 return (EINVAL); 383 384 ds = dmu_buf_get_user(dbuf); 385 if (ds == NULL) { 386 dsl_dataset_t *winner; 387 388 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP); 389 ds->ds_dbuf = dbuf; 390 ds->ds_object = dsobj; 391 ds->ds_phys = dbuf->db_data; 392 393 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL); 394 mutex_init(&ds->ds_recvlock, NULL, MUTEX_DEFAULT, NULL); 395 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL); 396 rw_init(&ds->ds_rwlock, 0, 0, 0); 397 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL); 398 399 bplist_create(&ds->ds_pending_deadlist); 400 dsl_deadlist_open(&ds->ds_deadlist, 401 mos, ds->ds_phys->ds_deadlist_obj); 402 403 if (err == 0) { 404 err = dsl_dir_open_obj(dp, 405 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir); 406 } 407 if (err) { 408 mutex_destroy(&ds->ds_lock); 409 mutex_destroy(&ds->ds_recvlock); 410 mutex_destroy(&ds->ds_opening_lock); 411 rw_destroy(&ds->ds_rwlock); 412 cv_destroy(&ds->ds_exclusive_cv); 413 bplist_destroy(&ds->ds_pending_deadlist); 414 dsl_deadlist_close(&ds->ds_deadlist); 415 kmem_free(ds, sizeof (dsl_dataset_t)); 416 dmu_buf_rele(dbuf, tag); 417 return (err); 418 } 419 420 if (!dsl_dataset_is_snapshot(ds)) { 421 ds->ds_snapname[0] = '\0'; 422 if (ds->ds_phys->ds_prev_snap_obj) { 423 err = dsl_dataset_get_ref(dp, 424 ds->ds_phys->ds_prev_snap_obj, 425 ds, &ds->ds_prev); 426 } 427 } else { 428 if (zfs_flags & ZFS_DEBUG_SNAPNAMES) 429 err = dsl_dataset_get_snapname(ds); 430 if (err == 0 && ds->ds_phys->ds_userrefs_obj != 0) { 431 err = zap_count( 432 ds->ds_dir->dd_pool->dp_meta_objset, 433 ds->ds_phys->ds_userrefs_obj, 434 &ds->ds_userrefs); 435 } 436 } 437 438 if (err == 0 && !dsl_dataset_is_snapshot(ds)) { 439 /* 440 * In sync context, we're called with either no lock 441 * or with the write lock. If we're not syncing, 442 * we're always called with the read lock held. 
443 */ 444 boolean_t need_lock = 445 !RW_WRITE_HELD(&dp->dp_config_rwlock) && 446 dsl_pool_sync_context(dp); 447 448 if (need_lock) 449 rw_enter(&dp->dp_config_rwlock, RW_READER); 450 451 err = dsl_prop_get_ds(ds, 452 "refreservation", sizeof (uint64_t), 1, 453 &ds->ds_reserved, NULL); 454 if (err == 0) { 455 err = dsl_prop_get_ds(ds, 456 "refquota", sizeof (uint64_t), 1, 457 &ds->ds_quota, NULL); 458 } 459 460 if (need_lock) 461 rw_exit(&dp->dp_config_rwlock); 462 } else { 463 ds->ds_reserved = ds->ds_quota = 0; 464 } 465 466 if (err == 0) { 467 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys, 468 dsl_dataset_evict); 469 } 470 if (err || winner) { 471 bplist_destroy(&ds->ds_pending_deadlist); 472 dsl_deadlist_close(&ds->ds_deadlist); 473 if (ds->ds_prev) 474 dsl_dataset_drop_ref(ds->ds_prev, ds); 475 dsl_dir_close(ds->ds_dir, ds); 476 mutex_destroy(&ds->ds_lock); 477 mutex_destroy(&ds->ds_recvlock); 478 mutex_destroy(&ds->ds_opening_lock); 479 rw_destroy(&ds->ds_rwlock); 480 cv_destroy(&ds->ds_exclusive_cv); 481 kmem_free(ds, sizeof (dsl_dataset_t)); 482 if (err) { 483 dmu_buf_rele(dbuf, tag); 484 return (err); 485 } 486 ds = winner; 487 } else { 488 ds->ds_fsid_guid = 489 unique_insert(ds->ds_phys->ds_fsid_guid); 490 } 491 } 492 ASSERT3P(ds->ds_dbuf, ==, dbuf); 493 ASSERT3P(ds->ds_phys, ==, dbuf->db_data); 494 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 || 495 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN || 496 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); 497 mutex_enter(&ds->ds_lock); 498 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) { 499 mutex_exit(&ds->ds_lock); 500 dmu_buf_rele(ds->ds_dbuf, tag); 501 return (ENOENT); 502 } 503 mutex_exit(&ds->ds_lock); 504 *dsp = ds; 505 return (0); 506 } 507 508 static int 509 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag) 510 { 511 dsl_pool_t *dp = ds->ds_dir->dd_pool; 512 513 /* 514 * In syncing context we don't want the rwlock lock: there 515 * may be an existing writer waiting for sync phase to 516 * finish. We don't need to worry about such writers, since 517 * sync phase is single-threaded, so the writer can't be 518 * doing anything while we are active. 519 */ 520 if (dsl_pool_sync_context(dp)) { 521 ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); 522 return (0); 523 } 524 525 /* 526 * Normal users will hold the ds_rwlock as a READER until they 527 * are finished (i.e., call dsl_dataset_rele()). "Owners" will 528 * drop their READER lock after they set the ds_owner field. 529 * 530 * If the dataset is being destroyed, the destroy thread will 531 * obtain a WRITER lock for exclusive access after it's done its 532 * open-context work and then change the ds_owner to 533 * dsl_reaper once destruction is assured. So threads 534 * may block here temporarily, until the "destructability" of 535 * the dataset is determined. 536 */ 537 ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock)); 538 mutex_enter(&ds->ds_lock); 539 while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) { 540 rw_exit(&dp->dp_config_rwlock); 541 cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock); 542 if (DSL_DATASET_IS_DESTROYED(ds)) { 543 mutex_exit(&ds->ds_lock); 544 dsl_dataset_drop_ref(ds, tag); 545 rw_enter(&dp->dp_config_rwlock, RW_READER); 546 return (ENOENT); 547 } 548 /* 549 * The dp_config_rwlock lives above the ds_lock. And 550 * we need to check DSL_DATASET_IS_DESTROYED() while 551 * holding the ds_lock, so we have to drop and reacquire 552 * the ds_lock here. 
553 */ 554 mutex_exit(&ds->ds_lock); 555 rw_enter(&dp->dp_config_rwlock, RW_READER); 556 mutex_enter(&ds->ds_lock); 557 } 558 mutex_exit(&ds->ds_lock); 559 return (0); 560 } 561 562 int 563 dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag, 564 dsl_dataset_t **dsp) 565 { 566 int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp); 567 568 if (err) 569 return (err); 570 return (dsl_dataset_hold_ref(*dsp, tag)); 571 } 572 573 int 574 dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, boolean_t inconsistentok, 575 void *tag, dsl_dataset_t **dsp) 576 { 577 int err = dsl_dataset_hold_obj(dp, dsobj, tag, dsp); 578 if (err) 579 return (err); 580 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { 581 dsl_dataset_rele(*dsp, tag); 582 *dsp = NULL; 583 return (EBUSY); 584 } 585 return (0); 586 } 587 588 int 589 dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp) 590 { 591 dsl_dir_t *dd; 592 dsl_pool_t *dp; 593 const char *snapname; 594 uint64_t obj; 595 int err = 0; 596 597 err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname); 598 if (err) 599 return (err); 600 601 dp = dd->dd_pool; 602 obj = dd->dd_phys->dd_head_dataset_obj; 603 rw_enter(&dp->dp_config_rwlock, RW_READER); 604 if (obj) 605 err = dsl_dataset_get_ref(dp, obj, tag, dsp); 606 else 607 err = ENOENT; 608 if (err) 609 goto out; 610 611 err = dsl_dataset_hold_ref(*dsp, tag); 612 613 /* we may be looking for a snapshot */ 614 if (err == 0 && snapname != NULL) { 615 dsl_dataset_t *ds = NULL; 616 617 if (*snapname++ != '@') { 618 dsl_dataset_rele(*dsp, tag); 619 err = ENOENT; 620 goto out; 621 } 622 623 dprintf("looking for snapshot '%s'\n", snapname); 624 err = dsl_dataset_snap_lookup(*dsp, snapname, &obj); 625 if (err == 0) 626 err = dsl_dataset_get_ref(dp, obj, tag, &ds); 627 dsl_dataset_rele(*dsp, tag); 628 629 ASSERT3U((err == 0), ==, (ds != NULL)); 630 631 if (ds) { 632 mutex_enter(&ds->ds_lock); 633 if (ds->ds_snapname[0] == 0) 634 (void) strlcpy(ds->ds_snapname, snapname, 635 sizeof (ds->ds_snapname)); 636 mutex_exit(&ds->ds_lock); 637 err = dsl_dataset_hold_ref(ds, tag); 638 *dsp = err ? NULL : ds; 639 } 640 } 641 out: 642 rw_exit(&dp->dp_config_rwlock); 643 dsl_dir_close(dd, FTAG); 644 return (err); 645 } 646 647 int 648 dsl_dataset_own(const char *name, boolean_t inconsistentok, 649 void *tag, dsl_dataset_t **dsp) 650 { 651 int err = dsl_dataset_hold(name, tag, dsp); 652 if (err) 653 return (err); 654 if (!dsl_dataset_tryown(*dsp, inconsistentok, tag)) { 655 dsl_dataset_rele(*dsp, tag); 656 return (EBUSY); 657 } 658 return (0); 659 } 660 661 void 662 dsl_dataset_name(dsl_dataset_t *ds, char *name) 663 { 664 if (ds == NULL) { 665 (void) strcpy(name, "mos"); 666 } else { 667 dsl_dir_name(ds->ds_dir, name); 668 VERIFY(0 == dsl_dataset_get_snapname(ds)); 669 if (ds->ds_snapname[0]) { 670 (void) strcat(name, "@"); 671 /* 672 * We use a "recursive" mutex so that we 673 * can call dprintf_ds() with ds_lock held. 
674 */ 675 if (!MUTEX_HELD(&ds->ds_lock)) { 676 mutex_enter(&ds->ds_lock); 677 (void) strcat(name, ds->ds_snapname); 678 mutex_exit(&ds->ds_lock); 679 } else { 680 (void) strcat(name, ds->ds_snapname); 681 } 682 } 683 } 684 } 685 686 static int 687 dsl_dataset_namelen(dsl_dataset_t *ds) 688 { 689 int result; 690 691 if (ds == NULL) { 692 result = 3; /* "mos" */ 693 } else { 694 result = dsl_dir_namelen(ds->ds_dir); 695 VERIFY(0 == dsl_dataset_get_snapname(ds)); 696 if (ds->ds_snapname[0]) { 697 ++result; /* adding one for the @-sign */ 698 if (!MUTEX_HELD(&ds->ds_lock)) { 699 mutex_enter(&ds->ds_lock); 700 result += strlen(ds->ds_snapname); 701 mutex_exit(&ds->ds_lock); 702 } else { 703 result += strlen(ds->ds_snapname); 704 } 705 } 706 } 707 708 return (result); 709 } 710 711 void 712 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag) 713 { 714 dmu_buf_rele(ds->ds_dbuf, tag); 715 } 716 717 void 718 dsl_dataset_rele(dsl_dataset_t *ds, void *tag) 719 { 720 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) { 721 rw_exit(&ds->ds_rwlock); 722 } 723 dsl_dataset_drop_ref(ds, tag); 724 } 725 726 void 727 dsl_dataset_disown(dsl_dataset_t *ds, void *tag) 728 { 729 ASSERT((ds->ds_owner == tag && ds->ds_dbuf) || 730 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL)); 731 732 mutex_enter(&ds->ds_lock); 733 ds->ds_owner = NULL; 734 if (RW_WRITE_HELD(&ds->ds_rwlock)) { 735 rw_exit(&ds->ds_rwlock); 736 cv_broadcast(&ds->ds_exclusive_cv); 737 } 738 mutex_exit(&ds->ds_lock); 739 if (ds->ds_dbuf) 740 dsl_dataset_drop_ref(ds, tag); 741 else 742 dsl_dataset_evict(NULL, ds); 743 } 744 745 boolean_t 746 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *tag) 747 { 748 boolean_t gotit = FALSE; 749 750 mutex_enter(&ds->ds_lock); 751 if (ds->ds_owner == NULL && 752 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) { 753 ds->ds_owner = tag; 754 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) 755 rw_exit(&ds->ds_rwlock); 756 gotit = TRUE; 757 } 758 mutex_exit(&ds->ds_lock); 759 return (gotit); 760 } 761 762 void 763 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner) 764 { 765 ASSERT3P(owner, ==, ds->ds_owner); 766 if (!RW_WRITE_HELD(&ds->ds_rwlock)) 767 rw_enter(&ds->ds_rwlock, RW_WRITER); 768 } 769 770 uint64_t 771 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, 772 uint64_t flags, dmu_tx_t *tx) 773 { 774 dsl_pool_t *dp = dd->dd_pool; 775 dmu_buf_t *dbuf; 776 dsl_dataset_phys_t *dsphys; 777 uint64_t dsobj; 778 objset_t *mos = dp->dp_meta_objset; 779 780 if (origin == NULL) 781 origin = dp->dp_origin_snap; 782 783 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp); 784 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0); 785 ASSERT(dmu_tx_is_syncing(tx)); 786 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); 787 788 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 789 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 790 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 791 dmu_buf_will_dirty(dbuf, tx); 792 dsphys = dbuf->db_data; 793 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 794 dsphys->ds_dir_obj = dd->dd_object; 795 dsphys->ds_flags = flags; 796 dsphys->ds_fsid_guid = unique_create(); 797 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 798 sizeof (dsphys->ds_guid)); 799 dsphys->ds_snapnames_zapobj = 800 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP, 801 DMU_OT_NONE, 0, tx); 802 dsphys->ds_creation_time = gethrestime_sec(); 803 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 
1 : tx->tx_txg; 804 805 if (origin == NULL) { 806 dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx); 807 } else { 808 dsl_dataset_t *ohds; 809 810 dsphys->ds_prev_snap_obj = origin->ds_object; 811 dsphys->ds_prev_snap_txg = 812 origin->ds_phys->ds_creation_txg; 813 dsphys->ds_used_bytes = 814 origin->ds_phys->ds_used_bytes; 815 dsphys->ds_compressed_bytes = 816 origin->ds_phys->ds_compressed_bytes; 817 dsphys->ds_uncompressed_bytes = 818 origin->ds_phys->ds_uncompressed_bytes; 819 dsphys->ds_bp = origin->ds_phys->ds_bp; 820 dsphys->ds_flags |= origin->ds_phys->ds_flags; 821 822 dmu_buf_will_dirty(origin->ds_dbuf, tx); 823 origin->ds_phys->ds_num_children++; 824 825 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 826 origin->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ohds)); 827 dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist, 828 dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx); 829 dsl_dataset_rele(ohds, FTAG); 830 831 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) { 832 if (origin->ds_phys->ds_next_clones_obj == 0) { 833 origin->ds_phys->ds_next_clones_obj = 834 zap_create(mos, 835 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); 836 } 837 VERIFY(0 == zap_add_int(mos, 838 origin->ds_phys->ds_next_clones_obj, 839 dsobj, tx)); 840 } 841 842 dmu_buf_will_dirty(dd->dd_dbuf, tx); 843 dd->dd_phys->dd_origin_obj = origin->ds_object; 844 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 845 if (origin->ds_dir->dd_phys->dd_clones == 0) { 846 dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx); 847 origin->ds_dir->dd_phys->dd_clones = 848 zap_create(mos, 849 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); 850 } 851 VERIFY3U(0, ==, zap_add_int(mos, 852 origin->ds_dir->dd_phys->dd_clones, dsobj, tx)); 853 } 854 } 855 856 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 857 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 858 859 dmu_buf_rele(dbuf, FTAG); 860 861 dmu_buf_will_dirty(dd->dd_dbuf, tx); 862 dd->dd_phys->dd_head_dataset_obj = dsobj; 863 864 return (dsobj); 865 } 866 867 uint64_t 868 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname, 869 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx) 870 { 871 dsl_pool_t *dp = pdd->dd_pool; 872 uint64_t dsobj, ddobj; 873 dsl_dir_t *dd; 874 875 ASSERT(lastname[0] != '@'); 876 877 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx); 878 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd)); 879 880 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx); 881 882 dsl_deleg_set_create_perms(dd, tx, cr); 883 884 dsl_dir_close(dd, FTAG); 885 886 /* 887 * If we are creating a clone, make sure we zero out any stale 888 * data from the origin snapshots zil header. 
889 */ 890 if (origin != NULL) { 891 dsl_dataset_t *ds; 892 objset_t *os; 893 894 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 895 VERIFY3U(0, ==, dmu_objset_from_ds(ds, &os)); 896 bzero(&os->os_zil_header, sizeof (os->os_zil_header)); 897 dsl_dataset_dirty(ds, tx); 898 dsl_dataset_rele(ds, FTAG); 899 } 900 901 return (dsobj); 902 } 903 904 struct destroyarg { 905 dsl_sync_task_group_t *dstg; 906 char *snapname; 907 char *failed; 908 boolean_t defer; 909 }; 910 911 static int 912 dsl_snapshot_destroy_one(const char *name, void *arg) 913 { 914 struct destroyarg *da = arg; 915 dsl_dataset_t *ds; 916 int err; 917 char *dsname; 918 919 dsname = kmem_asprintf("%s@%s", name, da->snapname); 920 err = dsl_dataset_own(dsname, B_TRUE, da->dstg, &ds); 921 strfree(dsname); 922 if (err == 0) { 923 struct dsl_ds_destroyarg *dsda; 924 925 dsl_dataset_make_exclusive(ds, da->dstg); 926 dsda = kmem_zalloc(sizeof (struct dsl_ds_destroyarg), KM_SLEEP); 927 dsda->ds = ds; 928 dsda->defer = da->defer; 929 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check, 930 dsl_dataset_destroy_sync, dsda, da->dstg, 0); 931 } else if (err == ENOENT) { 932 err = 0; 933 } else { 934 (void) strcpy(da->failed, name); 935 } 936 return (err); 937 } 938 939 /* 940 * Destroy 'snapname' in all descendants of 'fsname'. 941 */ 942 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy 943 int 944 dsl_snapshots_destroy(char *fsname, char *snapname, boolean_t defer) 945 { 946 int err; 947 struct destroyarg da; 948 dsl_sync_task_t *dst; 949 spa_t *spa; 950 951 err = spa_open(fsname, &spa, FTAG); 952 if (err) 953 return (err); 954 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 955 da.snapname = snapname; 956 da.failed = fsname; 957 da.defer = defer; 958 959 err = dmu_objset_find(fsname, 960 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN); 961 962 if (err == 0) 963 err = dsl_sync_task_group_wait(da.dstg); 964 965 for (dst = list_head(&da.dstg->dstg_tasks); dst; 966 dst = list_next(&da.dstg->dstg_tasks, dst)) { 967 struct dsl_ds_destroyarg *dsda = dst->dst_arg1; 968 dsl_dataset_t *ds = dsda->ds; 969 970 /* 971 * Return the file system name that triggered the error 972 */ 973 if (dst->dst_err) { 974 dsl_dataset_name(ds, fsname); 975 *strchr(fsname, '@') = '\0'; 976 } 977 ASSERT3P(dsda->rm_origin, ==, NULL); 978 dsl_dataset_disown(ds, da.dstg); 979 kmem_free(dsda, sizeof (struct dsl_ds_destroyarg)); 980 } 981 982 dsl_sync_task_group_destroy(da.dstg); 983 spa_close(spa, FTAG); 984 return (err); 985 } 986 987 static boolean_t 988 dsl_dataset_might_destroy_origin(dsl_dataset_t *ds) 989 { 990 boolean_t might_destroy = B_FALSE; 991 992 mutex_enter(&ds->ds_lock); 993 if (ds->ds_phys->ds_num_children == 2 && ds->ds_userrefs == 0 && 994 DS_IS_DEFER_DESTROY(ds)) 995 might_destroy = B_TRUE; 996 mutex_exit(&ds->ds_lock); 997 998 return (might_destroy); 999 } 1000 1001 /* 1002 * If we're removing a clone, and these three conditions are true: 1003 * 1) the clone's origin has no other children 1004 * 2) the clone's origin has no user references 1005 * 3) the clone's origin has been marked for deferred destruction 1006 * Then, prepare to remove the origin as part of this sync task group. 
1007 */ 1008 static int 1009 dsl_dataset_origin_rm_prep(struct dsl_ds_destroyarg *dsda, void *tag) 1010 { 1011 dsl_dataset_t *ds = dsda->ds; 1012 dsl_dataset_t *origin = ds->ds_prev; 1013 1014 if (dsl_dataset_might_destroy_origin(origin)) { 1015 char *name; 1016 int namelen; 1017 int error; 1018 1019 namelen = dsl_dataset_namelen(origin) + 1; 1020 name = kmem_alloc(namelen, KM_SLEEP); 1021 dsl_dataset_name(origin, name); 1022 #ifdef _KERNEL 1023 error = zfs_unmount_snap(name, NULL); 1024 if (error) { 1025 kmem_free(name, namelen); 1026 return (error); 1027 } 1028 #endif 1029 error = dsl_dataset_own(name, B_TRUE, tag, &origin); 1030 kmem_free(name, namelen); 1031 if (error) 1032 return (error); 1033 dsda->rm_origin = origin; 1034 dsl_dataset_make_exclusive(origin, tag); 1035 } 1036 1037 return (0); 1038 } 1039 1040 /* 1041 * ds must be opened as OWNER. On return (whether successful or not), 1042 * ds will be closed and caller can no longer dereference it. 1043 */ 1044 int 1045 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag, boolean_t defer) 1046 { 1047 int err; 1048 dsl_sync_task_group_t *dstg; 1049 objset_t *os; 1050 dsl_dir_t *dd; 1051 uint64_t obj; 1052 struct dsl_ds_destroyarg dsda = { 0 }; 1053 dsl_dataset_t dummy_ds = { 0 }; 1054 1055 dsda.ds = ds; 1056 1057 if (dsl_dataset_is_snapshot(ds)) { 1058 /* Destroying a snapshot is simpler */ 1059 dsl_dataset_make_exclusive(ds, tag); 1060 1061 dsda.defer = defer; 1062 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 1063 dsl_dataset_destroy_check, dsl_dataset_destroy_sync, 1064 &dsda, tag, 0); 1065 ASSERT3P(dsda.rm_origin, ==, NULL); 1066 goto out; 1067 } else if (defer) { 1068 err = EINVAL; 1069 goto out; 1070 } 1071 1072 dd = ds->ds_dir; 1073 dummy_ds.ds_dir = dd; 1074 dummy_ds.ds_object = ds->ds_object; 1075 1076 /* 1077 * Check for errors and mark this ds as inconsistent, in 1078 * case we crash while freeing the objects. 1079 */ 1080 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check, 1081 dsl_dataset_destroy_begin_sync, ds, NULL, 0); 1082 if (err) 1083 goto out; 1084 1085 err = dmu_objset_from_ds(ds, &os); 1086 if (err) 1087 goto out; 1088 1089 /* 1090 * remove the objects in open context, so that we won't 1091 * have too much to do in syncing context. 1092 */ 1093 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 1094 ds->ds_phys->ds_prev_snap_txg)) { 1095 /* 1096 * Ignore errors, if there is not enough disk space 1097 * we will deal with it in dsl_dataset_destroy_sync(). 1098 */ 1099 (void) dmu_free_object(os, obj); 1100 } 1101 if (err != ESRCH) 1102 goto out; 1103 1104 /* 1105 * Only the ZIL knows how to free log blocks. 1106 */ 1107 zil_destroy(dmu_objset_zil(os), B_FALSE); 1108 1109 /* 1110 * Sync out all in-flight IO. 1111 */ 1112 txg_wait_synced(dd->dd_pool, 0); 1113 1114 /* 1115 * If we managed to free all the objects in open 1116 * context, the user space accounting should be zero. 1117 */ 1118 if (ds->ds_phys->ds_bp.blk_fill == 0 && 1119 dmu_objset_userused_enabled(os)) { 1120 uint64_t count; 1121 1122 ASSERT(zap_count(os, DMU_USERUSED_OBJECT, &count) != 0 || 1123 count == 0); 1124 ASSERT(zap_count(os, DMU_GROUPUSED_OBJECT, &count) != 0 || 1125 count == 0); 1126 } 1127 1128 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); 1129 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd); 1130 rw_exit(&dd->dd_pool->dp_config_rwlock); 1131 1132 if (err) 1133 goto out; 1134 1135 /* 1136 * Blow away the dsl_dir + head dataset. 
1137 */ 1138 dsl_dataset_make_exclusive(ds, tag); 1139 /* 1140 * If we're removing a clone, we might also need to remove its 1141 * origin. 1142 */ 1143 do { 1144 dsda.need_prep = B_FALSE; 1145 if (dsl_dir_is_clone(dd)) { 1146 err = dsl_dataset_origin_rm_prep(&dsda, tag); 1147 if (err) { 1148 dsl_dir_close(dd, FTAG); 1149 goto out; 1150 } 1151 } 1152 1153 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool); 1154 dsl_sync_task_create(dstg, dsl_dataset_destroy_check, 1155 dsl_dataset_destroy_sync, &dsda, tag, 0); 1156 dsl_sync_task_create(dstg, dsl_dir_destroy_check, 1157 dsl_dir_destroy_sync, &dummy_ds, FTAG, 0); 1158 err = dsl_sync_task_group_wait(dstg); 1159 dsl_sync_task_group_destroy(dstg); 1160 1161 /* 1162 * We could be racing against 'zfs release' or 'zfs destroy -d' 1163 * on the origin snap, in which case we can get EBUSY if we 1164 * needed to destroy the origin snap but were not ready to 1165 * do so. 1166 */ 1167 if (dsda.need_prep) { 1168 ASSERT(err == EBUSY); 1169 ASSERT(dsl_dir_is_clone(dd)); 1170 ASSERT(dsda.rm_origin == NULL); 1171 } 1172 } while (dsda.need_prep); 1173 1174 if (dsda.rm_origin != NULL) 1175 dsl_dataset_disown(dsda.rm_origin, tag); 1176 1177 /* if it is successful, dsl_dir_destroy_sync will close the dd */ 1178 if (err) 1179 dsl_dir_close(dd, FTAG); 1180 out: 1181 dsl_dataset_disown(ds, tag); 1182 return (err); 1183 } 1184 1185 blkptr_t * 1186 dsl_dataset_get_blkptr(dsl_dataset_t *ds) 1187 { 1188 return (&ds->ds_phys->ds_bp); 1189 } 1190 1191 void 1192 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) 1193 { 1194 ASSERT(dmu_tx_is_syncing(tx)); 1195 /* If it's the meta-objset, set dp_meta_rootbp */ 1196 if (ds == NULL) { 1197 tx->tx_pool->dp_meta_rootbp = *bp; 1198 } else { 1199 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1200 ds->ds_phys->ds_bp = *bp; 1201 } 1202 } 1203 1204 spa_t * 1205 dsl_dataset_get_spa(dsl_dataset_t *ds) 1206 { 1207 return (ds->ds_dir->dd_pool->dp_spa); 1208 } 1209 1210 void 1211 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) 1212 { 1213 dsl_pool_t *dp; 1214 1215 if (ds == NULL) /* this is the meta-objset */ 1216 return; 1217 1218 ASSERT(ds->ds_objset != NULL); 1219 1220 if (ds->ds_phys->ds_next_snap_obj != 0) 1221 panic("dirtying snapshot!"); 1222 1223 dp = ds->ds_dir->dd_pool; 1224 1225 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) { 1226 /* up the hold count until we can be written out */ 1227 dmu_buf_add_ref(ds->ds_dbuf, ds); 1228 } 1229 } 1230 1231 /* 1232 * The unique space in the head dataset can be calculated by subtracting 1233 * the space used in the most recent snapshot, that is still being used 1234 * in this file system, from the space currently in use. To figure out 1235 * the space in the most recent snapshot still in use, we need to take 1236 * the total space used in the snapshot and subtract out the space that 1237 * has been freed up since the snapshot was taken. 
1238 */ 1239 static void 1240 dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds) 1241 { 1242 uint64_t mrs_used; 1243 uint64_t dlused, dlcomp, dluncomp; 1244 1245 ASSERT(!dsl_dataset_is_snapshot(ds)); 1246 1247 if (ds->ds_phys->ds_prev_snap_obj != 0) 1248 mrs_used = ds->ds_prev->ds_phys->ds_used_bytes; 1249 else 1250 mrs_used = 0; 1251 1252 dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp); 1253 1254 ASSERT3U(dlused, <=, mrs_used); 1255 ds->ds_phys->ds_unique_bytes = 1256 ds->ds_phys->ds_used_bytes - (mrs_used - dlused); 1257 1258 if (spa_version(ds->ds_dir->dd_pool->dp_spa) >= 1259 SPA_VERSION_UNIQUE_ACCURATE) 1260 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 1261 } 1262 1263 struct killarg { 1264 dsl_dataset_t *ds; 1265 dmu_tx_t *tx; 1266 }; 1267 1268 /* ARGSUSED */ 1269 static int 1270 kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf, 1271 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg) 1272 { 1273 struct killarg *ka = arg; 1274 dmu_tx_t *tx = ka->tx; 1275 1276 if (bp == NULL) 1277 return (0); 1278 1279 if (zb->zb_level == ZB_ZIL_LEVEL) { 1280 ASSERT(zilog != NULL); 1281 /* 1282 * It's a block in the intent log. It has no 1283 * accounting, so just free it. 1284 */ 1285 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp); 1286 } else { 1287 ASSERT(zilog == NULL); 1288 ASSERT3U(bp->blk_birth, >, ka->ds->ds_phys->ds_prev_snap_txg); 1289 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE); 1290 } 1291 1292 return (0); 1293 } 1294 1295 /* ARGSUSED */ 1296 static int 1297 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx) 1298 { 1299 dsl_dataset_t *ds = arg1; 1300 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1301 uint64_t count; 1302 int err; 1303 1304 /* 1305 * Can't delete a head dataset if there are snapshots of it. 1306 * (Except if the only snapshots are from the branch we cloned 1307 * from.) 1308 */ 1309 if (ds->ds_prev != NULL && 1310 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1311 return (EBUSY); 1312 1313 /* 1314 * This is really a dsl_dir thing, but check it here so that 1315 * we'll be less likely to leave this dataset inconsistent & 1316 * nearly destroyed. 1317 */ 1318 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count); 1319 if (err) 1320 return (err); 1321 if (count != 0) 1322 return (EEXIST); 1323 1324 return (0); 1325 } 1326 1327 /* ARGSUSED */ 1328 static void 1329 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, dmu_tx_t *tx) 1330 { 1331 dsl_dataset_t *ds = arg1; 1332 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1333 1334 /* Mark it as inconsistent on-disk, in case we crash */ 1335 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1336 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 1337 1338 spa_history_log_internal(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx, 1339 "dataset = %llu", ds->ds_object); 1340 } 1341 1342 static int 1343 dsl_dataset_origin_check(struct dsl_ds_destroyarg *dsda, void *tag, 1344 dmu_tx_t *tx) 1345 { 1346 dsl_dataset_t *ds = dsda->ds; 1347 dsl_dataset_t *ds_prev = ds->ds_prev; 1348 1349 if (dsl_dataset_might_destroy_origin(ds_prev)) { 1350 struct dsl_ds_destroyarg ndsda = {0}; 1351 1352 /* 1353 * If we're not prepared to remove the origin, don't remove 1354 * the clone either. 
1355 */ 1356 if (dsda->rm_origin == NULL) { 1357 dsda->need_prep = B_TRUE; 1358 return (EBUSY); 1359 } 1360 1361 ndsda.ds = ds_prev; 1362 ndsda.is_origin_rm = B_TRUE; 1363 return (dsl_dataset_destroy_check(&ndsda, tag, tx)); 1364 } 1365 1366 /* 1367 * If we're not going to remove the origin after all, 1368 * undo the open context setup. 1369 */ 1370 if (dsda->rm_origin != NULL) { 1371 dsl_dataset_disown(dsda->rm_origin, tag); 1372 dsda->rm_origin = NULL; 1373 } 1374 1375 return (0); 1376 } 1377 1378 /* 1379 * If you add new checks here, you may need to add 1380 * additional checks to the "temporary" case in 1381 * snapshot_check() in dmu_objset.c. 1382 */ 1383 /* ARGSUSED */ 1384 int 1385 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) 1386 { 1387 struct dsl_ds_destroyarg *dsda = arg1; 1388 dsl_dataset_t *ds = dsda->ds; 1389 1390 /* we have an owner hold, so noone else can destroy us */ 1391 ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); 1392 1393 /* 1394 * Only allow deferred destroy on pools that support it. 1395 * NOTE: deferred destroy is only supported on snapshots. 1396 */ 1397 if (dsda->defer) { 1398 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < 1399 SPA_VERSION_USERREFS) 1400 return (ENOTSUP); 1401 ASSERT(dsl_dataset_is_snapshot(ds)); 1402 return (0); 1403 } 1404 1405 /* 1406 * Can't delete a head dataset if there are snapshots of it. 1407 * (Except if the only snapshots are from the branch we cloned 1408 * from.) 1409 */ 1410 if (ds->ds_prev != NULL && 1411 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1412 return (EBUSY); 1413 1414 /* 1415 * If we made changes this txg, traverse_dsl_dataset won't find 1416 * them. Try again. 1417 */ 1418 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) 1419 return (EAGAIN); 1420 1421 if (dsl_dataset_is_snapshot(ds)) { 1422 /* 1423 * If this snapshot has an elevated user reference count, 1424 * we can't destroy it yet. 1425 */ 1426 if (ds->ds_userrefs > 0 && !dsda->releasing) 1427 return (EBUSY); 1428 1429 mutex_enter(&ds->ds_lock); 1430 /* 1431 * Can't delete a branch point. However, if we're destroying 1432 * a clone and removing its origin due to it having a user 1433 * hold count of 0 and having been marked for deferred destroy, 1434 * it's OK for the origin to have a single clone. 1435 */ 1436 if (ds->ds_phys->ds_num_children > 1437 (dsda->is_origin_rm ? 2 : 1)) { 1438 mutex_exit(&ds->ds_lock); 1439 return (EEXIST); 1440 } 1441 mutex_exit(&ds->ds_lock); 1442 } else if (dsl_dir_is_clone(ds->ds_dir)) { 1443 return (dsl_dataset_origin_check(dsda, arg2, tx)); 1444 } 1445 1446 /* XXX we should do some i/o error checking... 
*/ 1447 return (0); 1448 } 1449 1450 struct refsarg { 1451 kmutex_t lock; 1452 boolean_t gone; 1453 kcondvar_t cv; 1454 }; 1455 1456 /* ARGSUSED */ 1457 static void 1458 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv) 1459 { 1460 struct refsarg *arg = argv; 1461 1462 mutex_enter(&arg->lock); 1463 arg->gone = TRUE; 1464 cv_signal(&arg->cv); 1465 mutex_exit(&arg->lock); 1466 } 1467 1468 static void 1469 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag) 1470 { 1471 struct refsarg arg; 1472 1473 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL); 1474 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL); 1475 arg.gone = FALSE; 1476 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys, 1477 dsl_dataset_refs_gone); 1478 dmu_buf_rele(ds->ds_dbuf, tag); 1479 mutex_enter(&arg.lock); 1480 while (!arg.gone) 1481 cv_wait(&arg.cv, &arg.lock); 1482 ASSERT(arg.gone); 1483 mutex_exit(&arg.lock); 1484 ds->ds_dbuf = NULL; 1485 ds->ds_phys = NULL; 1486 mutex_destroy(&arg.lock); 1487 cv_destroy(&arg.cv); 1488 } 1489 1490 static void 1491 remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj, dmu_tx_t *tx) 1492 { 1493 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1494 uint64_t count; 1495 int err; 1496 1497 ASSERT(ds->ds_phys->ds_num_children >= 2); 1498 err = zap_remove_int(mos, ds->ds_phys->ds_next_clones_obj, obj, tx); 1499 /* 1500 * The err should not be ENOENT, but a bug in a previous version 1501 * of the code could cause upgrade_clones_cb() to not set 1502 * ds_next_snap_obj when it should, leading to a missing entry. 1503 * If we knew that the pool was created after 1504 * SPA_VERSION_NEXT_CLONES, we could assert that it isn't 1505 * ENOENT. However, at least we can check that we don't have 1506 * too many entries in the next_clones_obj even after failing to 1507 * remove this one. 1508 */ 1509 if (err != ENOENT) { 1510 VERIFY3U(err, ==, 0); 1511 } 1512 ASSERT3U(0, ==, zap_count(mos, ds->ds_phys->ds_next_clones_obj, 1513 &count)); 1514 ASSERT3U(count, <=, ds->ds_phys->ds_num_children - 2); 1515 } 1516 1517 static void 1518 dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx) 1519 { 1520 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1521 zap_cursor_t zc; 1522 zap_attribute_t za; 1523 1524 /* 1525 * If it is the old version, dd_clones doesn't exist so we can't 1526 * find the clones, but deadlist_remove_key() is a no-op so it 1527 * doesn't matter. 
1528 */ 1529 if (ds->ds_dir->dd_phys->dd_clones == 0) 1530 return; 1531 1532 for (zap_cursor_init(&zc, mos, ds->ds_dir->dd_phys->dd_clones); 1533 zap_cursor_retrieve(&zc, &za) == 0; 1534 zap_cursor_advance(&zc)) { 1535 dsl_dataset_t *clone; 1536 1537 VERIFY3U(0, ==, dsl_dataset_hold_obj(ds->ds_dir->dd_pool, 1538 za.za_first_integer, FTAG, &clone)); 1539 if (clone->ds_dir->dd_origin_txg > mintxg) { 1540 dsl_deadlist_remove_key(&clone->ds_deadlist, 1541 mintxg, tx); 1542 dsl_dataset_remove_clones_key(clone, mintxg, tx); 1543 } 1544 dsl_dataset_rele(clone, FTAG); 1545 } 1546 zap_cursor_fini(&zc); 1547 } 1548 1549 struct process_old_arg { 1550 dsl_dataset_t *ds; 1551 dsl_dataset_t *ds_prev; 1552 boolean_t after_branch_point; 1553 zio_t *pio; 1554 uint64_t used, comp, uncomp; 1555 }; 1556 1557 static int 1558 process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 1559 { 1560 struct process_old_arg *poa = arg; 1561 dsl_pool_t *dp = poa->ds->ds_dir->dd_pool; 1562 1563 if (bp->blk_birth <= poa->ds->ds_phys->ds_prev_snap_txg) { 1564 dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx); 1565 if (poa->ds_prev && !poa->after_branch_point && 1566 bp->blk_birth > 1567 poa->ds_prev->ds_phys->ds_prev_snap_txg) { 1568 poa->ds_prev->ds_phys->ds_unique_bytes += 1569 bp_get_dsize_sync(dp->dp_spa, bp); 1570 } 1571 } else { 1572 poa->used += bp_get_dsize_sync(dp->dp_spa, bp); 1573 poa->comp += BP_GET_PSIZE(bp); 1574 poa->uncomp += BP_GET_UCSIZE(bp); 1575 dsl_free_sync(poa->pio, dp, tx->tx_txg, bp); 1576 } 1577 return (0); 1578 } 1579 1580 static void 1581 process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev, 1582 dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx) 1583 { 1584 struct process_old_arg poa = { 0 }; 1585 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1586 objset_t *mos = dp->dp_meta_objset; 1587 1588 ASSERT(ds->ds_deadlist.dl_oldfmt); 1589 ASSERT(ds_next->ds_deadlist.dl_oldfmt); 1590 1591 poa.ds = ds; 1592 poa.ds_prev = ds_prev; 1593 poa.after_branch_point = after_branch_point; 1594 poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); 1595 VERIFY3U(0, ==, bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj, 1596 process_old_cb, &poa, tx)); 1597 VERIFY3U(zio_wait(poa.pio), ==, 0); 1598 ASSERT3U(poa.used, ==, ds->ds_phys->ds_unique_bytes); 1599 1600 /* change snapused */ 1601 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, 1602 -poa.used, -poa.comp, -poa.uncomp, tx); 1603 1604 /* swap next's deadlist to our deadlist */ 1605 dsl_deadlist_close(&ds->ds_deadlist); 1606 dsl_deadlist_close(&ds_next->ds_deadlist); 1607 SWITCH64(ds_next->ds_phys->ds_deadlist_obj, 1608 ds->ds_phys->ds_deadlist_obj); 1609 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); 1610 dsl_deadlist_open(&ds_next->ds_deadlist, mos, 1611 ds_next->ds_phys->ds_deadlist_obj); 1612 } 1613 1614 void 1615 dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx) 1616 { 1617 struct dsl_ds_destroyarg *dsda = arg1; 1618 dsl_dataset_t *ds = dsda->ds; 1619 int err; 1620 int after_branch_point = FALSE; 1621 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1622 objset_t *mos = dp->dp_meta_objset; 1623 dsl_dataset_t *ds_prev = NULL; 1624 boolean_t wont_destroy; 1625 uint64_t obj; 1626 1627 wont_destroy = (dsda->defer && 1628 (ds->ds_userrefs > 0 || ds->ds_phys->ds_num_children > 1)); 1629 1630 ASSERT(ds->ds_owner || wont_destroy); 1631 ASSERT(dsda->defer || ds->ds_phys->ds_num_children <= 1); 1632 ASSERT(ds->ds_prev == NULL || 1633 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object); 1634 
ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg); 1635 1636 if (wont_destroy) { 1637 ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS); 1638 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1639 ds->ds_phys->ds_flags |= DS_FLAG_DEFER_DESTROY; 1640 return; 1641 } 1642 1643 /* signal any waiters that this dataset is going away */ 1644 mutex_enter(&ds->ds_lock); 1645 ds->ds_owner = dsl_reaper; 1646 cv_broadcast(&ds->ds_exclusive_cv); 1647 mutex_exit(&ds->ds_lock); 1648 1649 /* Remove our reservation */ 1650 if (ds->ds_reserved != 0) { 1651 dsl_prop_setarg_t psa; 1652 uint64_t value = 0; 1653 1654 dsl_prop_setarg_init_uint64(&psa, "refreservation", 1655 (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED), 1656 &value); 1657 psa.psa_effective_value = 0; /* predict default value */ 1658 1659 dsl_dataset_set_reservation_sync(ds, &psa, tx); 1660 ASSERT3U(ds->ds_reserved, ==, 0); 1661 } 1662 1663 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 1664 1665 dsl_scan_ds_destroyed(ds, tx); 1666 1667 obj = ds->ds_object; 1668 1669 if (ds->ds_phys->ds_prev_snap_obj != 0) { 1670 if (ds->ds_prev) { 1671 ds_prev = ds->ds_prev; 1672 } else { 1673 VERIFY(0 == dsl_dataset_hold_obj(dp, 1674 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev)); 1675 } 1676 after_branch_point = 1677 (ds_prev->ds_phys->ds_next_snap_obj != obj); 1678 1679 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx); 1680 if (after_branch_point && 1681 ds_prev->ds_phys->ds_next_clones_obj != 0) { 1682 remove_from_next_clones(ds_prev, obj, tx); 1683 if (ds->ds_phys->ds_next_snap_obj != 0) { 1684 VERIFY(0 == zap_add_int(mos, 1685 ds_prev->ds_phys->ds_next_clones_obj, 1686 ds->ds_phys->ds_next_snap_obj, tx)); 1687 } 1688 } 1689 if (after_branch_point && 1690 ds->ds_phys->ds_next_snap_obj == 0) { 1691 /* This clone is toast. */ 1692 ASSERT(ds_prev->ds_phys->ds_num_children > 1); 1693 ds_prev->ds_phys->ds_num_children--; 1694 1695 /* 1696 * If the clone's origin has no other clones, no 1697 * user holds, and has been marked for deferred 1698 * deletion, then we should have done the necessary 1699 * destroy setup for it. 1700 */ 1701 if (ds_prev->ds_phys->ds_num_children == 1 && 1702 ds_prev->ds_userrefs == 0 && 1703 DS_IS_DEFER_DESTROY(ds_prev)) { 1704 ASSERT3P(dsda->rm_origin, !=, NULL); 1705 } else { 1706 ASSERT3P(dsda->rm_origin, ==, NULL); 1707 } 1708 } else if (!after_branch_point) { 1709 ds_prev->ds_phys->ds_next_snap_obj = 1710 ds->ds_phys->ds_next_snap_obj; 1711 } 1712 } 1713 1714 if (dsl_dataset_is_snapshot(ds)) { 1715 dsl_dataset_t *ds_next; 1716 uint64_t old_unique; 1717 uint64_t used = 0, comp = 0, uncomp = 0; 1718 1719 VERIFY(0 == dsl_dataset_hold_obj(dp, 1720 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next)); 1721 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj); 1722 1723 old_unique = ds_next->ds_phys->ds_unique_bytes; 1724 1725 dmu_buf_will_dirty(ds_next->ds_dbuf, tx); 1726 ds_next->ds_phys->ds_prev_snap_obj = 1727 ds->ds_phys->ds_prev_snap_obj; 1728 ds_next->ds_phys->ds_prev_snap_txg = 1729 ds->ds_phys->ds_prev_snap_txg; 1730 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 1731 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0); 1732 1733 1734 if (ds_next->ds_deadlist.dl_oldfmt) { 1735 process_old_deadlist(ds, ds_prev, ds_next, 1736 after_branch_point, tx); 1737 } else { 1738 /* Adjust prev's unique space. 
*/ 1739 if (ds_prev && !after_branch_point) { 1740 dsl_deadlist_space_range(&ds_next->ds_deadlist, 1741 ds_prev->ds_phys->ds_prev_snap_txg, 1742 ds->ds_phys->ds_prev_snap_txg, 1743 &used, &comp, &uncomp); 1744 ds_prev->ds_phys->ds_unique_bytes += used; 1745 } 1746 1747 /* Adjust snapused. */ 1748 dsl_deadlist_space_range(&ds_next->ds_deadlist, 1749 ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, 1750 &used, &comp, &uncomp); 1751 dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP, 1752 -used, -comp, -uncomp, tx); 1753 1754 /* Move blocks to be freed to pool's free list. */ 1755 dsl_deadlist_move_bpobj(&ds_next->ds_deadlist, 1756 &dp->dp_free_bpobj, ds->ds_phys->ds_prev_snap_txg, 1757 tx); 1758 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, 1759 DD_USED_HEAD, used, comp, uncomp, tx); 1760 dsl_dir_dirty(tx->tx_pool->dp_free_dir, tx); 1761 1762 /* Merge our deadlist into next's and free it. */ 1763 dsl_deadlist_merge(&ds_next->ds_deadlist, 1764 ds->ds_phys->ds_deadlist_obj, tx); 1765 } 1766 dsl_deadlist_close(&ds->ds_deadlist); 1767 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); 1768 1769 /* Collapse range in clone heads */ 1770 dsl_dataset_remove_clones_key(ds, 1771 ds->ds_phys->ds_creation_txg, tx); 1772 1773 if (dsl_dataset_is_snapshot(ds_next)) { 1774 dsl_dataset_t *ds_nextnext; 1775 1776 /* 1777 * Update next's unique to include blocks which 1778 * were previously shared by only this snapshot 1779 * and it. Those blocks will be born after the 1780 * prev snap and before this snap, and will have 1781 * died after the next snap and before the one 1782 * after that (ie. be on the snap after next's 1783 * deadlist). 1784 */ 1785 VERIFY(0 == dsl_dataset_hold_obj(dp, 1786 ds_next->ds_phys->ds_next_snap_obj, 1787 FTAG, &ds_nextnext)); 1788 dsl_deadlist_space_range(&ds_nextnext->ds_deadlist, 1789 ds->ds_phys->ds_prev_snap_txg, 1790 ds->ds_phys->ds_creation_txg, 1791 &used, &comp, &uncomp); 1792 ds_next->ds_phys->ds_unique_bytes += used; 1793 dsl_dataset_rele(ds_nextnext, FTAG); 1794 ASSERT3P(ds_next->ds_prev, ==, NULL); 1795 1796 /* Collapse range in this head. */ 1797 dsl_dataset_t *hds; 1798 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 1799 ds->ds_dir->dd_phys->dd_head_dataset_obj, 1800 FTAG, &hds)); 1801 dsl_deadlist_remove_key(&hds->ds_deadlist, 1802 ds->ds_phys->ds_creation_txg, tx); 1803 dsl_dataset_rele(hds, FTAG); 1804 1805 } else { 1806 ASSERT3P(ds_next->ds_prev, ==, ds); 1807 dsl_dataset_drop_ref(ds_next->ds_prev, ds_next); 1808 ds_next->ds_prev = NULL; 1809 if (ds_prev) { 1810 VERIFY(0 == dsl_dataset_get_ref(dp, 1811 ds->ds_phys->ds_prev_snap_obj, 1812 ds_next, &ds_next->ds_prev)); 1813 } 1814 1815 dsl_dataset_recalc_head_uniq(ds_next); 1816 1817 /* 1818 * Reduce the amount of our unconsmed refreservation 1819 * being charged to our parent by the amount of 1820 * new unique data we have gained. 1821 */ 1822 if (old_unique < ds_next->ds_reserved) { 1823 int64_t mrsdelta; 1824 uint64_t new_unique = 1825 ds_next->ds_phys->ds_unique_bytes; 1826 1827 ASSERT(old_unique <= new_unique); 1828 mrsdelta = MIN(new_unique - old_unique, 1829 ds_next->ds_reserved - old_unique); 1830 dsl_dir_diduse_space(ds->ds_dir, 1831 DD_USED_REFRSRV, -mrsdelta, 0, 0, tx); 1832 } 1833 } 1834 dsl_dataset_rele(ds_next, FTAG); 1835 } else { 1836 /* 1837 * There's no next snapshot, so this is a head dataset. 1838 * Destroy the deadlist. Unless it's a clone, the 1839 * deadlist should be empty. (If it's a clone, it's 1840 * safe to ignore the deadlist contents.) 
1841 */ 1842 struct killarg ka; 1843 1844 dsl_deadlist_close(&ds->ds_deadlist); 1845 dsl_deadlist_free(mos, ds->ds_phys->ds_deadlist_obj, tx); 1846 ds->ds_phys->ds_deadlist_obj = 0; 1847 1848 /* 1849 * Free everything that we point to (that's born after 1850 * the previous snapshot, if we are a clone) 1851 * 1852 * NB: this should be very quick, because we already 1853 * freed all the objects in open context. 1854 */ 1855 ka.ds = ds; 1856 ka.tx = tx; 1857 err = traverse_dataset(ds, ds->ds_phys->ds_prev_snap_txg, 1858 TRAVERSE_POST, kill_blkptr, &ka); 1859 ASSERT3U(err, ==, 0); 1860 ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) || 1861 ds->ds_phys->ds_unique_bytes == 0); 1862 1863 if (ds->ds_prev != NULL) { 1864 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 1865 VERIFY3U(0, ==, zap_remove_int(mos, 1866 ds->ds_prev->ds_dir->dd_phys->dd_clones, 1867 ds->ds_object, tx)); 1868 } 1869 dsl_dataset_rele(ds->ds_prev, ds); 1870 ds->ds_prev = ds_prev = NULL; 1871 } 1872 } 1873 1874 /* 1875 * This must be done after the dsl_traverse(), because it will 1876 * re-open the objset. 1877 */ 1878 if (ds->ds_objset) { 1879 dmu_objset_evict(ds->ds_objset); 1880 ds->ds_objset = NULL; 1881 } 1882 1883 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) { 1884 /* Erase the link in the dir */ 1885 dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx); 1886 ds->ds_dir->dd_phys->dd_head_dataset_obj = 0; 1887 ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0); 1888 err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx); 1889 ASSERT(err == 0); 1890 } else { 1891 /* remove from snapshot namespace */ 1892 dsl_dataset_t *ds_head; 1893 ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0); 1894 VERIFY(0 == dsl_dataset_hold_obj(dp, 1895 ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head)); 1896 VERIFY(0 == dsl_dataset_get_snapname(ds)); 1897 #ifdef ZFS_DEBUG 1898 { 1899 uint64_t val; 1900 1901 err = dsl_dataset_snap_lookup(ds_head, 1902 ds->ds_snapname, &val); 1903 ASSERT3U(err, ==, 0); 1904 ASSERT3U(val, ==, obj); 1905 } 1906 #endif 1907 err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx); 1908 ASSERT(err == 0); 1909 dsl_dataset_rele(ds_head, FTAG); 1910 } 1911 1912 if (ds_prev && ds->ds_prev != ds_prev) 1913 dsl_dataset_rele(ds_prev, FTAG); 1914 1915 spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx); 1916 spa_history_log_internal(LOG_DS_DESTROY, dp->dp_spa, tx, 1917 "dataset = %llu", ds->ds_object); 1918 1919 if (ds->ds_phys->ds_next_clones_obj != 0) { 1920 uint64_t count; 1921 ASSERT(0 == zap_count(mos, 1922 ds->ds_phys->ds_next_clones_obj, &count) && count == 0); 1923 VERIFY(0 == dmu_object_free(mos, 1924 ds->ds_phys->ds_next_clones_obj, tx)); 1925 } 1926 if (ds->ds_phys->ds_props_obj != 0) 1927 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_props_obj, tx)); 1928 if (ds->ds_phys->ds_userrefs_obj != 0) 1929 VERIFY(0 == zap_destroy(mos, ds->ds_phys->ds_userrefs_obj, tx)); 1930 dsl_dir_close(ds->ds_dir, ds); 1931 ds->ds_dir = NULL; 1932 dsl_dataset_drain_refs(ds, tag); 1933 VERIFY(0 == dmu_object_free(mos, obj, tx)); 1934 1935 if (dsda->rm_origin) { 1936 /* 1937 * Remove the origin of the clone we just destroyed. 
1938 */ 1939 struct dsl_ds_destroyarg ndsda = {0}; 1940 1941 ndsda.ds = dsda->rm_origin; 1942 dsl_dataset_destroy_sync(&ndsda, tag, tx); 1943 } 1944 } 1945 1946 static int 1947 dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx) 1948 { 1949 uint64_t asize; 1950 1951 if (!dmu_tx_is_syncing(tx)) 1952 return (0); 1953 1954 /* 1955 * If there's an fs-only reservation, any blocks that might become 1956 * owned by the snapshot dataset must be accommodated by space 1957 * outside of the reservation. 1958 */ 1959 ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds)); 1960 asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 1961 if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) 1962 return (ENOSPC); 1963 1964 /* 1965 * Propogate any reserved space for this snapshot to other 1966 * snapshot checks in this sync group. 1967 */ 1968 if (asize > 0) 1969 dsl_dir_willuse_space(ds->ds_dir, asize, tx); 1970 1971 return (0); 1972 } 1973 1974 int 1975 dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx) 1976 { 1977 dsl_dataset_t *ds = arg1; 1978 const char *snapname = arg2; 1979 int err; 1980 uint64_t value; 1981 1982 /* 1983 * We don't allow multiple snapshots of the same txg. If there 1984 * is already one, try again. 1985 */ 1986 if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg) 1987 return (EAGAIN); 1988 1989 /* 1990 * Check for conflicting name snapshot name. 1991 */ 1992 err = dsl_dataset_snap_lookup(ds, snapname, &value); 1993 if (err == 0) 1994 return (EEXIST); 1995 if (err != ENOENT) 1996 return (err); 1997 1998 /* 1999 * Check that the dataset's name is not too long. Name consists 2000 * of the dataset's length + 1 for the @-sign + snapshot name's length 2001 */ 2002 if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN) 2003 return (ENAMETOOLONG); 2004 2005 err = dsl_dataset_snapshot_reserve_space(ds, tx); 2006 if (err) 2007 return (err); 2008 2009 ds->ds_trysnap_txg = tx->tx_txg; 2010 return (0); 2011 } 2012 2013 void 2014 dsl_dataset_snapshot_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2015 { 2016 dsl_dataset_t *ds = arg1; 2017 const char *snapname = arg2; 2018 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2019 dmu_buf_t *dbuf; 2020 dsl_dataset_phys_t *dsphys; 2021 uint64_t dsobj, crtxg; 2022 objset_t *mos = dp->dp_meta_objset; 2023 int err; 2024 2025 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 2026 2027 /* 2028 * The origin's ds_creation_txg has to be < TXG_INITIAL 2029 */ 2030 if (strcmp(snapname, ORIGIN_DIR_NAME) == 0) 2031 crtxg = 1; 2032 else 2033 crtxg = tx->tx_txg; 2034 2035 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 2036 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 2037 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 2038 dmu_buf_will_dirty(dbuf, tx); 2039 dsphys = dbuf->db_data; 2040 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 2041 dsphys->ds_dir_obj = ds->ds_dir->dd_object; 2042 dsphys->ds_fsid_guid = unique_create(); 2043 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 2044 sizeof (dsphys->ds_guid)); 2045 dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj; 2046 dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg; 2047 dsphys->ds_next_snap_obj = ds->ds_object; 2048 dsphys->ds_num_children = 1; 2049 dsphys->ds_creation_time = gethrestime_sec(); 2050 dsphys->ds_creation_txg = crtxg; 2051 dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj; 2052 dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes; 2053 dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes; 2054 
dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes; 2055 dsphys->ds_flags = ds->ds_phys->ds_flags; 2056 dsphys->ds_bp = ds->ds_phys->ds_bp; 2057 dmu_buf_rele(dbuf, FTAG); 2058 2059 ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0); 2060 if (ds->ds_prev) { 2061 uint64_t next_clones_obj = 2062 ds->ds_prev->ds_phys->ds_next_clones_obj; 2063 ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj == 2064 ds->ds_object || 2065 ds->ds_prev->ds_phys->ds_num_children > 1); 2066 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { 2067 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 2068 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 2069 ds->ds_prev->ds_phys->ds_creation_txg); 2070 ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj; 2071 } else if (next_clones_obj != 0) { 2072 remove_from_next_clones(ds->ds_prev, 2073 dsphys->ds_next_snap_obj, tx); 2074 VERIFY3U(0, ==, zap_add_int(mos, 2075 next_clones_obj, dsobj, tx)); 2076 } 2077 } 2078 2079 /* 2080 * If we have a reference-reservation on this dataset, we will 2081 * need to increase the amount of refreservation being charged 2082 * since our unique space is going to zero. 2083 */ 2084 if (ds->ds_reserved) { 2085 int64_t delta; 2086 ASSERT(DS_UNIQUE_IS_ACCURATE(ds)); 2087 delta = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved); 2088 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, 2089 delta, 0, 0, tx); 2090 } 2091 2092 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2093 zfs_dbgmsg("taking snapshot %s@%s/%llu; newkey=%llu", 2094 ds->ds_dir->dd_myname, snapname, dsobj, 2095 ds->ds_phys->ds_prev_snap_txg); 2096 ds->ds_phys->ds_deadlist_obj = dsl_deadlist_clone(&ds->ds_deadlist, 2097 UINT64_MAX, ds->ds_phys->ds_prev_snap_obj, tx); 2098 dsl_deadlist_close(&ds->ds_deadlist); 2099 dsl_deadlist_open(&ds->ds_deadlist, mos, ds->ds_phys->ds_deadlist_obj); 2100 dsl_deadlist_add_key(&ds->ds_deadlist, 2101 ds->ds_phys->ds_prev_snap_txg, tx); 2102 2103 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg); 2104 ds->ds_phys->ds_prev_snap_obj = dsobj; 2105 ds->ds_phys->ds_prev_snap_txg = crtxg; 2106 ds->ds_phys->ds_unique_bytes = 0; 2107 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 2108 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 2109 2110 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj, 2111 snapname, 8, 1, &dsobj, tx); 2112 ASSERT(err == 0); 2113 2114 if (ds->ds_prev) 2115 dsl_dataset_drop_ref(ds->ds_prev, ds); 2116 VERIFY(0 == dsl_dataset_get_ref(dp, 2117 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev)); 2118 2119 dsl_scan_ds_snapshotted(ds, tx); 2120 2121 dsl_dir_snap_cmtime_update(ds->ds_dir); 2122 2123 spa_history_log_internal(LOG_DS_SNAPSHOT, dp->dp_spa, tx, 2124 "dataset = %llu", dsobj); 2125 } 2126 2127 void 2128 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx) 2129 { 2130 ASSERT(dmu_tx_is_syncing(tx)); 2131 ASSERT(ds->ds_objset != NULL); 2132 ASSERT(ds->ds_phys->ds_next_snap_obj == 0); 2133 2134 /* 2135 * in case we had to change ds_fsid_guid when we opened it, 2136 * sync it out now. 
2137 */ 2138 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2139 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid; 2140 2141 dsl_dir_dirty(ds->ds_dir, tx); 2142 dmu_objset_sync(ds->ds_objset, zio, tx); 2143 } 2144 2145 void 2146 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv) 2147 { 2148 uint64_t refd, avail, uobjs, aobjs; 2149 2150 dsl_dir_stats(ds->ds_dir, nv); 2151 2152 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs); 2153 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail); 2154 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd); 2155 2156 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION, 2157 ds->ds_phys->ds_creation_time); 2158 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG, 2159 ds->ds_phys->ds_creation_txg); 2160 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA, 2161 ds->ds_quota); 2162 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION, 2163 ds->ds_reserved); 2164 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID, 2165 ds->ds_phys->ds_guid); 2166 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE, 2167 ds->ds_phys->ds_unique_bytes); 2168 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID, 2169 ds->ds_object); 2170 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS, 2171 ds->ds_userrefs); 2172 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY, 2173 DS_IS_DEFER_DESTROY(ds) ? 1 : 0); 2174 2175 if (ds->ds_phys->ds_next_snap_obj) { 2176 /* 2177 * This is a snapshot; override the dd's space used with 2178 * our unique space and compression ratio. 2179 */ 2180 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, 2181 ds->ds_phys->ds_unique_bytes); 2182 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, 2183 ds->ds_phys->ds_compressed_bytes == 0 ? 100 : 2184 (ds->ds_phys->ds_uncompressed_bytes * 100 / 2185 ds->ds_phys->ds_compressed_bytes)); 2186 } 2187 } 2188 2189 void 2190 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat) 2191 { 2192 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg; 2193 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT; 2194 stat->dds_guid = ds->ds_phys->ds_guid; 2195 if (ds->ds_phys->ds_next_snap_obj) { 2196 stat->dds_is_snapshot = B_TRUE; 2197 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1; 2198 } else { 2199 stat->dds_is_snapshot = B_FALSE; 2200 stat->dds_num_clones = 0; 2201 } 2202 2203 /* clone origin is really a dsl_dir thing... 
*/ 2204 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); 2205 if (dsl_dir_is_clone(ds->ds_dir)) { 2206 dsl_dataset_t *ods; 2207 2208 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool, 2209 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods)); 2210 dsl_dataset_name(ods, stat->dds_origin); 2211 dsl_dataset_drop_ref(ods, FTAG); 2212 } else { 2213 stat->dds_origin[0] = '\0'; 2214 } 2215 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); 2216 } 2217 2218 uint64_t 2219 dsl_dataset_fsid_guid(dsl_dataset_t *ds) 2220 { 2221 return (ds->ds_fsid_guid); 2222 } 2223 2224 void 2225 dsl_dataset_space(dsl_dataset_t *ds, 2226 uint64_t *refdbytesp, uint64_t *availbytesp, 2227 uint64_t *usedobjsp, uint64_t *availobjsp) 2228 { 2229 *refdbytesp = ds->ds_phys->ds_used_bytes; 2230 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE); 2231 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) 2232 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes; 2233 if (ds->ds_quota != 0) { 2234 /* 2235 * Adjust available bytes according to refquota 2236 */ 2237 if (*refdbytesp < ds->ds_quota) 2238 *availbytesp = MIN(*availbytesp, 2239 ds->ds_quota - *refdbytesp); 2240 else 2241 *availbytesp = 0; 2242 } 2243 *usedobjsp = ds->ds_phys->ds_bp.blk_fill; 2244 *availobjsp = DN_MAX_OBJECT - *usedobjsp; 2245 } 2246 2247 boolean_t 2248 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds) 2249 { 2250 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2251 2252 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 2253 dsl_pool_sync_context(dp)); 2254 if (ds->ds_prev == NULL) 2255 return (B_FALSE); 2256 if (ds->ds_phys->ds_bp.blk_birth > 2257 ds->ds_prev->ds_phys->ds_creation_txg) { 2258 objset_t *os, *os_prev; 2259 /* 2260 * It may be that only the ZIL differs, because it was 2261 * reset in the head. Don't count that as being 2262 * modified. 
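 * Comparing just the meta-dnodes below catches real object changes while
 * ignoring a difference that is confined to the ZIL header.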
2263 */ 2264 if (dmu_objset_from_ds(ds, &os) != 0) 2265 return (B_TRUE); 2266 if (dmu_objset_from_ds(ds->ds_prev, &os_prev) != 0) 2267 return (B_TRUE); 2268 return (bcmp(&os->os_phys->os_meta_dnode, 2269 &os_prev->os_phys->os_meta_dnode, 2270 sizeof (os->os_phys->os_meta_dnode)) != 0); 2271 } 2272 return (B_FALSE); 2273 } 2274 2275 /* ARGSUSED */ 2276 static int 2277 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) 2278 { 2279 dsl_dataset_t *ds = arg1; 2280 char *newsnapname = arg2; 2281 dsl_dir_t *dd = ds->ds_dir; 2282 dsl_dataset_t *hds; 2283 uint64_t val; 2284 int err; 2285 2286 err = dsl_dataset_hold_obj(dd->dd_pool, 2287 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds); 2288 if (err) 2289 return (err); 2290 2291 /* new name better not be in use */ 2292 err = dsl_dataset_snap_lookup(hds, newsnapname, &val); 2293 dsl_dataset_rele(hds, FTAG); 2294 2295 if (err == 0) 2296 err = EEXIST; 2297 else if (err == ENOENT) 2298 err = 0; 2299 2300 /* dataset name + 1 for the "@" + the new snapshot name must fit */ 2301 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN) 2302 err = ENAMETOOLONG; 2303 2304 return (err); 2305 } 2306 2307 static void 2308 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2309 { 2310 dsl_dataset_t *ds = arg1; 2311 const char *newsnapname = arg2; 2312 dsl_dir_t *dd = ds->ds_dir; 2313 objset_t *mos = dd->dd_pool->dp_meta_objset; 2314 dsl_dataset_t *hds; 2315 int err; 2316 2317 ASSERT(ds->ds_phys->ds_next_snap_obj != 0); 2318 2319 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, 2320 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds)); 2321 2322 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2323 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx); 2324 ASSERT3U(err, ==, 0); 2325 mutex_enter(&ds->ds_lock); 2326 (void) strcpy(ds->ds_snapname, newsnapname); 2327 mutex_exit(&ds->ds_lock); 2328 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj, 2329 ds->ds_snapname, 8, 1, &ds->ds_object, tx); 2330 ASSERT3U(err, ==, 0); 2331 2332 spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx, 2333 "dataset = %llu", ds->ds_object); 2334 dsl_dataset_rele(hds, FTAG); 2335 } 2336 2337 struct renamesnaparg { 2338 dsl_sync_task_group_t *dstg; 2339 char failed[MAXPATHLEN]; 2340 char *oldsnap; 2341 char *newsnap; 2342 }; 2343 2344 static int 2345 dsl_snapshot_rename_one(const char *name, void *arg) 2346 { 2347 struct renamesnaparg *ra = arg; 2348 dsl_dataset_t *ds = NULL; 2349 char *snapname; 2350 int err; 2351 2352 snapname = kmem_asprintf("%s@%s", name, ra->oldsnap); 2353 (void) strlcpy(ra->failed, snapname, sizeof (ra->failed)); 2354 2355 /* 2356 * For recursive snapshot renames the parent won't be changing 2357 * so we just pass name for both the to/from argument. 2358 */ 2359 err = zfs_secpolicy_rename_perms(snapname, snapname, CRED()); 2360 if (err != 0) { 2361 strfree(snapname); 2362 return (err == ENOENT ? 0 : err); 2363 } 2364 2365 #ifdef _KERNEL 2366 /* 2367 * For all filesystems undergoing rename, we'll need to unmount it. 2368 */ 2369 (void) zfs_unmount_snap(snapname, NULL); 2370 #endif 2371 err = dsl_dataset_hold(snapname, ra->dstg, &ds); 2372 strfree(snapname); 2373 if (err != 0) 2374 return (err == ENOENT ? 
0 : err); 2375 2376 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check, 2377 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0); 2378 2379 return (0); 2380 } 2381 2382 static int 2383 dsl_recursive_rename(char *oldname, const char *newname) 2384 { 2385 int err; 2386 struct renamesnaparg *ra; 2387 dsl_sync_task_t *dst; 2388 spa_t *spa; 2389 char *cp, *fsname = spa_strdup(oldname); 2390 int len = strlen(oldname) + 1; 2391 2392 /* truncate the snapshot name to get the fsname */ 2393 cp = strchr(fsname, '@'); 2394 *cp = '\0'; 2395 2396 err = spa_open(fsname, &spa, FTAG); 2397 if (err) { 2398 kmem_free(fsname, len); 2399 return (err); 2400 } 2401 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP); 2402 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 2403 2404 ra->oldsnap = strchr(oldname, '@') + 1; 2405 ra->newsnap = strchr(newname, '@') + 1; 2406 *ra->failed = '\0'; 2407 2408 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra, 2409 DS_FIND_CHILDREN); 2410 kmem_free(fsname, len); 2411 2412 if (err == 0) { 2413 err = dsl_sync_task_group_wait(ra->dstg); 2414 } 2415 2416 for (dst = list_head(&ra->dstg->dstg_tasks); dst; 2417 dst = list_next(&ra->dstg->dstg_tasks, dst)) { 2418 dsl_dataset_t *ds = dst->dst_arg1; 2419 if (dst->dst_err) { 2420 dsl_dir_name(ds->ds_dir, ra->failed); 2421 (void) strlcat(ra->failed, "@", sizeof (ra->failed)); 2422 (void) strlcat(ra->failed, ra->newsnap, 2423 sizeof (ra->failed)); 2424 } 2425 dsl_dataset_rele(ds, ra->dstg); 2426 } 2427 2428 if (err) 2429 (void) strlcpy(oldname, ra->failed, sizeof (ra->failed)); 2430 2431 dsl_sync_task_group_destroy(ra->dstg); 2432 kmem_free(ra, sizeof (struct renamesnaparg)); 2433 spa_close(spa, FTAG); 2434 return (err); 2435 } 2436 2437 static int 2438 dsl_valid_rename(const char *oldname, void *arg) 2439 { 2440 int delta = *(int *)arg; 2441 2442 if (strlen(oldname) + delta >= MAXNAMELEN) 2443 return (ENAMETOOLONG); 2444 2445 return (0); 2446 } 2447 2448 #pragma weak dmu_objset_rename = dsl_dataset_rename 2449 int 2450 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive) 2451 { 2452 dsl_dir_t *dd; 2453 dsl_dataset_t *ds; 2454 const char *tail; 2455 int err; 2456 2457 err = dsl_dir_open(oldname, FTAG, &dd, &tail); 2458 if (err) 2459 return (err); 2460 2461 if (tail == NULL) { 2462 int delta = strlen(newname) - strlen(oldname); 2463 2464 /* if we're growing, validate child name lengths */ 2465 if (delta > 0) 2466 err = dmu_objset_find(oldname, dsl_valid_rename, 2467 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 2468 2469 if (err == 0) 2470 err = dsl_dir_rename(dd, newname); 2471 dsl_dir_close(dd, FTAG); 2472 return (err); 2473 } 2474 2475 if (tail[0] != '@') { 2476 /* the name ended in a nonexistent component */ 2477 dsl_dir_close(dd, FTAG); 2478 return (ENOENT); 2479 } 2480 2481 dsl_dir_close(dd, FTAG); 2482 2483 /* new name must be snapshot in same filesystem */ 2484 tail = strchr(newname, '@'); 2485 if (tail == NULL) 2486 return (EINVAL); 2487 tail++; 2488 if (strncmp(oldname, newname, tail - newname) != 0) 2489 return (EXDEV); 2490 2491 if (recursive) { 2492 err = dsl_recursive_rename(oldname, newname); 2493 } else { 2494 err = dsl_dataset_hold(oldname, FTAG, &ds); 2495 if (err) 2496 return (err); 2497 2498 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 2499 dsl_dataset_snapshot_rename_check, 2500 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1); 2501 2502 dsl_dataset_rele(ds, FTAG); 2503 } 2504 2505 return (err); 2506 } 2507 2508 struct promotenode { 2509 list_node_t 
link; 2510 dsl_dataset_t *ds; 2511 }; 2512 2513 struct promotearg { 2514 list_t shared_snaps, origin_snaps, clone_snaps; 2515 dsl_dataset_t *origin_origin; 2516 uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap; 2517 char *err_ds; 2518 }; 2519 2520 static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep); 2521 static boolean_t snaplist_unstable(list_t *l); 2522 2523 static int 2524 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx) 2525 { 2526 dsl_dataset_t *hds = arg1; 2527 struct promotearg *pa = arg2; 2528 struct promotenode *snap = list_head(&pa->shared_snaps); 2529 dsl_dataset_t *origin_ds = snap->ds; 2530 int err; 2531 uint64_t unused; 2532 2533 /* Check that it is a real clone */ 2534 if (!dsl_dir_is_clone(hds->ds_dir)) 2535 return (EINVAL); 2536 2537 /* Since this is so expensive, don't do the preliminary check */ 2538 if (!dmu_tx_is_syncing(tx)) 2539 return (0); 2540 2541 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) 2542 return (EXDEV); 2543 2544 /* compute origin's new unique space */ 2545 snap = list_tail(&pa->clone_snaps); 2546 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); 2547 dsl_deadlist_space_range(&snap->ds->ds_deadlist, 2548 origin_ds->ds_phys->ds_prev_snap_txg, UINT64_MAX, 2549 &pa->unique, &unused, &unused); 2550 2551 /* 2552 * Walk the snapshots that we are moving 2553 * 2554 * Compute space to transfer. Consider the incremental changes 2555 * to used for each snapshot: 2556 * (my used) = (prev's used) + (blocks born) - (blocks killed) 2557 * So each snapshot gave birth to: 2558 * (blocks born) = (my used) - (prev's used) + (blocks killed) 2559 * So a sequence would look like: 2560 * (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0) 2561 * Which simplifies to: 2562 * uN + kN + kN-1 + ... + k1 + k0 2563 * Note however, if we stop before we reach the ORIGIN we get: 2564 * uN + kN + kN-1 + ... + kM - uM-1 2565 */ 2566 pa->used = origin_ds->ds_phys->ds_used_bytes; 2567 pa->comp = origin_ds->ds_phys->ds_compressed_bytes; 2568 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes; 2569 for (snap = list_head(&pa->shared_snaps); snap; 2570 snap = list_next(&pa->shared_snaps, snap)) { 2571 uint64_t val, dlused, dlcomp, dluncomp; 2572 dsl_dataset_t *ds = snap->ds; 2573 2574 /* Check that the snapshot name does not conflict */ 2575 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2576 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val); 2577 if (err == 0) { 2578 err = EEXIST; 2579 goto out; 2580 } 2581 if (err != ENOENT) 2582 goto out; 2583 2584 /* The very first snapshot does not have a deadlist */ 2585 if (ds->ds_phys->ds_prev_snap_obj == 0) 2586 continue; 2587 2588 dsl_deadlist_space(&ds->ds_deadlist, 2589 &dlused, &dlcomp, &dluncomp); 2590 pa->used += dlused; 2591 pa->comp += dlcomp; 2592 pa->uncomp += dluncomp; 2593 } 2594 2595 /* 2596 * If we are a clone of a clone then we never reached ORIGIN, 2597 * so we need to subtract out the clone origin's used space. 
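 * (This is the "- uM-1" term in the formula above; origin_origin plays the
 * role of snapshot M-1.)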
2598 */ 2599 if (pa->origin_origin) { 2600 pa->used -= pa->origin_origin->ds_phys->ds_used_bytes; 2601 pa->comp -= pa->origin_origin->ds_phys->ds_compressed_bytes; 2602 pa->uncomp -= pa->origin_origin->ds_phys->ds_uncompressed_bytes; 2603 } 2604 2605 /* Check that there is enough space here */ 2606 err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir, 2607 pa->used); 2608 if (err) 2609 return (err); 2610 2611 /* 2612 * Compute the amounts of space that will be used by snapshots 2613 * after the promotion (for both origin and clone). For each, 2614 * it is the amount of space that will be on all of their 2615 * deadlists (that was not born before their new origin). 2616 */ 2617 if (hds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { 2618 uint64_t space; 2619 2620 /* 2621 * Note, typically this will not be a clone of a clone, 2622 * so dd_origin_txg will be < TXG_INITIAL, so 2623 * these snaplist_space() -> dsl_deadlist_space_range() 2624 * calls will be fast because they do not have to 2625 * iterate over all bps. 2626 */ 2627 snap = list_head(&pa->origin_snaps); 2628 err = snaplist_space(&pa->shared_snaps, 2629 snap->ds->ds_dir->dd_origin_txg, &pa->cloneusedsnap); 2630 if (err) 2631 return (err); 2632 2633 err = snaplist_space(&pa->clone_snaps, 2634 snap->ds->ds_dir->dd_origin_txg, &space); 2635 if (err) 2636 return (err); 2637 pa->cloneusedsnap += space; 2638 } 2639 if (origin_ds->ds_dir->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) { 2640 err = snaplist_space(&pa->origin_snaps, 2641 origin_ds->ds_phys->ds_creation_txg, &pa->originusedsnap); 2642 if (err) 2643 return (err); 2644 } 2645 2646 return (0); 2647 out: 2648 pa->err_ds = snap->ds->ds_snapname; 2649 return (err); 2650 } 2651 2652 static void 2653 dsl_dataset_promote_sync(void *arg1, void *arg2, dmu_tx_t *tx) 2654 { 2655 dsl_dataset_t *hds = arg1; 2656 struct promotearg *pa = arg2; 2657 struct promotenode *snap = list_head(&pa->shared_snaps); 2658 dsl_dataset_t *origin_ds = snap->ds; 2659 dsl_dataset_t *origin_head; 2660 dsl_dir_t *dd = hds->ds_dir; 2661 dsl_pool_t *dp = hds->ds_dir->dd_pool; 2662 dsl_dir_t *odd = NULL; 2663 uint64_t oldnext_obj; 2664 int64_t delta; 2665 2666 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)); 2667 2668 snap = list_head(&pa->origin_snaps); 2669 origin_head = snap->ds; 2670 2671 /* 2672 * We need to explicitly open odd, since origin_ds's dd will be 2673 * changing. 
2674 */ 2675 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object, 2676 NULL, FTAG, &odd)); 2677 2678 /* change origin's next snap */ 2679 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx); 2680 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj; 2681 snap = list_tail(&pa->clone_snaps); 2682 ASSERT3U(snap->ds->ds_phys->ds_prev_snap_obj, ==, origin_ds->ds_object); 2683 origin_ds->ds_phys->ds_next_snap_obj = snap->ds->ds_object; 2684 2685 /* change the origin's next clone */ 2686 if (origin_ds->ds_phys->ds_next_clones_obj) { 2687 remove_from_next_clones(origin_ds, snap->ds->ds_object, tx); 2688 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2689 origin_ds->ds_phys->ds_next_clones_obj, 2690 oldnext_obj, tx)); 2691 } 2692 2693 /* change origin */ 2694 dmu_buf_will_dirty(dd->dd_dbuf, tx); 2695 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object); 2696 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj; 2697 dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg; 2698 dmu_buf_will_dirty(odd->dd_dbuf, tx); 2699 odd->dd_phys->dd_origin_obj = origin_ds->ds_object; 2700 origin_head->ds_dir->dd_origin_txg = 2701 origin_ds->ds_phys->ds_creation_txg; 2702 2703 /* change dd_clone entries */ 2704 if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 2705 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2706 odd->dd_phys->dd_clones, hds->ds_object, tx)); 2707 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2708 pa->origin_origin->ds_dir->dd_phys->dd_clones, 2709 hds->ds_object, tx)); 2710 2711 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2712 pa->origin_origin->ds_dir->dd_phys->dd_clones, 2713 origin_head->ds_object, tx)); 2714 if (dd->dd_phys->dd_clones == 0) { 2715 dd->dd_phys->dd_clones = zap_create(dp->dp_meta_objset, 2716 DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx); 2717 } 2718 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2719 dd->dd_phys->dd_clones, origin_head->ds_object, tx)); 2720 2721 } 2722 2723 /* move snapshots to this dir */ 2724 for (snap = list_head(&pa->shared_snaps); snap; 2725 snap = list_next(&pa->shared_snaps, snap)) { 2726 dsl_dataset_t *ds = snap->ds; 2727 2728 /* unregister props as dsl_dir is changing */ 2729 if (ds->ds_objset) { 2730 dmu_objset_evict(ds->ds_objset); 2731 ds->ds_objset = NULL; 2732 } 2733 /* move snap name entry */ 2734 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2735 VERIFY(0 == dsl_dataset_snap_remove(origin_head, 2736 ds->ds_snapname, tx)); 2737 VERIFY(0 == zap_add(dp->dp_meta_objset, 2738 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, 2739 8, 1, &ds->ds_object, tx)); 2740 2741 /* change containing dsl_dir */ 2742 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2743 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object); 2744 ds->ds_phys->ds_dir_obj = dd->dd_object; 2745 ASSERT3P(ds->ds_dir, ==, odd); 2746 dsl_dir_close(ds->ds_dir, ds); 2747 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, 2748 NULL, ds, &ds->ds_dir)); 2749 2750 /* move any clone references */ 2751 if (ds->ds_phys->ds_next_clones_obj && 2752 spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) { 2753 zap_cursor_t zc; 2754 zap_attribute_t za; 2755 2756 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2757 ds->ds_phys->ds_next_clones_obj); 2758 zap_cursor_retrieve(&zc, &za) == 0; 2759 zap_cursor_advance(&zc)) { 2760 dsl_dataset_t *cnds; 2761 uint64_t o; 2762 2763 if (za.za_first_integer == oldnext_obj) { 2764 /* 2765 * We've already moved the 2766 * origin's reference. 
2767 */ 2768 continue; 2769 } 2770 2771 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, 2772 za.za_first_integer, FTAG, &cnds)); 2773 o = cnds->ds_dir->dd_phys->dd_head_dataset_obj; 2774 2775 VERIFY3U(zap_remove_int(dp->dp_meta_objset, 2776 odd->dd_phys->dd_clones, o, tx), ==, 0); 2777 VERIFY3U(zap_add_int(dp->dp_meta_objset, 2778 dd->dd_phys->dd_clones, o, tx), ==, 0); 2779 dsl_dataset_rele(cnds, FTAG); 2780 } 2781 zap_cursor_fini(&zc); 2782 } 2783 2784 ASSERT3U(dsl_prop_numcb(ds), ==, 0); 2785 } 2786 2787 /* 2788 * Change space accounting. 2789 * Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either 2790 * both be valid, or both be 0 (resulting in delta == 0). This 2791 * is true for each of {clone,origin} independently. 2792 */ 2793 2794 delta = pa->cloneusedsnap - 2795 dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; 2796 ASSERT3S(delta, >=, 0); 2797 ASSERT3U(pa->used, >=, delta); 2798 dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx); 2799 dsl_dir_diduse_space(dd, DD_USED_HEAD, 2800 pa->used - delta, pa->comp, pa->uncomp, tx); 2801 2802 delta = pa->originusedsnap - 2803 odd->dd_phys->dd_used_breakdown[DD_USED_SNAP]; 2804 ASSERT3S(delta, <=, 0); 2805 ASSERT3U(pa->used, >=, -delta); 2806 dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx); 2807 dsl_dir_diduse_space(odd, DD_USED_HEAD, 2808 -pa->used - delta, -pa->comp, -pa->uncomp, tx); 2809 2810 origin_ds->ds_phys->ds_unique_bytes = pa->unique; 2811 2812 /* log history record */ 2813 spa_history_log_internal(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx, 2814 "dataset = %llu", hds->ds_object); 2815 2816 dsl_dir_close(odd, FTAG); 2817 } 2818 2819 static char *snaplist_tag = "snaplist"; 2820 /* 2821 * Make a list of dsl_dataset_t's for the snapshots between first_obj 2822 * (exclusive) and last_obj (inclusive). The list will be in reverse 2823 * order (last_obj will be the list_head()). If first_obj == 0, do all 2824 * snapshots back to this dataset's origin. 
2825 */ 2826 static int 2827 snaplist_make(dsl_pool_t *dp, boolean_t own, 2828 uint64_t first_obj, uint64_t last_obj, list_t *l) 2829 { 2830 uint64_t obj = last_obj; 2831 2832 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock)); 2833 2834 list_create(l, sizeof (struct promotenode), 2835 offsetof(struct promotenode, link)); 2836 2837 while (obj != first_obj) { 2838 dsl_dataset_t *ds; 2839 struct promotenode *snap; 2840 int err; 2841 2842 if (own) { 2843 err = dsl_dataset_own_obj(dp, obj, 2844 0, snaplist_tag, &ds); 2845 if (err == 0) 2846 dsl_dataset_make_exclusive(ds, snaplist_tag); 2847 } else { 2848 err = dsl_dataset_hold_obj(dp, obj, snaplist_tag, &ds); 2849 } 2850 if (err == ENOENT) { 2851 /* lost race with snapshot destroy */ 2852 struct promotenode *last = list_tail(l); 2853 ASSERT(obj != last->ds->ds_phys->ds_prev_snap_obj); 2854 obj = last->ds->ds_phys->ds_prev_snap_obj; 2855 continue; 2856 } else if (err) { 2857 return (err); 2858 } 2859 2860 if (first_obj == 0) 2861 first_obj = ds->ds_dir->dd_phys->dd_origin_obj; 2862 2863 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP); 2864 snap->ds = ds; 2865 list_insert_tail(l, snap); 2866 obj = ds->ds_phys->ds_prev_snap_obj; 2867 } 2868 2869 return (0); 2870 } 2871 2872 static int 2873 snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep) 2874 { 2875 struct promotenode *snap; 2876 2877 *spacep = 0; 2878 for (snap = list_head(l); snap; snap = list_next(l, snap)) { 2879 uint64_t used, comp, uncomp; 2880 dsl_deadlist_space_range(&snap->ds->ds_deadlist, 2881 mintxg, UINT64_MAX, &used, &comp, &uncomp); 2882 *spacep += used; 2883 } 2884 return (0); 2885 } 2886 2887 static void 2888 snaplist_destroy(list_t *l, boolean_t own) 2889 { 2890 struct promotenode *snap; 2891 2892 if (!l || !list_link_active(&l->list_head)) 2893 return; 2894 2895 while ((snap = list_tail(l)) != NULL) { 2896 list_remove(l, snap); 2897 if (own) 2898 dsl_dataset_disown(snap->ds, snaplist_tag); 2899 else 2900 dsl_dataset_rele(snap->ds, snaplist_tag); 2901 kmem_free(snap, sizeof (struct promotenode)); 2902 } 2903 list_destroy(l); 2904 } 2905 2906 /* 2907 * Promote a clone. Nomenclature note: 2908 * "clone" or "cds": the original clone which is being promoted 2909 * "origin" or "ods": the snapshot which is originally clone's origin 2910 * "origin head" or "ohds": the dataset which is the head 2911 * (filesystem/volume) for the origin 2912 * "origin origin": the origin of the origin's filesystem (typically 2913 * NULL, indicating that the clone is not a clone of a clone). 2914 */ 2915 int 2916 dsl_dataset_promote(const char *name, char *conflsnap) 2917 { 2918 dsl_dataset_t *ds; 2919 dsl_dir_t *dd; 2920 dsl_pool_t *dp; 2921 dmu_object_info_t doi; 2922 struct promotearg pa = { 0 }; 2923 struct promotenode *snap; 2924 int err; 2925 2926 err = dsl_dataset_hold(name, FTAG, &ds); 2927 if (err) 2928 return (err); 2929 dd = ds->ds_dir; 2930 dp = dd->dd_pool; 2931 2932 err = dmu_object_info(dp->dp_meta_objset, 2933 ds->ds_phys->ds_snapnames_zapobj, &doi); 2934 if (err) { 2935 dsl_dataset_rele(ds, FTAG); 2936 return (err); 2937 } 2938 2939 if (dsl_dataset_is_snapshot(ds) || dd->dd_phys->dd_origin_obj == 0) { 2940 dsl_dataset_rele(ds, FTAG); 2941 return (EINVAL); 2942 } 2943 2944 /* 2945 * We are going to inherit all the snapshots taken before our 2946 * origin (i.e., our new origin will be our parent's origin). 2947 * Take ownership of them so that we can rename them into our 2948 * namespace. 
2949 */ 2950 rw_enter(&dp->dp_config_rwlock, RW_READER); 2951 2952 err = snaplist_make(dp, B_TRUE, 0, dd->dd_phys->dd_origin_obj, 2953 &pa.shared_snaps); 2954 if (err != 0) 2955 goto out; 2956 2957 err = snaplist_make(dp, B_FALSE, 0, ds->ds_object, &pa.clone_snaps); 2958 if (err != 0) 2959 goto out; 2960 2961 snap = list_head(&pa.shared_snaps); 2962 ASSERT3U(snap->ds->ds_object, ==, dd->dd_phys->dd_origin_obj); 2963 err = snaplist_make(dp, B_FALSE, dd->dd_phys->dd_origin_obj, 2964 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, &pa.origin_snaps); 2965 if (err != 0) 2966 goto out; 2967 2968 if (snap->ds->ds_dir->dd_phys->dd_origin_obj != 0) { 2969 err = dsl_dataset_hold_obj(dp, 2970 snap->ds->ds_dir->dd_phys->dd_origin_obj, 2971 FTAG, &pa.origin_origin); 2972 if (err != 0) 2973 goto out; 2974 } 2975 2976 out: 2977 rw_exit(&dp->dp_config_rwlock); 2978 2979 /* 2980 * Add in 128x the snapnames zapobj size, since we will be moving 2981 * a bunch of snapnames to the promoted ds, and dirtying their 2982 * bonus buffers. 2983 */ 2984 if (err == 0) { 2985 err = dsl_sync_task_do(dp, dsl_dataset_promote_check, 2986 dsl_dataset_promote_sync, ds, &pa, 2987 2 + 2 * doi.doi_physical_blocks_512); 2988 if (err && pa.err_ds && conflsnap) 2989 (void) strncpy(conflsnap, pa.err_ds, MAXNAMELEN); 2990 } 2991 2992 snaplist_destroy(&pa.shared_snaps, B_TRUE); 2993 snaplist_destroy(&pa.clone_snaps, B_FALSE); 2994 snaplist_destroy(&pa.origin_snaps, B_FALSE); 2995 if (pa.origin_origin) 2996 dsl_dataset_rele(pa.origin_origin, FTAG); 2997 dsl_dataset_rele(ds, FTAG); 2998 return (err); 2999 } 3000 3001 struct cloneswaparg { 3002 dsl_dataset_t *cds; /* clone dataset */ 3003 dsl_dataset_t *ohds; /* origin's head dataset */ 3004 boolean_t force; 3005 int64_t unused_refres_delta; /* change in unconsumed refreservation */ 3006 }; 3007 3008 /* ARGSUSED */ 3009 static int 3010 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx) 3011 { 3012 struct cloneswaparg *csa = arg1; 3013 3014 /* they should both be heads */ 3015 if (dsl_dataset_is_snapshot(csa->cds) || 3016 dsl_dataset_is_snapshot(csa->ohds)) 3017 return (EINVAL); 3018 3019 /* the branch point should be just before them */ 3020 if (csa->cds->ds_prev != csa->ohds->ds_prev) 3021 return (EINVAL); 3022 3023 /* cds should be the clone (unless they are unrelated) */ 3024 if (csa->cds->ds_prev != NULL && 3025 csa->cds->ds_prev != csa->cds->ds_dir->dd_pool->dp_origin_snap && 3026 csa->ohds->ds_object != 3027 csa->cds->ds_prev->ds_phys->ds_next_snap_obj) 3028 return (EINVAL); 3029 3030 /* the clone should be a child of the origin */ 3031 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir) 3032 return (EINVAL); 3033 3034 /* ohds shouldn't be modified unless 'force' */ 3035 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds)) 3036 return (ETXTBSY); 3037 3038 /* adjust amount of any unconsumed refreservation */ 3039 csa->unused_refres_delta = 3040 (int64_t)MIN(csa->ohds->ds_reserved, 3041 csa->ohds->ds_phys->ds_unique_bytes) - 3042 (int64_t)MIN(csa->ohds->ds_reserved, 3043 csa->cds->ds_phys->ds_unique_bytes); 3044 3045 if (csa->unused_refres_delta > 0 && 3046 csa->unused_refres_delta > 3047 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE)) 3048 return (ENOSPC); 3049 3050 if (csa->ohds->ds_quota != 0 && 3051 csa->cds->ds_phys->ds_unique_bytes > csa->ohds->ds_quota) 3052 return (EDQUOT); 3053 3054 return (0); 3055 } 3056 3057 /* ARGSUSED */ 3058 static void 3059 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3060 { 3061 struct 
cloneswaparg *csa = arg1; 3062 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool; 3063 3064 ASSERT(csa->cds->ds_reserved == 0); 3065 ASSERT(csa->ohds->ds_quota == 0 || 3066 csa->cds->ds_phys->ds_unique_bytes <= csa->ohds->ds_quota); 3067 3068 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx); 3069 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx); 3070 3071 if (csa->cds->ds_objset != NULL) { 3072 dmu_objset_evict(csa->cds->ds_objset); 3073 csa->cds->ds_objset = NULL; 3074 } 3075 3076 if (csa->ohds->ds_objset != NULL) { 3077 dmu_objset_evict(csa->ohds->ds_objset); 3078 csa->ohds->ds_objset = NULL; 3079 } 3080 3081 /* 3082 * Reset origin's unique bytes, if it exists. 3083 */ 3084 if (csa->cds->ds_prev) { 3085 dsl_dataset_t *origin = csa->cds->ds_prev; 3086 uint64_t comp, uncomp; 3087 3088 dmu_buf_will_dirty(origin->ds_dbuf, tx); 3089 dsl_deadlist_space_range(&csa->cds->ds_deadlist, 3090 origin->ds_phys->ds_prev_snap_txg, UINT64_MAX, 3091 &origin->ds_phys->ds_unique_bytes, &comp, &uncomp); 3092 } 3093 3094 /* swap blkptrs */ 3095 { 3096 blkptr_t tmp; 3097 tmp = csa->ohds->ds_phys->ds_bp; 3098 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp; 3099 csa->cds->ds_phys->ds_bp = tmp; 3100 } 3101 3102 /* set dd_*_bytes */ 3103 { 3104 int64_t dused, dcomp, duncomp; 3105 uint64_t cdl_used, cdl_comp, cdl_uncomp; 3106 uint64_t odl_used, odl_comp, odl_uncomp; 3107 3108 ASSERT3U(csa->cds->ds_dir->dd_phys-> 3109 dd_used_breakdown[DD_USED_SNAP], ==, 0); 3110 3111 dsl_deadlist_space(&csa->cds->ds_deadlist, 3112 &cdl_used, &cdl_comp, &cdl_uncomp); 3113 dsl_deadlist_space(&csa->ohds->ds_deadlist, 3114 &odl_used, &odl_comp, &odl_uncomp); 3115 3116 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used - 3117 (csa->ohds->ds_phys->ds_used_bytes + odl_used); 3118 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp - 3119 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp); 3120 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes + 3121 cdl_uncomp - 3122 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp); 3123 3124 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_HEAD, 3125 dused, dcomp, duncomp, tx); 3126 dsl_dir_diduse_space(csa->cds->ds_dir, DD_USED_HEAD, 3127 -dused, -dcomp, -duncomp, tx); 3128 3129 /* 3130 * The difference in the space used by snapshots is the 3131 * difference in snapshot space due to the head's 3132 * deadlist (since that's the only thing that's 3133 * changing that affects the snapused). 3134 */ 3135 dsl_deadlist_space_range(&csa->cds->ds_deadlist, 3136 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, 3137 &cdl_used, &cdl_comp, &cdl_uncomp); 3138 dsl_deadlist_space_range(&csa->ohds->ds_deadlist, 3139 csa->ohds->ds_dir->dd_origin_txg, UINT64_MAX, 3140 &odl_used, &odl_comp, &odl_uncomp); 3141 dsl_dir_transfer_space(csa->ohds->ds_dir, cdl_used - odl_used, 3142 DD_USED_HEAD, DD_USED_SNAP, tx); 3143 } 3144 3145 /* swap ds_*_bytes */ 3146 SWITCH64(csa->ohds->ds_phys->ds_used_bytes, 3147 csa->cds->ds_phys->ds_used_bytes); 3148 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes, 3149 csa->cds->ds_phys->ds_compressed_bytes); 3150 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes, 3151 csa->cds->ds_phys->ds_uncompressed_bytes); 3152 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes, 3153 csa->cds->ds_phys->ds_unique_bytes); 3154 3155 /* apply any parent delta for change in unconsumed refreservation */ 3156 dsl_dir_diduse_space(csa->ohds->ds_dir, DD_USED_REFRSRV, 3157 csa->unused_refres_delta, 0, 0, tx); 3158 3159 /* 3160 * Swap deadlists. 
3161 */ 3162 dsl_deadlist_close(&csa->cds->ds_deadlist); 3163 dsl_deadlist_close(&csa->ohds->ds_deadlist); 3164 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj, 3165 csa->cds->ds_phys->ds_deadlist_obj); 3166 dsl_deadlist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset, 3167 csa->cds->ds_phys->ds_deadlist_obj); 3168 dsl_deadlist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset, 3169 csa->ohds->ds_phys->ds_deadlist_obj); 3170 3171 dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx); 3172 } 3173 3174 /* 3175 * Swap 'clone' with its origin head datasets. Used at the end of "zfs 3176 * recv" into an existing fs to swizzle the file system to the new 3177 * version, and by "zfs rollback". Can also be used to swap two 3178 * independent head datasets if neither has any snapshots. 3179 */ 3180 int 3181 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, 3182 boolean_t force) 3183 { 3184 struct cloneswaparg csa; 3185 int error; 3186 3187 ASSERT(clone->ds_owner); 3188 ASSERT(origin_head->ds_owner); 3189 retry: 3190 /* 3191 * Need exclusive access for the swap. If we're swapping these 3192 * datasets back after an error, we already hold the locks. 3193 */ 3194 if (!RW_WRITE_HELD(&clone->ds_rwlock)) 3195 rw_enter(&clone->ds_rwlock, RW_WRITER); 3196 if (!RW_WRITE_HELD(&origin_head->ds_rwlock) && 3197 !rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) { 3198 rw_exit(&clone->ds_rwlock); 3199 rw_enter(&origin_head->ds_rwlock, RW_WRITER); 3200 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) { 3201 rw_exit(&origin_head->ds_rwlock); 3202 goto retry; 3203 } 3204 } 3205 csa.cds = clone; 3206 csa.ohds = origin_head; 3207 csa.force = force; 3208 error = dsl_sync_task_do(clone->ds_dir->dd_pool, 3209 dsl_dataset_clone_swap_check, 3210 dsl_dataset_clone_swap_sync, &csa, NULL, 9); 3211 return (error); 3212 } 3213 3214 /* 3215 * Given a pool name and a dataset object number in that pool, 3216 * return the name of that dataset. 3217 */ 3218 int 3219 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf) 3220 { 3221 spa_t *spa; 3222 dsl_pool_t *dp; 3223 dsl_dataset_t *ds; 3224 int error; 3225 3226 if ((error = spa_open(pname, &spa, FTAG)) != 0) 3227 return (error); 3228 dp = spa_get_dsl(spa); 3229 rw_enter(&dp->dp_config_rwlock, RW_READER); 3230 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) { 3231 dsl_dataset_name(ds, buf); 3232 dsl_dataset_rele(ds, FTAG); 3233 } 3234 rw_exit(&dp->dp_config_rwlock); 3235 spa_close(spa, FTAG); 3236 3237 return (error); 3238 } 3239 3240 int 3241 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, 3242 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv) 3243 { 3244 int error = 0; 3245 3246 ASSERT3S(asize, >, 0); 3247 3248 /* 3249 * *ref_rsrv is the portion of asize that will come from any 3250 * unconsumed refreservation space. 3251 */ 3252 *ref_rsrv = 0; 3253 3254 mutex_enter(&ds->ds_lock); 3255 /* 3256 * Make a space adjustment for reserved bytes. 
3257 */ 3258 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) { 3259 ASSERT3U(*used, >=, 3260 ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 3261 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 3262 *ref_rsrv = 3263 asize - MIN(asize, parent_delta(ds, asize + inflight)); 3264 } 3265 3266 if (!check_quota || ds->ds_quota == 0) { 3267 mutex_exit(&ds->ds_lock); 3268 return (0); 3269 } 3270 /* 3271 * If they are requesting more space, and our current estimate 3272 * is over quota, they get to try again unless the actual 3273 * on-disk is over quota and there are no pending changes (which 3274 * may free up space for us). 3275 */ 3276 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) { 3277 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota) 3278 error = ERESTART; 3279 else 3280 error = EDQUOT; 3281 } 3282 mutex_exit(&ds->ds_lock); 3283 3284 return (error); 3285 } 3286 3287 /* ARGSUSED */ 3288 static int 3289 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) 3290 { 3291 dsl_dataset_t *ds = arg1; 3292 dsl_prop_setarg_t *psa = arg2; 3293 int err; 3294 3295 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA) 3296 return (ENOTSUP); 3297 3298 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) 3299 return (err); 3300 3301 if (psa->psa_effective_value == 0) 3302 return (0); 3303 3304 if (psa->psa_effective_value < ds->ds_phys->ds_used_bytes || 3305 psa->psa_effective_value < ds->ds_reserved) 3306 return (ENOSPC); 3307 3308 return (0); 3309 } 3310 3311 extern void dsl_prop_set_sync(void *, void *, dmu_tx_t *); 3312 3313 void 3314 dsl_dataset_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3315 { 3316 dsl_dataset_t *ds = arg1; 3317 dsl_prop_setarg_t *psa = arg2; 3318 uint64_t effective_value = psa->psa_effective_value; 3319 3320 dsl_prop_set_sync(ds, psa, tx); 3321 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); 3322 3323 if (ds->ds_quota != effective_value) { 3324 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3325 ds->ds_quota = effective_value; 3326 3327 spa_history_log_internal(LOG_DS_REFQUOTA, 3328 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu ", 3329 (longlong_t)ds->ds_quota, ds->ds_object); 3330 } 3331 } 3332 3333 int 3334 dsl_dataset_set_quota(const char *dsname, zprop_source_t source, uint64_t quota) 3335 { 3336 dsl_dataset_t *ds; 3337 dsl_prop_setarg_t psa; 3338 int err; 3339 3340 dsl_prop_setarg_init_uint64(&psa, "refquota", source, "a); 3341 3342 err = dsl_dataset_hold(dsname, FTAG, &ds); 3343 if (err) 3344 return (err); 3345 3346 /* 3347 * If someone removes a file, then tries to set the quota, we 3348 * want to make sure the file freeing takes effect. 
3349 */ 3350 txg_wait_open(ds->ds_dir->dd_pool, 0); 3351 3352 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 3353 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync, 3354 ds, &psa, 0); 3355 3356 dsl_dataset_rele(ds, FTAG); 3357 return (err); 3358 } 3359 3360 static int 3361 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) 3362 { 3363 dsl_dataset_t *ds = arg1; 3364 dsl_prop_setarg_t *psa = arg2; 3365 uint64_t effective_value; 3366 uint64_t unique; 3367 int err; 3368 3369 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < 3370 SPA_VERSION_REFRESERVATION) 3371 return (ENOTSUP); 3372 3373 if (dsl_dataset_is_snapshot(ds)) 3374 return (EINVAL); 3375 3376 if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0) 3377 return (err); 3378 3379 effective_value = psa->psa_effective_value; 3380 3381 /* 3382 * If we are doing the preliminary check in open context, the 3383 * space estimates may be inaccurate. 3384 */ 3385 if (!dmu_tx_is_syncing(tx)) 3386 return (0); 3387 3388 mutex_enter(&ds->ds_lock); 3389 if (!DS_UNIQUE_IS_ACCURATE(ds)) 3390 dsl_dataset_recalc_head_uniq(ds); 3391 unique = ds->ds_phys->ds_unique_bytes; 3392 mutex_exit(&ds->ds_lock); 3393 3394 if (MAX(unique, effective_value) > MAX(unique, ds->ds_reserved)) { 3395 uint64_t delta = MAX(unique, effective_value) - 3396 MAX(unique, ds->ds_reserved); 3397 3398 if (delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) 3399 return (ENOSPC); 3400 if (ds->ds_quota > 0 && 3401 effective_value > ds->ds_quota) 3402 return (ENOSPC); 3403 } 3404 3405 return (0); 3406 } 3407 3408 static void 3409 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3410 { 3411 dsl_dataset_t *ds = arg1; 3412 dsl_prop_setarg_t *psa = arg2; 3413 uint64_t effective_value = psa->psa_effective_value; 3414 uint64_t unique; 3415 int64_t delta; 3416 3417 dsl_prop_set_sync(ds, psa, tx); 3418 DSL_PROP_CHECK_PREDICTION(ds->ds_dir, psa); 3419 3420 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3421 3422 mutex_enter(&ds->ds_dir->dd_lock); 3423 mutex_enter(&ds->ds_lock); 3424 ASSERT(DS_UNIQUE_IS_ACCURATE(ds)); 3425 unique = ds->ds_phys->ds_unique_bytes; 3426 delta = MAX(0, (int64_t)(effective_value - unique)) - 3427 MAX(0, (int64_t)(ds->ds_reserved - unique)); 3428 ds->ds_reserved = effective_value; 3429 mutex_exit(&ds->ds_lock); 3430 3431 dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx); 3432 mutex_exit(&ds->ds_dir->dd_lock); 3433 3434 spa_history_log_internal(LOG_DS_REFRESERV, 3435 ds->ds_dir->dd_pool->dp_spa, tx, "%lld dataset = %llu", 3436 (longlong_t)effective_value, ds->ds_object); 3437 } 3438 3439 int 3440 dsl_dataset_set_reservation(const char *dsname, zprop_source_t source, 3441 uint64_t reservation) 3442 { 3443 dsl_dataset_t *ds; 3444 dsl_prop_setarg_t psa; 3445 int err; 3446 3447 dsl_prop_setarg_init_uint64(&psa, "refreservation", source, 3448 &reservation); 3449 3450 err = dsl_dataset_hold(dsname, FTAG, &ds); 3451 if (err) 3452 return (err); 3453 3454 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 3455 dsl_dataset_set_reservation_check, 3456 dsl_dataset_set_reservation_sync, ds, &psa, 0); 3457 3458 dsl_dataset_rele(ds, FTAG); 3459 return (err); 3460 } 3461 3462 typedef struct zfs_hold_cleanup_arg { 3463 dsl_pool_t *dp; 3464 uint64_t dsobj; 3465 char htag[MAXNAMELEN]; 3466 } zfs_hold_cleanup_arg_t; 3467 3468 static void 3469 dsl_dataset_user_release_onexit(void *arg) 3470 { 3471 zfs_hold_cleanup_arg_t *ca = arg; 3472 3473 (void) dsl_dataset_user_release_tmp(ca->dp, ca->dsobj, ca->htag, 3474 B_TRUE); 3475 kmem_free(ca, 
sizeof (zfs_hold_cleanup_arg_t)); 3476 } 3477 3478 void 3479 dsl_register_onexit_hold_cleanup(dsl_dataset_t *ds, const char *htag, 3480 minor_t minor) 3481 { 3482 zfs_hold_cleanup_arg_t *ca; 3483 3484 ca = kmem_alloc(sizeof (zfs_hold_cleanup_arg_t), KM_SLEEP); 3485 ca->dp = ds->ds_dir->dd_pool; 3486 ca->dsobj = ds->ds_object; 3487 (void) strlcpy(ca->htag, htag, sizeof (ca->htag)); 3488 VERIFY3U(0, ==, zfs_onexit_add_cb(minor, 3489 dsl_dataset_user_release_onexit, ca, NULL)); 3490 } 3491 3492 /* 3493 * If you add new checks here, you may need to add 3494 * additional checks to the "temporary" case in 3495 * snapshot_check() in dmu_objset.c. 3496 */ 3497 static int 3498 dsl_dataset_user_hold_check(void *arg1, void *arg2, dmu_tx_t *tx) 3499 { 3500 dsl_dataset_t *ds = arg1; 3501 struct dsl_ds_holdarg *ha = arg2; 3502 char *htag = ha->htag; 3503 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 3504 int error = 0; 3505 3506 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) 3507 return (ENOTSUP); 3508 3509 if (!dsl_dataset_is_snapshot(ds)) 3510 return (EINVAL); 3511 3512 /* tags must be unique */ 3513 mutex_enter(&ds->ds_lock); 3514 if (ds->ds_phys->ds_userrefs_obj) { 3515 error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj, htag, 3516 8, 1, tx); 3517 if (error == 0) 3518 error = EEXIST; 3519 else if (error == ENOENT) 3520 error = 0; 3521 } 3522 mutex_exit(&ds->ds_lock); 3523 3524 if (error == 0 && ha->temphold && 3525 strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN) 3526 error = E2BIG; 3527 3528 return (error); 3529 } 3530 3531 void 3532 dsl_dataset_user_hold_sync(void *arg1, void *arg2, dmu_tx_t *tx) 3533 { 3534 dsl_dataset_t *ds = arg1; 3535 struct dsl_ds_holdarg *ha = arg2; 3536 char *htag = ha->htag; 3537 dsl_pool_t *dp = ds->ds_dir->dd_pool; 3538 objset_t *mos = dp->dp_meta_objset; 3539 uint64_t now = gethrestime_sec(); 3540 uint64_t zapobj; 3541 3542 mutex_enter(&ds->ds_lock); 3543 if (ds->ds_phys->ds_userrefs_obj == 0) { 3544 /* 3545 * This is the first user hold for this dataset. Create 3546 * the userrefs zap object. 
3547 */ 3548 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3549 zapobj = ds->ds_phys->ds_userrefs_obj = 3550 zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx); 3551 } else { 3552 zapobj = ds->ds_phys->ds_userrefs_obj; 3553 } 3554 ds->ds_userrefs++; 3555 mutex_exit(&ds->ds_lock); 3556 3557 VERIFY(0 == zap_add(mos, zapobj, htag, 8, 1, &now, tx)); 3558 3559 if (ha->temphold) { 3560 VERIFY(0 == dsl_pool_user_hold(dp, ds->ds_object, 3561 htag, &now, tx)); 3562 } 3563 3564 spa_history_log_internal(LOG_DS_USER_HOLD, 3565 dp->dp_spa, tx, "<%s> temp = %d dataset = %llu", htag, 3566 (int)ha->temphold, ds->ds_object); 3567 } 3568 3569 static int 3570 dsl_dataset_user_hold_one(const char *dsname, void *arg) 3571 { 3572 struct dsl_ds_holdarg *ha = arg; 3573 dsl_dataset_t *ds; 3574 int error; 3575 char *name; 3576 3577 /* alloc a buffer to hold dsname@snapname plus terminating NULL */ 3578 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3579 error = dsl_dataset_hold(name, ha->dstg, &ds); 3580 strfree(name); 3581 if (error == 0) { 3582 ha->gotone = B_TRUE; 3583 dsl_sync_task_create(ha->dstg, dsl_dataset_user_hold_check, 3584 dsl_dataset_user_hold_sync, ds, ha, 0); 3585 } else if (error == ENOENT && ha->recursive) { 3586 error = 0; 3587 } else { 3588 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3589 } 3590 return (error); 3591 } 3592 3593 int 3594 dsl_dataset_user_hold_for_send(dsl_dataset_t *ds, char *htag, 3595 boolean_t temphold) 3596 { 3597 struct dsl_ds_holdarg *ha; 3598 int error; 3599 3600 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3601 ha->htag = htag; 3602 ha->temphold = temphold; 3603 error = dsl_sync_task_do(ds->ds_dir->dd_pool, 3604 dsl_dataset_user_hold_check, dsl_dataset_user_hold_sync, 3605 ds, ha, 0); 3606 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3607 3608 return (error); 3609 } 3610 3611 int 3612 dsl_dataset_user_hold(char *dsname, char *snapname, char *htag, 3613 boolean_t recursive, boolean_t temphold, int cleanup_fd) 3614 { 3615 struct dsl_ds_holdarg *ha; 3616 dsl_sync_task_t *dst; 3617 spa_t *spa; 3618 int error; 3619 minor_t minor = 0; 3620 3621 if (cleanup_fd != -1) { 3622 /* Currently we only support cleanup-on-exit of tempholds. */ 3623 if (!temphold) 3624 return (EINVAL); 3625 error = zfs_onexit_fd_hold(cleanup_fd, &minor); 3626 if (error) 3627 return (error); 3628 } 3629 3630 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3631 3632 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3633 3634 error = spa_open(dsname, &spa, FTAG); 3635 if (error) { 3636 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3637 if (cleanup_fd != -1) 3638 zfs_onexit_fd_rele(cleanup_fd); 3639 return (error); 3640 } 3641 3642 ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 3643 ha->htag = htag; 3644 ha->snapname = snapname; 3645 ha->recursive = recursive; 3646 ha->temphold = temphold; 3647 3648 if (recursive) { 3649 error = dmu_objset_find(dsname, dsl_dataset_user_hold_one, 3650 ha, DS_FIND_CHILDREN); 3651 } else { 3652 error = dsl_dataset_user_hold_one(dsname, ha); 3653 } 3654 if (error == 0) 3655 error = dsl_sync_task_group_wait(ha->dstg); 3656 3657 for (dst = list_head(&ha->dstg->dstg_tasks); dst; 3658 dst = list_next(&ha->dstg->dstg_tasks, dst)) { 3659 dsl_dataset_t *ds = dst->dst_arg1; 3660 3661 if (dst->dst_err) { 3662 dsl_dataset_name(ds, ha->failed); 3663 *strchr(ha->failed, '@') = '\0'; 3664 } else if (error == 0 && minor != 0 && temphold) { 3665 /* 3666 * If this hold is to be released upon process exit, 3667 * register that action now. 
3668 */ 3669 dsl_register_onexit_hold_cleanup(ds, htag, minor); 3670 } 3671 dsl_dataset_rele(ds, ha->dstg); 3672 } 3673 3674 if (error == 0 && recursive && !ha->gotone) 3675 error = ENOENT; 3676 3677 if (error) 3678 (void) strlcpy(dsname, ha->failed, sizeof (ha->failed)); 3679 3680 dsl_sync_task_group_destroy(ha->dstg); 3681 3682 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3683 spa_close(spa, FTAG); 3684 if (cleanup_fd != -1) 3685 zfs_onexit_fd_rele(cleanup_fd); 3686 return (error); 3687 } 3688 3689 struct dsl_ds_releasearg { 3690 dsl_dataset_t *ds; 3691 const char *htag; 3692 boolean_t own; /* do we own or just hold ds? */ 3693 }; 3694 3695 static int 3696 dsl_dataset_release_might_destroy(dsl_dataset_t *ds, const char *htag, 3697 boolean_t *might_destroy) 3698 { 3699 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 3700 uint64_t zapobj; 3701 uint64_t tmp; 3702 int error; 3703 3704 *might_destroy = B_FALSE; 3705 3706 mutex_enter(&ds->ds_lock); 3707 zapobj = ds->ds_phys->ds_userrefs_obj; 3708 if (zapobj == 0) { 3709 /* The tag can't possibly exist */ 3710 mutex_exit(&ds->ds_lock); 3711 return (ESRCH); 3712 } 3713 3714 /* Make sure the tag exists */ 3715 error = zap_lookup(mos, zapobj, htag, 8, 1, &tmp); 3716 if (error) { 3717 mutex_exit(&ds->ds_lock); 3718 if (error == ENOENT) 3719 error = ESRCH; 3720 return (error); 3721 } 3722 3723 if (ds->ds_userrefs == 1 && ds->ds_phys->ds_num_children == 1 && 3724 DS_IS_DEFER_DESTROY(ds)) 3725 *might_destroy = B_TRUE; 3726 3727 mutex_exit(&ds->ds_lock); 3728 return (0); 3729 } 3730 3731 static int 3732 dsl_dataset_user_release_check(void *arg1, void *tag, dmu_tx_t *tx) 3733 { 3734 struct dsl_ds_releasearg *ra = arg1; 3735 dsl_dataset_t *ds = ra->ds; 3736 boolean_t might_destroy; 3737 int error; 3738 3739 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_USERREFS) 3740 return (ENOTSUP); 3741 3742 error = dsl_dataset_release_might_destroy(ds, ra->htag, &might_destroy); 3743 if (error) 3744 return (error); 3745 3746 if (might_destroy) { 3747 struct dsl_ds_destroyarg dsda = {0}; 3748 3749 if (dmu_tx_is_syncing(tx)) { 3750 /* 3751 * If we're not prepared to remove the snapshot, 3752 * we can't allow the release to happen right now. 
3753 */ 3754 if (!ra->own) 3755 return (EBUSY); 3756 } 3757 dsda.ds = ds; 3758 dsda.releasing = B_TRUE; 3759 return (dsl_dataset_destroy_check(&dsda, tag, tx)); 3760 } 3761 3762 return (0); 3763 } 3764 3765 static void 3766 dsl_dataset_user_release_sync(void *arg1, void *tag, dmu_tx_t *tx) 3767 { 3768 struct dsl_ds_releasearg *ra = arg1; 3769 dsl_dataset_t *ds = ra->ds; 3770 dsl_pool_t *dp = ds->ds_dir->dd_pool; 3771 objset_t *mos = dp->dp_meta_objset; 3772 uint64_t zapobj; 3773 uint64_t dsobj = ds->ds_object; 3774 uint64_t refs; 3775 int error; 3776 3777 mutex_enter(&ds->ds_lock); 3778 ds->ds_userrefs--; 3779 refs = ds->ds_userrefs; 3780 mutex_exit(&ds->ds_lock); 3781 error = dsl_pool_user_release(dp, ds->ds_object, ra->htag, tx); 3782 VERIFY(error == 0 || error == ENOENT); 3783 zapobj = ds->ds_phys->ds_userrefs_obj; 3784 VERIFY(0 == zap_remove(mos, zapobj, ra->htag, tx)); 3785 if (ds->ds_userrefs == 0 && ds->ds_phys->ds_num_children == 1 && 3786 DS_IS_DEFER_DESTROY(ds)) { 3787 struct dsl_ds_destroyarg dsda = {0}; 3788 3789 ASSERT(ra->own); 3790 dsda.ds = ds; 3791 dsda.releasing = B_TRUE; 3792 /* We already did the destroy_check */ 3793 dsl_dataset_destroy_sync(&dsda, tag, tx); 3794 } 3795 3796 spa_history_log_internal(LOG_DS_USER_RELEASE, 3797 dp->dp_spa, tx, "<%s> %lld dataset = %llu", 3798 ra->htag, (longlong_t)refs, dsobj); 3799 } 3800 3801 static int 3802 dsl_dataset_user_release_one(const char *dsname, void *arg) 3803 { 3804 struct dsl_ds_holdarg *ha = arg; 3805 struct dsl_ds_releasearg *ra; 3806 dsl_dataset_t *ds; 3807 int error; 3808 void *dtag = ha->dstg; 3809 char *name; 3810 boolean_t own = B_FALSE; 3811 boolean_t might_destroy; 3812 3813 /* alloc a buffer to hold dsname@snapname, plus the terminating NULL */ 3814 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3815 error = dsl_dataset_hold(name, dtag, &ds); 3816 strfree(name); 3817 if (error == ENOENT && ha->recursive) 3818 return (0); 3819 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3820 if (error) 3821 return (error); 3822 3823 ha->gotone = B_TRUE; 3824 3825 ASSERT(dsl_dataset_is_snapshot(ds)); 3826 3827 error = dsl_dataset_release_might_destroy(ds, ha->htag, &might_destroy); 3828 if (error) { 3829 dsl_dataset_rele(ds, dtag); 3830 return (error); 3831 } 3832 3833 if (might_destroy) { 3834 #ifdef _KERNEL 3835 name = kmem_asprintf("%s@%s", dsname, ha->snapname); 3836 error = zfs_unmount_snap(name, NULL); 3837 strfree(name); 3838 if (error) { 3839 dsl_dataset_rele(ds, dtag); 3840 return (error); 3841 } 3842 #endif 3843 if (!dsl_dataset_tryown(ds, B_TRUE, dtag)) { 3844 dsl_dataset_rele(ds, dtag); 3845 return (EBUSY); 3846 } else { 3847 own = B_TRUE; 3848 dsl_dataset_make_exclusive(ds, dtag); 3849 } 3850 } 3851 3852 ra = kmem_alloc(sizeof (struct dsl_ds_releasearg), KM_SLEEP); 3853 ra->ds = ds; 3854 ra->htag = ha->htag; 3855 ra->own = own; 3856 dsl_sync_task_create(ha->dstg, dsl_dataset_user_release_check, 3857 dsl_dataset_user_release_sync, ra, dtag, 0); 3858 3859 return (0); 3860 } 3861 3862 int 3863 dsl_dataset_user_release(char *dsname, char *snapname, char *htag, 3864 boolean_t recursive) 3865 { 3866 struct dsl_ds_holdarg *ha; 3867 dsl_sync_task_t *dst; 3868 spa_t *spa; 3869 int error; 3870 3871 top: 3872 ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP); 3873 3874 (void) strlcpy(ha->failed, dsname, sizeof (ha->failed)); 3875 3876 error = spa_open(dsname, &spa, FTAG); 3877 if (error) { 3878 kmem_free(ha, sizeof (struct dsl_ds_holdarg)); 3879 return (error); 3880 } 3881 3882 ha->dstg = 
int
dsl_dataset_user_release(char *dsname, char *snapname, char *htag,
    boolean_t recursive)
{
	struct dsl_ds_holdarg *ha;
	dsl_sync_task_t *dst;
	spa_t *spa;
	int error;

top:
	ha = kmem_zalloc(sizeof (struct dsl_ds_holdarg), KM_SLEEP);

	(void) strlcpy(ha->failed, dsname, sizeof (ha->failed));

	error = spa_open(dsname, &spa, FTAG);
	if (error) {
		kmem_free(ha, sizeof (struct dsl_ds_holdarg));
		return (error);
	}

	ha->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
	ha->htag = htag;
	ha->snapname = snapname;
	ha->recursive = recursive;
	if (recursive) {
		error = dmu_objset_find(dsname, dsl_dataset_user_release_one,
		    ha, DS_FIND_CHILDREN);
	} else {
		error = dsl_dataset_user_release_one(dsname, ha);
	}
	if (error == 0)
		error = dsl_sync_task_group_wait(ha->dstg);

	for (dst = list_head(&ha->dstg->dstg_tasks); dst;
	    dst = list_next(&ha->dstg->dstg_tasks, dst)) {
		struct dsl_ds_releasearg *ra = dst->dst_arg1;
		dsl_dataset_t *ds = ra->ds;

		if (dst->dst_err)
			dsl_dataset_name(ds, ha->failed);

		if (ra->own)
			dsl_dataset_disown(ds, ha->dstg);
		else
			dsl_dataset_rele(ds, ha->dstg);

		kmem_free(ra, sizeof (struct dsl_ds_releasearg));
	}

	if (error == 0 && recursive && !ha->gotone)
		error = ENOENT;

	if (error && error != EBUSY)
		(void) strlcpy(dsname, ha->failed, sizeof (ha->failed));

	dsl_sync_task_group_destroy(ha->dstg);
	kmem_free(ha, sizeof (struct dsl_ds_holdarg));
	spa_close(spa, FTAG);

	/*
	 * We can get EBUSY if we were racing with deferred destroy and
	 * dsl_dataset_user_release_check() hadn't done the necessary
	 * open context setup.  We can also get EBUSY if we're racing
	 * with destroy and that thread is the ds_owner.  Either way
	 * the busy condition should be transient, and we should retry
	 * the release operation.
	 */
	if (error == EBUSY)
		goto top;

	return (error);
}

/*
 * Called at spa_load time (with retry == B_FALSE) to release a stale
 * temporary user hold.  Also called by the onexit code (with retry == B_TRUE).
 */
int
dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag,
    boolean_t retry)
{
	dsl_dataset_t *ds;
	char *snap;
	char *name;
	int namelen;
	int error;

	do {
		rw_enter(&dp->dp_config_rwlock, RW_READER);
		error = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
		rw_exit(&dp->dp_config_rwlock);
		if (error)
			return (error);
		namelen = dsl_dataset_namelen(ds) + 1;
		name = kmem_alloc(namelen, KM_SLEEP);
		dsl_dataset_name(ds, name);
		dsl_dataset_rele(ds, FTAG);

		snap = strchr(name, '@');
		*snap = '\0';
		++snap;
		error = dsl_dataset_user_release(name, snap, htag, B_FALSE);
		kmem_free(name, namelen);

		/*
		 * The object can't have been destroyed because we have a hold,
		 * but it might have been renamed, resulting in ENOENT.  Retry
		 * if we've been requested to do so.
		 *
		 * It would be nice if we could use the dsobj all the way
		 * through and avoid ENOENT entirely.  But we might need to
		 * unmount the snapshot, and there's currently no way to look
		 * up a vfsp using a ZFS object id.
		 */
	} while ((error == ENOENT) && retry);

	return (error);
}
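
/*
 * Build an nvlist describing the user holds on snapshot "dsname": one
 * entry per hold, mapping the hold's tag to the 64-bit value stored for
 * it in the dataset's userrefs ZAP object.
 */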
int
dsl_dataset_get_holds(const char *dsname, nvlist_t **nvp)
{
	dsl_dataset_t *ds;
	int err;

	err = dsl_dataset_hold(dsname, FTAG, &ds);
	if (err)
		return (err);

	VERIFY(0 == nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
	if (ds->ds_phys->ds_userrefs_obj != 0) {
		zap_attribute_t *za;
		zap_cursor_t zc;

		za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
		for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
		    ds->ds_phys->ds_userrefs_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY(0 == nvlist_add_uint64(*nvp, za->za_name,
			    za->za_first_integer));
		}
		zap_cursor_fini(&zc);
		kmem_free(za, sizeof (zap_attribute_t));
	}
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Note, this function is used as the callback for dmu_objset_find().  We
 * always return 0 so that we will continue to find and process
 * inconsistent datasets, even if we encounter an error trying to
 * process one of them.
 */
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
	dsl_dataset_t *ds;

	if (dsl_dataset_own(dsname, B_TRUE, FTAG, &ds) == 0) {
		if (DS_IS_INCONSISTENT(ds))
			(void) dsl_dataset_destroy(ds, FTAG, B_FALSE);
		else
			dsl_dataset_disown(ds, FTAG);
	}
	return (0);
}