/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
#include <sys/zfs_znode.h>
#include <sys/sunddi.h>

static char *dsl_reaper = "the grim reaper";

static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
static dsl_checkfunc_t dsl_dataset_rollback_check;
static dsl_syncfunc_t dsl_dataset_rollback_sync;
static dsl_syncfunc_t dsl_dataset_set_reservation_sync;

#define	DS_REF_MAX	(1ULL << 62)

#define	DSL_DEADLIST_BLOCKSIZE	SPA_MAXBLOCKSIZE

#define	DSL_DATASET_IS_DESTROYED(ds)	((ds)->ds_owner == dsl_reaper)


/*
 * Figure out how much of this delta should be propagated to the dsl_dir
 * layer.  If there's a refreservation, that space has already been
 * partially accounted for in our ancestors.
 */
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
	uint64_t old_bytes, new_bytes;

	if (ds->ds_reserved == 0)
		return (delta);

	old_bytes = MAX(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
	new_bytes = MAX(ds->ds_phys->ds_unique_bytes + delta, ds->ds_reserved);

	ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
	return (new_bytes - old_bytes);
}

void
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
	int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
	int compressed = BP_GET_PSIZE(bp);
	int uncompressed = BP_GET_UCSIZE(bp);
	int64_t delta;

	dprintf_bp(bp, "born, ds=%p\n", ds);

	ASSERT(dmu_tx_is_syncing(tx));
	/* It could have been compressed away to nothing */
	if (BP_IS_HOLE(bp))
		return;
	ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
	ASSERT3U(BP_GET_TYPE(bp), <, DMU_OT_NUMTYPES);
	if (ds == NULL) {
		/*
		 * Account for the meta-objset space in its placeholder
		 * dsl_dir.
100 */ 101 ASSERT3U(compressed, ==, uncompressed); /* it's all metadata */ 102 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, 103 used, compressed, uncompressed, tx); 104 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 105 return; 106 } 107 dmu_buf_will_dirty(ds->ds_dbuf, tx); 108 mutex_enter(&ds->ds_lock); 109 delta = parent_delta(ds, used); 110 ds->ds_phys->ds_used_bytes += used; 111 ds->ds_phys->ds_compressed_bytes += compressed; 112 ds->ds_phys->ds_uncompressed_bytes += uncompressed; 113 ds->ds_phys->ds_unique_bytes += used; 114 mutex_exit(&ds->ds_lock); 115 dsl_dir_diduse_space(ds->ds_dir, delta, compressed, uncompressed, tx); 116 } 117 118 int 119 dsl_dataset_block_kill(dsl_dataset_t *ds, blkptr_t *bp, zio_t *pio, 120 dmu_tx_t *tx) 121 { 122 int used = bp_get_dasize(tx->tx_pool->dp_spa, bp); 123 int compressed = BP_GET_PSIZE(bp); 124 int uncompressed = BP_GET_UCSIZE(bp); 125 126 ASSERT(dmu_tx_is_syncing(tx)); 127 /* No block pointer => nothing to free */ 128 if (BP_IS_HOLE(bp)) 129 return (0); 130 131 ASSERT(used > 0); 132 if (ds == NULL) { 133 int err; 134 /* 135 * Account for the meta-objset space in its placeholder 136 * dataset. 137 */ 138 err = dsl_free(pio, tx->tx_pool, 139 tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT: ARC_WAIT); 140 ASSERT(err == 0); 141 142 dsl_dir_diduse_space(tx->tx_pool->dp_mos_dir, 143 -used, -compressed, -uncompressed, tx); 144 dsl_dir_dirty(tx->tx_pool->dp_mos_dir, tx); 145 return (used); 146 } 147 ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool); 148 149 dmu_buf_will_dirty(ds->ds_dbuf, tx); 150 151 if (bp->blk_birth > ds->ds_phys->ds_prev_snap_txg) { 152 int err; 153 int64_t delta; 154 155 dprintf_bp(bp, "freeing: %s", ""); 156 err = dsl_free(pio, tx->tx_pool, 157 tx->tx_txg, bp, NULL, NULL, pio ? ARC_NOWAIT: ARC_WAIT); 158 ASSERT(err == 0); 159 160 mutex_enter(&ds->ds_lock); 161 ASSERT(ds->ds_phys->ds_unique_bytes >= used || 162 !DS_UNIQUE_IS_ACCURATE(ds)); 163 delta = parent_delta(ds, -used); 164 ds->ds_phys->ds_unique_bytes -= used; 165 mutex_exit(&ds->ds_lock); 166 dsl_dir_diduse_space(ds->ds_dir, 167 delta, -compressed, -uncompressed, tx); 168 } else { 169 dprintf_bp(bp, "putting on dead list: %s", ""); 170 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, bp, tx)); 171 ASSERT3U(ds->ds_prev->ds_object, ==, 172 ds->ds_phys->ds_prev_snap_obj); 173 ASSERT(ds->ds_prev->ds_phys->ds_num_children > 0); 174 /* if (bp->blk_birth > prev prev snap txg) prev unique += bs */ 175 if (ds->ds_prev->ds_phys->ds_next_snap_obj == 176 ds->ds_object && bp->blk_birth > 177 ds->ds_prev->ds_phys->ds_prev_snap_txg) { 178 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 179 mutex_enter(&ds->ds_prev->ds_lock); 180 ds->ds_prev->ds_phys->ds_unique_bytes += used; 181 mutex_exit(&ds->ds_prev->ds_lock); 182 } 183 } 184 mutex_enter(&ds->ds_lock); 185 ASSERT3U(ds->ds_phys->ds_used_bytes, >=, used); 186 ds->ds_phys->ds_used_bytes -= used; 187 ASSERT3U(ds->ds_phys->ds_compressed_bytes, >=, compressed); 188 ds->ds_phys->ds_compressed_bytes -= compressed; 189 ASSERT3U(ds->ds_phys->ds_uncompressed_bytes, >=, uncompressed); 190 ds->ds_phys->ds_uncompressed_bytes -= uncompressed; 191 mutex_exit(&ds->ds_lock); 192 193 return (used); 194 } 195 196 uint64_t 197 dsl_dataset_prev_snap_txg(dsl_dataset_t *ds) 198 { 199 uint64_t trysnap = 0; 200 201 if (ds == NULL) 202 return (0); 203 /* 204 * The snapshot creation could fail, but that would cause an 205 * incorrect FALSE return, which would only result in an 206 * overestimation of the amount of space that an operation would 207 * consume, which is OK. 
208 * 209 * There's also a small window where we could miss a pending 210 * snapshot, because we could set the sync task in the quiescing 211 * phase. So this should only be used as a guess. 212 */ 213 if (ds->ds_trysnap_txg > 214 spa_last_synced_txg(ds->ds_dir->dd_pool->dp_spa)) 215 trysnap = ds->ds_trysnap_txg; 216 return (MAX(ds->ds_phys->ds_prev_snap_txg, trysnap)); 217 } 218 219 int 220 dsl_dataset_block_freeable(dsl_dataset_t *ds, uint64_t blk_birth) 221 { 222 return (blk_birth > dsl_dataset_prev_snap_txg(ds)); 223 } 224 225 /* ARGSUSED */ 226 static void 227 dsl_dataset_evict(dmu_buf_t *db, void *dsv) 228 { 229 dsl_dataset_t *ds = dsv; 230 231 ASSERT(ds->ds_owner == NULL || DSL_DATASET_IS_DESTROYED(ds)); 232 233 dprintf_ds(ds, "evicting %s\n", ""); 234 235 unique_remove(ds->ds_fsid_guid); 236 237 if (ds->ds_user_ptr != NULL) 238 ds->ds_user_evict_func(ds, ds->ds_user_ptr); 239 240 if (ds->ds_prev) { 241 dsl_dataset_drop_ref(ds->ds_prev, ds); 242 ds->ds_prev = NULL; 243 } 244 245 bplist_close(&ds->ds_deadlist); 246 if (ds->ds_dir) 247 dsl_dir_close(ds->ds_dir, ds); 248 249 ASSERT(!list_link_active(&ds->ds_synced_link)); 250 251 mutex_destroy(&ds->ds_lock); 252 mutex_destroy(&ds->ds_opening_lock); 253 mutex_destroy(&ds->ds_deadlist.bpl_lock); 254 rw_destroy(&ds->ds_rwlock); 255 cv_destroy(&ds->ds_exclusive_cv); 256 257 kmem_free(ds, sizeof (dsl_dataset_t)); 258 } 259 260 static int 261 dsl_dataset_get_snapname(dsl_dataset_t *ds) 262 { 263 dsl_dataset_phys_t *headphys; 264 int err; 265 dmu_buf_t *headdbuf; 266 dsl_pool_t *dp = ds->ds_dir->dd_pool; 267 objset_t *mos = dp->dp_meta_objset; 268 269 if (ds->ds_snapname[0]) 270 return (0); 271 if (ds->ds_phys->ds_next_snap_obj == 0) 272 return (0); 273 274 err = dmu_bonus_hold(mos, ds->ds_dir->dd_phys->dd_head_dataset_obj, 275 FTAG, &headdbuf); 276 if (err) 277 return (err); 278 headphys = headdbuf->db_data; 279 err = zap_value_search(dp->dp_meta_objset, 280 headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname); 281 dmu_buf_rele(headdbuf, FTAG); 282 return (err); 283 } 284 285 static int 286 dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value) 287 { 288 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 289 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 290 matchtype_t mt; 291 int err; 292 293 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 294 mt = MT_FIRST; 295 else 296 mt = MT_EXACT; 297 298 err = zap_lookup_norm(mos, snapobj, name, 8, 1, 299 value, mt, NULL, 0, NULL); 300 if (err == ENOTSUP && mt == MT_FIRST) 301 err = zap_lookup(mos, snapobj, name, 8, 1, value); 302 return (err); 303 } 304 305 static int 306 dsl_dataset_snap_remove(dsl_dataset_t *ds, char *name, dmu_tx_t *tx) 307 { 308 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 309 uint64_t snapobj = ds->ds_phys->ds_snapnames_zapobj; 310 matchtype_t mt; 311 int err; 312 313 if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET) 314 mt = MT_FIRST; 315 else 316 mt = MT_EXACT; 317 318 err = zap_remove_norm(mos, snapobj, name, mt, tx); 319 if (err == ENOTSUP && mt == MT_FIRST) 320 err = zap_remove(mos, snapobj, name, tx); 321 return (err); 322 } 323 324 static int 325 dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag, 326 dsl_dataset_t **dsp) 327 { 328 objset_t *mos = dp->dp_meta_objset; 329 dmu_buf_t *dbuf; 330 dsl_dataset_t *ds; 331 int err; 332 333 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 334 dsl_pool_sync_context(dp)); 335 336 err = dmu_bonus_hold(mos, dsobj, tag, &dbuf); 337 if (err) 338 return (err); 339 ds = 
dmu_buf_get_user(dbuf); 340 if (ds == NULL) { 341 dsl_dataset_t *winner; 342 343 ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP); 344 ds->ds_dbuf = dbuf; 345 ds->ds_object = dsobj; 346 ds->ds_phys = dbuf->db_data; 347 348 mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL); 349 mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL); 350 mutex_init(&ds->ds_deadlist.bpl_lock, NULL, MUTEX_DEFAULT, 351 NULL); 352 rw_init(&ds->ds_rwlock, 0, 0, 0); 353 cv_init(&ds->ds_exclusive_cv, NULL, CV_DEFAULT, NULL); 354 355 err = bplist_open(&ds->ds_deadlist, 356 mos, ds->ds_phys->ds_deadlist_obj); 357 if (err == 0) { 358 err = dsl_dir_open_obj(dp, 359 ds->ds_phys->ds_dir_obj, NULL, ds, &ds->ds_dir); 360 } 361 if (err) { 362 /* 363 * we don't really need to close the blist if we 364 * just opened it. 365 */ 366 mutex_destroy(&ds->ds_lock); 367 mutex_destroy(&ds->ds_opening_lock); 368 mutex_destroy(&ds->ds_deadlist.bpl_lock); 369 rw_destroy(&ds->ds_rwlock); 370 cv_destroy(&ds->ds_exclusive_cv); 371 kmem_free(ds, sizeof (dsl_dataset_t)); 372 dmu_buf_rele(dbuf, tag); 373 return (err); 374 } 375 376 if (ds->ds_dir->dd_phys->dd_head_dataset_obj == dsobj) { 377 ds->ds_snapname[0] = '\0'; 378 if (ds->ds_phys->ds_prev_snap_obj) { 379 err = dsl_dataset_get_ref(dp, 380 ds->ds_phys->ds_prev_snap_obj, 381 ds, &ds->ds_prev); 382 } 383 } else if (zfs_flags & ZFS_DEBUG_SNAPNAMES) { 384 err = dsl_dataset_get_snapname(ds); 385 } 386 387 if (!dsl_dataset_is_snapshot(ds)) { 388 /* 389 * In sync context, we're called with either no lock 390 * or with the write lock. If we're not syncing, 391 * we're always called with the read lock held. 392 */ 393 boolean_t need_lock = 394 !RW_WRITE_HELD(&dp->dp_config_rwlock) && 395 dsl_pool_sync_context(dp); 396 397 if (need_lock) 398 rw_enter(&dp->dp_config_rwlock, RW_READER); 399 400 err = dsl_prop_get_ds_locked(ds->ds_dir, 401 "refreservation", sizeof (uint64_t), 1, 402 &ds->ds_reserved, NULL); 403 if (err == 0) { 404 err = dsl_prop_get_ds_locked(ds->ds_dir, 405 "refquota", sizeof (uint64_t), 1, 406 &ds->ds_quota, NULL); 407 } 408 409 if (need_lock) 410 rw_exit(&dp->dp_config_rwlock); 411 } else { 412 ds->ds_reserved = ds->ds_quota = 0; 413 } 414 415 if (err == 0) { 416 winner = dmu_buf_set_user_ie(dbuf, ds, &ds->ds_phys, 417 dsl_dataset_evict); 418 } 419 if (err || winner) { 420 bplist_close(&ds->ds_deadlist); 421 if (ds->ds_prev) 422 dsl_dataset_drop_ref(ds->ds_prev, ds); 423 dsl_dir_close(ds->ds_dir, ds); 424 mutex_destroy(&ds->ds_lock); 425 mutex_destroy(&ds->ds_opening_lock); 426 mutex_destroy(&ds->ds_deadlist.bpl_lock); 427 rw_destroy(&ds->ds_rwlock); 428 cv_destroy(&ds->ds_exclusive_cv); 429 kmem_free(ds, sizeof (dsl_dataset_t)); 430 if (err) { 431 dmu_buf_rele(dbuf, tag); 432 return (err); 433 } 434 ds = winner; 435 } else { 436 ds->ds_fsid_guid = 437 unique_insert(ds->ds_phys->ds_fsid_guid); 438 } 439 } 440 ASSERT3P(ds->ds_dbuf, ==, dbuf); 441 ASSERT3P(ds->ds_phys, ==, dbuf->db_data); 442 ASSERT(ds->ds_phys->ds_prev_snap_obj != 0 || 443 spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN || 444 dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap); 445 mutex_enter(&ds->ds_lock); 446 if (!dsl_pool_sync_context(dp) && DSL_DATASET_IS_DESTROYED(ds)) { 447 mutex_exit(&ds->ds_lock); 448 dmu_buf_rele(ds->ds_dbuf, tag); 449 return (ENOENT); 450 } 451 mutex_exit(&ds->ds_lock); 452 *dsp = ds; 453 return (0); 454 } 455 456 static int 457 dsl_dataset_hold_ref(dsl_dataset_t *ds, void *tag) 458 { 459 dsl_pool_t *dp = ds->ds_dir->dd_pool; 460 461 /* 462 * In syncing context we don't want 
the rwlock: there
	 * may be an existing writer waiting for sync phase to
	 * finish.  We don't need to worry about such writers, since
	 * sync phase is single-threaded, so the writer can't be
	 * doing anything while we are active.
	 */
	if (dsl_pool_sync_context(dp)) {
		ASSERT(!DSL_DATASET_IS_DESTROYED(ds));
		return (0);
	}

	/*
	 * Normal users will hold the ds_rwlock as a READER until they
	 * are finished (i.e., call dsl_dataset_rele()).  "Owners" will
	 * drop their READER lock after they set the ds_owner field.
	 *
	 * If the dataset is being destroyed, the destroy thread will
	 * obtain a WRITER lock for exclusive access after it's done its
	 * open-context work and then change the ds_owner to
	 * dsl_reaper once destruction is assured.  So threads
	 * may block here temporarily, until the "destructibility" of
	 * the dataset is determined.
	 */
	ASSERT(!RW_WRITE_HELD(&dp->dp_config_rwlock));
	mutex_enter(&ds->ds_lock);
	while (!rw_tryenter(&ds->ds_rwlock, RW_READER)) {
		rw_exit(&dp->dp_config_rwlock);
		cv_wait(&ds->ds_exclusive_cv, &ds->ds_lock);
		if (DSL_DATASET_IS_DESTROYED(ds)) {
			mutex_exit(&ds->ds_lock);
			dsl_dataset_drop_ref(ds, tag);
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			return (ENOENT);
		}
		rw_enter(&dp->dp_config_rwlock, RW_READER);
	}
	mutex_exit(&ds->ds_lock);
	return (0);
}

int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_get_ref(dp, dsobj, tag, dsp);

	if (err)
		return (err);
	return (dsl_dataset_hold_ref(*dsp, tag));
}

int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, int flags, void *owner,
    dsl_dataset_t **dsp)
{
	int err = dsl_dataset_hold_obj(dp, dsobj, owner, dsp);

	ASSERT(DS_MODE_TYPE(flags) != DS_MODE_USER);

	if (err)
		return (err);
	if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) {
		dsl_dataset_rele(*dsp, owner);
		return (EBUSY);
	}
	return (0);
}

int
dsl_dataset_hold(const char *name, void *tag, dsl_dataset_t **dsp)
{
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	const char *snapname;
	uint64_t obj;
	int err = 0;

	err = dsl_dir_open_spa(NULL, name, FTAG, &dd, &snapname);
	if (err)
		return (err);

	dp = dd->dd_pool;
	obj = dd->dd_phys->dd_head_dataset_obj;
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	if (obj)
		err = dsl_dataset_get_ref(dp, obj, tag, dsp);
	else
		err = ENOENT;
	if (err)
		goto out;

	err = dsl_dataset_hold_ref(*dsp, tag);

	/* we may be looking for a snapshot */
	if (err == 0 && snapname != NULL) {
		dsl_dataset_t *ds = NULL;

		if (*snapname++ != '@') {
			dsl_dataset_rele(*dsp, tag);
			err = ENOENT;
			goto out;
		}

		dprintf("looking for snapshot '%s'\n", snapname);
		err = dsl_dataset_snap_lookup(*dsp, snapname, &obj);
		if (err == 0)
			err = dsl_dataset_get_ref(dp, obj, tag, &ds);
		dsl_dataset_rele(*dsp, tag);

		ASSERT3U((err == 0), ==, (ds != NULL));

		if (ds) {
			mutex_enter(&ds->ds_lock);
			if (ds->ds_snapname[0] == 0)
				(void) strlcpy(ds->ds_snapname, snapname,
				    sizeof (ds->ds_snapname));
			mutex_exit(&ds->ds_lock);
			err = dsl_dataset_hold_ref(ds, tag);
			*dsp = err ?
NULL : ds; 581 } 582 } 583 out: 584 rw_exit(&dp->dp_config_rwlock); 585 dsl_dir_close(dd, FTAG); 586 return (err); 587 } 588 589 int 590 dsl_dataset_own(const char *name, int flags, void *owner, dsl_dataset_t **dsp) 591 { 592 int err = dsl_dataset_hold(name, owner, dsp); 593 if (err) 594 return (err); 595 if ((*dsp)->ds_phys->ds_num_children > 0 && 596 !DS_MODE_IS_READONLY(flags)) { 597 dsl_dataset_rele(*dsp, owner); 598 return (EROFS); 599 } 600 if (!dsl_dataset_tryown(*dsp, DS_MODE_IS_INCONSISTENT(flags), owner)) { 601 dsl_dataset_rele(*dsp, owner); 602 return (EBUSY); 603 } 604 return (0); 605 } 606 607 void 608 dsl_dataset_name(dsl_dataset_t *ds, char *name) 609 { 610 if (ds == NULL) { 611 (void) strcpy(name, "mos"); 612 } else { 613 dsl_dir_name(ds->ds_dir, name); 614 VERIFY(0 == dsl_dataset_get_snapname(ds)); 615 if (ds->ds_snapname[0]) { 616 (void) strcat(name, "@"); 617 /* 618 * We use a "recursive" mutex so that we 619 * can call dprintf_ds() with ds_lock held. 620 */ 621 if (!MUTEX_HELD(&ds->ds_lock)) { 622 mutex_enter(&ds->ds_lock); 623 (void) strcat(name, ds->ds_snapname); 624 mutex_exit(&ds->ds_lock); 625 } else { 626 (void) strcat(name, ds->ds_snapname); 627 } 628 } 629 } 630 } 631 632 static int 633 dsl_dataset_namelen(dsl_dataset_t *ds) 634 { 635 int result; 636 637 if (ds == NULL) { 638 result = 3; /* "mos" */ 639 } else { 640 result = dsl_dir_namelen(ds->ds_dir); 641 VERIFY(0 == dsl_dataset_get_snapname(ds)); 642 if (ds->ds_snapname[0]) { 643 ++result; /* adding one for the @-sign */ 644 if (!MUTEX_HELD(&ds->ds_lock)) { 645 mutex_enter(&ds->ds_lock); 646 result += strlen(ds->ds_snapname); 647 mutex_exit(&ds->ds_lock); 648 } else { 649 result += strlen(ds->ds_snapname); 650 } 651 } 652 } 653 654 return (result); 655 } 656 657 void 658 dsl_dataset_drop_ref(dsl_dataset_t *ds, void *tag) 659 { 660 dmu_buf_rele(ds->ds_dbuf, tag); 661 } 662 663 void 664 dsl_dataset_rele(dsl_dataset_t *ds, void *tag) 665 { 666 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) { 667 rw_exit(&ds->ds_rwlock); 668 } 669 dsl_dataset_drop_ref(ds, tag); 670 } 671 672 void 673 dsl_dataset_disown(dsl_dataset_t *ds, void *owner) 674 { 675 ASSERT((ds->ds_owner == owner && ds->ds_dbuf) || 676 (DSL_DATASET_IS_DESTROYED(ds) && ds->ds_dbuf == NULL)); 677 678 mutex_enter(&ds->ds_lock); 679 ds->ds_owner = NULL; 680 if (RW_WRITE_HELD(&ds->ds_rwlock)) { 681 rw_exit(&ds->ds_rwlock); 682 cv_broadcast(&ds->ds_exclusive_cv); 683 } 684 mutex_exit(&ds->ds_lock); 685 if (ds->ds_dbuf) 686 dsl_dataset_drop_ref(ds, owner); 687 else 688 dsl_dataset_evict(ds->ds_dbuf, ds); 689 } 690 691 boolean_t 692 dsl_dataset_tryown(dsl_dataset_t *ds, boolean_t inconsistentok, void *owner) 693 { 694 boolean_t gotit = FALSE; 695 696 mutex_enter(&ds->ds_lock); 697 if (ds->ds_owner == NULL && 698 (!DS_IS_INCONSISTENT(ds) || inconsistentok)) { 699 ds->ds_owner = owner; 700 if (!dsl_pool_sync_context(ds->ds_dir->dd_pool)) 701 rw_exit(&ds->ds_rwlock); 702 gotit = TRUE; 703 } 704 mutex_exit(&ds->ds_lock); 705 return (gotit); 706 } 707 708 void 709 dsl_dataset_make_exclusive(dsl_dataset_t *ds, void *owner) 710 { 711 ASSERT3P(owner, ==, ds->ds_owner); 712 if (!RW_WRITE_HELD(&ds->ds_rwlock)) 713 rw_enter(&ds->ds_rwlock, RW_WRITER); 714 } 715 716 uint64_t 717 dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin, 718 uint64_t flags, dmu_tx_t *tx) 719 { 720 dsl_pool_t *dp = dd->dd_pool; 721 dmu_buf_t *dbuf; 722 dsl_dataset_phys_t *dsphys; 723 uint64_t dsobj; 724 objset_t *mos = dp->dp_meta_objset; 725 726 if (origin == NULL) 727 origin = 
dp->dp_origin_snap; 728 729 ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp); 730 ASSERT(origin == NULL || origin->ds_phys->ds_num_children > 0); 731 ASSERT(dmu_tx_is_syncing(tx)); 732 ASSERT(dd->dd_phys->dd_head_dataset_obj == 0); 733 734 dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0, 735 DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx); 736 VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf)); 737 dmu_buf_will_dirty(dbuf, tx); 738 dsphys = dbuf->db_data; 739 bzero(dsphys, sizeof (dsl_dataset_phys_t)); 740 dsphys->ds_dir_obj = dd->dd_object; 741 dsphys->ds_flags = flags; 742 dsphys->ds_fsid_guid = unique_create(); 743 (void) random_get_pseudo_bytes((void*)&dsphys->ds_guid, 744 sizeof (dsphys->ds_guid)); 745 dsphys->ds_snapnames_zapobj = 746 zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP, 747 DMU_OT_NONE, 0, tx); 748 dsphys->ds_creation_time = gethrestime_sec(); 749 dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg; 750 dsphys->ds_deadlist_obj = 751 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); 752 753 if (origin) { 754 dsphys->ds_prev_snap_obj = origin->ds_object; 755 dsphys->ds_prev_snap_txg = 756 origin->ds_phys->ds_creation_txg; 757 dsphys->ds_used_bytes = 758 origin->ds_phys->ds_used_bytes; 759 dsphys->ds_compressed_bytes = 760 origin->ds_phys->ds_compressed_bytes; 761 dsphys->ds_uncompressed_bytes = 762 origin->ds_phys->ds_uncompressed_bytes; 763 dsphys->ds_bp = origin->ds_phys->ds_bp; 764 dsphys->ds_flags |= origin->ds_phys->ds_flags; 765 766 dmu_buf_will_dirty(origin->ds_dbuf, tx); 767 origin->ds_phys->ds_num_children++; 768 769 if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) { 770 if (origin->ds_phys->ds_next_clones_obj == 0) { 771 origin->ds_phys->ds_next_clones_obj = 772 zap_create(mos, 773 DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx); 774 } 775 VERIFY(0 == zap_add_int(mos, 776 origin->ds_phys->ds_next_clones_obj, 777 dsobj, tx)); 778 } 779 780 dmu_buf_will_dirty(dd->dd_dbuf, tx); 781 dd->dd_phys->dd_origin_obj = origin->ds_object; 782 } 783 784 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 785 dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 786 787 dmu_buf_rele(dbuf, FTAG); 788 789 dmu_buf_will_dirty(dd->dd_dbuf, tx); 790 dd->dd_phys->dd_head_dataset_obj = dsobj; 791 792 return (dsobj); 793 } 794 795 uint64_t 796 dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname, 797 dsl_dataset_t *origin, uint64_t flags, cred_t *cr, dmu_tx_t *tx) 798 { 799 dsl_pool_t *dp = pdd->dd_pool; 800 uint64_t dsobj, ddobj; 801 dsl_dir_t *dd; 802 803 ASSERT(lastname[0] != '@'); 804 805 ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx); 806 VERIFY(0 == dsl_dir_open_obj(dp, ddobj, lastname, FTAG, &dd)); 807 808 dsobj = dsl_dataset_create_sync_dd(dd, origin, flags, tx); 809 810 dsl_deleg_set_create_perms(dd, tx, cr); 811 812 dsl_dir_close(dd, FTAG); 813 814 return (dsobj); 815 } 816 817 struct destroyarg { 818 dsl_sync_task_group_t *dstg; 819 char *snapname; 820 char *failed; 821 }; 822 823 static int 824 dsl_snapshot_destroy_one(char *name, void *arg) 825 { 826 struct destroyarg *da = arg; 827 dsl_dataset_t *ds; 828 char *cp; 829 int err; 830 831 (void) strcat(name, "@"); 832 (void) strcat(name, da->snapname); 833 err = dsl_dataset_own(name, DS_MODE_READONLY | DS_MODE_INCONSISTENT, 834 da->dstg, &ds); 835 cp = strchr(name, '@'); 836 *cp = '\0'; 837 if (err == 0) { 838 dsl_dataset_make_exclusive(ds, da->dstg); 839 dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check, 840 dsl_dataset_destroy_sync, ds, da->dstg, 0); 841 } 
else if (err == ENOENT) { 842 err = 0; 843 } else { 844 (void) strcpy(da->failed, name); 845 } 846 return (err); 847 } 848 849 /* 850 * Destroy 'snapname' in all descendants of 'fsname'. 851 */ 852 #pragma weak dmu_snapshots_destroy = dsl_snapshots_destroy 853 int 854 dsl_snapshots_destroy(char *fsname, char *snapname) 855 { 856 int err; 857 struct destroyarg da; 858 dsl_sync_task_t *dst; 859 spa_t *spa; 860 861 err = spa_open(fsname, &spa, FTAG); 862 if (err) 863 return (err); 864 da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 865 da.snapname = snapname; 866 da.failed = fsname; 867 868 err = dmu_objset_find(fsname, 869 dsl_snapshot_destroy_one, &da, DS_FIND_CHILDREN); 870 871 if (err == 0) 872 err = dsl_sync_task_group_wait(da.dstg); 873 874 for (dst = list_head(&da.dstg->dstg_tasks); dst; 875 dst = list_next(&da.dstg->dstg_tasks, dst)) { 876 dsl_dataset_t *ds = dst->dst_arg1; 877 /* 878 * Return the file system name that triggered the error 879 */ 880 if (dst->dst_err) { 881 dsl_dataset_name(ds, fsname); 882 *strchr(fsname, '@') = '\0'; 883 } 884 dsl_dataset_disown(ds, da.dstg); 885 } 886 887 dsl_sync_task_group_destroy(da.dstg); 888 spa_close(spa, FTAG); 889 return (err); 890 } 891 892 /* 893 * ds must be opened as OWNER. On return (whether successful or not), 894 * ds will be closed and caller can no longer dereference it. 895 */ 896 int 897 dsl_dataset_destroy(dsl_dataset_t *ds, void *tag) 898 { 899 int err; 900 dsl_sync_task_group_t *dstg; 901 objset_t *os; 902 dsl_dir_t *dd; 903 uint64_t obj; 904 905 if (dsl_dataset_is_snapshot(ds)) { 906 /* Destroying a snapshot is simpler */ 907 dsl_dataset_make_exclusive(ds, tag); 908 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 909 dsl_dataset_destroy_check, dsl_dataset_destroy_sync, 910 ds, tag, 0); 911 goto out; 912 } 913 914 dd = ds->ds_dir; 915 916 /* 917 * Check for errors and mark this ds as inconsistent, in 918 * case we crash while freeing the objects. 919 */ 920 err = dsl_sync_task_do(dd->dd_pool, dsl_dataset_destroy_begin_check, 921 dsl_dataset_destroy_begin_sync, ds, NULL, 0); 922 if (err) 923 goto out; 924 925 err = dmu_objset_open_ds(ds, DMU_OST_ANY, &os); 926 if (err) 927 goto out; 928 929 /* 930 * remove the objects in open context, so that we won't 931 * have too much to do in syncing context. 932 */ 933 for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 934 ds->ds_phys->ds_prev_snap_txg)) { 935 /* 936 * Ignore errors, if there is not enough disk space 937 * we will deal with it in dsl_dataset_destroy_sync(). 938 */ 939 (void) dmu_free_object(os, obj); 940 } 941 942 dmu_objset_close(os); 943 if (err != ESRCH) 944 goto out; 945 946 rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER); 947 err = dsl_dir_open_obj(dd->dd_pool, dd->dd_object, NULL, FTAG, &dd); 948 rw_exit(&dd->dd_pool->dp_config_rwlock); 949 950 if (err) 951 goto out; 952 953 if (ds->ds_user_ptr) { 954 /* 955 * We need to sync out all in-flight IO before we try 956 * to evict (the dataset evict func is trying to clear 957 * the cached entries for this dataset in the ARC). 958 */ 959 txg_wait_synced(dd->dd_pool, 0); 960 } 961 962 /* 963 * Blow away the dsl_dir + head dataset. 
964 */ 965 dsl_dataset_make_exclusive(ds, tag); 966 if (ds->ds_user_ptr) { 967 ds->ds_user_evict_func(ds, ds->ds_user_ptr); 968 ds->ds_user_ptr = NULL; 969 } 970 dstg = dsl_sync_task_group_create(ds->ds_dir->dd_pool); 971 dsl_sync_task_create(dstg, dsl_dataset_destroy_check, 972 dsl_dataset_destroy_sync, ds, tag, 0); 973 dsl_sync_task_create(dstg, dsl_dir_destroy_check, 974 dsl_dir_destroy_sync, dd, FTAG, 0); 975 err = dsl_sync_task_group_wait(dstg); 976 dsl_sync_task_group_destroy(dstg); 977 /* if it is successful, dsl_dir_destroy_sync will close the dd */ 978 if (err) 979 dsl_dir_close(dd, FTAG); 980 out: 981 dsl_dataset_disown(ds, tag); 982 return (err); 983 } 984 985 int 986 dsl_dataset_rollback(dsl_dataset_t *ds, dmu_objset_type_t ost) 987 { 988 ASSERT(ds->ds_owner); 989 990 return (dsl_sync_task_do(ds->ds_dir->dd_pool, 991 dsl_dataset_rollback_check, dsl_dataset_rollback_sync, 992 ds, &ost, 0)); 993 } 994 995 void * 996 dsl_dataset_set_user_ptr(dsl_dataset_t *ds, 997 void *p, dsl_dataset_evict_func_t func) 998 { 999 void *old; 1000 1001 mutex_enter(&ds->ds_lock); 1002 old = ds->ds_user_ptr; 1003 if (old == NULL) { 1004 ds->ds_user_ptr = p; 1005 ds->ds_user_evict_func = func; 1006 } 1007 mutex_exit(&ds->ds_lock); 1008 return (old); 1009 } 1010 1011 void * 1012 dsl_dataset_get_user_ptr(dsl_dataset_t *ds) 1013 { 1014 return (ds->ds_user_ptr); 1015 } 1016 1017 1018 blkptr_t * 1019 dsl_dataset_get_blkptr(dsl_dataset_t *ds) 1020 { 1021 return (&ds->ds_phys->ds_bp); 1022 } 1023 1024 void 1025 dsl_dataset_set_blkptr(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx) 1026 { 1027 ASSERT(dmu_tx_is_syncing(tx)); 1028 /* If it's the meta-objset, set dp_meta_rootbp */ 1029 if (ds == NULL) { 1030 tx->tx_pool->dp_meta_rootbp = *bp; 1031 } else { 1032 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1033 ds->ds_phys->ds_bp = *bp; 1034 } 1035 } 1036 1037 spa_t * 1038 dsl_dataset_get_spa(dsl_dataset_t *ds) 1039 { 1040 return (ds->ds_dir->dd_pool->dp_spa); 1041 } 1042 1043 void 1044 dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx) 1045 { 1046 dsl_pool_t *dp; 1047 1048 if (ds == NULL) /* this is the meta-objset */ 1049 return; 1050 1051 ASSERT(ds->ds_user_ptr != NULL); 1052 1053 if (ds->ds_phys->ds_next_snap_obj != 0) 1054 panic("dirtying snapshot!"); 1055 1056 dp = ds->ds_dir->dd_pool; 1057 1058 if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg) == 0) { 1059 /* up the hold count until we can be written out */ 1060 dmu_buf_add_ref(ds->ds_dbuf, ds); 1061 } 1062 } 1063 1064 /* 1065 * The unique space in the head dataset can be calculated by subtracting 1066 * the space used in the most recent snapshot, that is still being used 1067 * in this file system, from the space currently in use. To figure out 1068 * the space in the most recent snapshot still in use, we need to take 1069 * the total space used in the snapshot and subtract out the space that 1070 * has been freed up since the snapshot was taken. 
 */
static void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
	uint64_t mrs_used;
	uint64_t dlused, dlcomp, dluncomp;

	ASSERT(ds->ds_object == ds->ds_dir->dd_phys->dd_head_dataset_obj);

	if (ds->ds_phys->ds_prev_snap_obj != 0)
		mrs_used = ds->ds_prev->ds_phys->ds_used_bytes;
	else
		mrs_used = 0;

	VERIFY(0 == bplist_space(&ds->ds_deadlist, &dlused, &dlcomp,
	    &dluncomp));

	ASSERT3U(dlused, <=, mrs_used);
	ds->ds_phys->ds_unique_bytes =
	    ds->ds_phys->ds_used_bytes - (mrs_used - dlused);

	if (!DS_UNIQUE_IS_ACCURATE(ds) &&
	    spa_version(ds->ds_dir->dd_pool->dp_spa) >=
	    SPA_VERSION_UNIQUE_ACCURATE)
		ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}

static uint64_t
dsl_dataset_unique(dsl_dataset_t *ds)
{
	if (!DS_UNIQUE_IS_ACCURATE(ds) && !dsl_dataset_is_snapshot(ds))
		dsl_dataset_recalc_head_uniq(ds);

	return (ds->ds_phys->ds_unique_bytes);
}

struct killarg {
	int64_t *usedp;
	int64_t *compressedp;
	int64_t *uncompressedp;
	zio_t *zio;
	dmu_tx_t *tx;
};

static int
kill_blkptr(traverse_blk_cache_t *bc, spa_t *spa, void *arg)
{
	struct killarg *ka = arg;
	blkptr_t *bp = &bc->bc_blkptr;

	ASSERT3U(bc->bc_errno, ==, 0);

	/*
	 * Since this callback is not called concurrently, no lock is
	 * needed on the accounting values.
	 */
	*ka->usedp += bp_get_dasize(spa, bp);
	*ka->compressedp += BP_GET_PSIZE(bp);
	*ka->uncompressedp += BP_GET_UCSIZE(bp);
	/* XXX check for EIO? */
	(void) dsl_free(ka->zio, spa_get_dsl(spa), ka->tx->tx_txg,
	    bp, NULL, NULL, ARC_NOWAIT);
	return (0);
}

/* ARGSUSED */
static int
dsl_dataset_rollback_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;

	/*
	 * We can only roll back to emptiness if it is a ZPL objset.
	 */
	if (*ost != DMU_OST_ZFS && ds->ds_phys->ds_prev_snap_txg == 0)
		return (EINVAL);

	/*
	 * This must not be a snapshot.
	 */
	if (ds->ds_phys->ds_next_snap_obj != 0)
		return (EINVAL);

	/*
	 * If we made changes this txg, traverse_dsl_dataset won't find
	 * them.  Try again.
	 */
	if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg)
		return (EAGAIN);

	return (0);
}

/* ARGSUSED */
static void
dsl_dataset_rollback_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dmu_objset_type_t *ost = arg2;
	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);

	/*
	 * Before the rollback, destroy the ZIL.
	 */
	if (ds->ds_user_ptr != NULL) {
		zil_rollback_destroy(
		    ((objset_impl_t *)ds->ds_user_ptr)->os_zil, tx);

		/*
		 * We need to make sure that the objset_impl_t is reopened
		 * after we do the rollback; otherwise it will have the wrong
		 * objset_phys_t.  Normally this would happen when this
		 * dataset-open is closed, thus causing the
		 * dataset to be immediately evicted.  But when doing "zfs recv
		 * -F", we reopen the objset before that, so that there is no
		 * window where the dataset is closed and inconsistent.
		 */
		ds->ds_user_evict_func(ds, ds->ds_user_ptr);
		ds->ds_user_ptr = NULL;
	}

	/* Zero out the deadlist.
*/ 1196 bplist_close(&ds->ds_deadlist); 1197 bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx); 1198 ds->ds_phys->ds_deadlist_obj = 1199 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); 1200 VERIFY(0 == bplist_open(&ds->ds_deadlist, mos, 1201 ds->ds_phys->ds_deadlist_obj)); 1202 1203 { 1204 /* Free blkptrs that we gave birth to */ 1205 zio_t *zio; 1206 int64_t used = 0, compressed = 0, uncompressed = 0; 1207 struct killarg ka; 1208 int64_t delta; 1209 1210 zio = zio_root(tx->tx_pool->dp_spa, NULL, NULL, 1211 ZIO_FLAG_MUSTSUCCEED); 1212 ka.usedp = &used; 1213 ka.compressedp = &compressed; 1214 ka.uncompressedp = &uncompressed; 1215 ka.zio = zio; 1216 ka.tx = tx; 1217 (void) traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg, 1218 ADVANCE_POST, kill_blkptr, &ka); 1219 (void) zio_wait(zio); 1220 1221 /* only deduct space beyond any refreservation */ 1222 delta = parent_delta(ds, -used); 1223 dsl_dir_diduse_space(ds->ds_dir, 1224 delta, -compressed, -uncompressed, tx); 1225 } 1226 1227 if (ds->ds_prev && ds->ds_prev != ds->ds_dir->dd_pool->dp_origin_snap) { 1228 /* Change our contents to that of the prev snapshot */ 1229 ASSERT3U(ds->ds_prev->ds_object, ==, 1230 ds->ds_phys->ds_prev_snap_obj); 1231 ds->ds_phys->ds_bp = ds->ds_prev->ds_phys->ds_bp; 1232 ds->ds_phys->ds_used_bytes = 1233 ds->ds_prev->ds_phys->ds_used_bytes; 1234 ds->ds_phys->ds_compressed_bytes = 1235 ds->ds_prev->ds_phys->ds_compressed_bytes; 1236 ds->ds_phys->ds_uncompressed_bytes = 1237 ds->ds_prev->ds_phys->ds_uncompressed_bytes; 1238 ds->ds_phys->ds_flags = ds->ds_prev->ds_phys->ds_flags; 1239 ds->ds_phys->ds_unique_bytes = 0; 1240 1241 if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) { 1242 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 1243 ds->ds_prev->ds_phys->ds_unique_bytes = 0; 1244 } 1245 } else { 1246 objset_impl_t *osi; 1247 1248 /* Zero out our contents, recreate objset */ 1249 bzero(&ds->ds_phys->ds_bp, sizeof (blkptr_t)); 1250 ds->ds_phys->ds_used_bytes = 0; 1251 ds->ds_phys->ds_compressed_bytes = 0; 1252 ds->ds_phys->ds_uncompressed_bytes = 0; 1253 ds->ds_phys->ds_flags = 0; 1254 ds->ds_phys->ds_unique_bytes = 0; 1255 osi = dmu_objset_create_impl(ds->ds_dir->dd_pool->dp_spa, ds, 1256 &ds->ds_phys->ds_bp, *ost, tx); 1257 #ifdef _KERNEL 1258 zfs_create_fs(&osi->os, kcred, NULL, tx); 1259 #endif 1260 } 1261 1262 spa_history_internal_log(LOG_DS_ROLLBACK, ds->ds_dir->dd_pool->dp_spa, 1263 tx, cr, "dataset = %llu", ds->ds_object); 1264 } 1265 1266 /* ARGSUSED */ 1267 static int 1268 dsl_dataset_destroy_begin_check(void *arg1, void *arg2, dmu_tx_t *tx) 1269 { 1270 dsl_dataset_t *ds = arg1; 1271 objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset; 1272 uint64_t count; 1273 int err; 1274 1275 /* 1276 * Can't delete a head dataset if there are snapshots of it. 1277 * (Except if the only snapshots are from the branch we cloned 1278 * from.) 1279 */ 1280 if (ds->ds_prev != NULL && 1281 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1282 return (EINVAL); 1283 1284 /* 1285 * This is really a dsl_dir thing, but check it here so that 1286 * we'll be less likely to leave this dataset inconsistent & 1287 * nearly destroyed. 
1288 */ 1289 err = zap_count(mos, ds->ds_dir->dd_phys->dd_child_dir_zapobj, &count); 1290 if (err) 1291 return (err); 1292 if (count != 0) 1293 return (EEXIST); 1294 1295 return (0); 1296 } 1297 1298 /* ARGSUSED */ 1299 static void 1300 dsl_dataset_destroy_begin_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 1301 { 1302 dsl_dataset_t *ds = arg1; 1303 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1304 1305 /* Mark it as inconsistent on-disk, in case we crash */ 1306 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1307 ds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT; 1308 1309 spa_history_internal_log(LOG_DS_DESTROY_BEGIN, dp->dp_spa, tx, 1310 cr, "dataset = %llu", ds->ds_object); 1311 } 1312 1313 /* ARGSUSED */ 1314 int 1315 dsl_dataset_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx) 1316 { 1317 dsl_dataset_t *ds = arg1; 1318 1319 /* we have an owner hold, so noone else can destroy us */ 1320 ASSERT(!DSL_DATASET_IS_DESTROYED(ds)); 1321 1322 /* Can't delete a branch point. */ 1323 if (ds->ds_phys->ds_num_children > 1) 1324 return (EEXIST); 1325 1326 /* 1327 * Can't delete a head dataset if there are snapshots of it. 1328 * (Except if the only snapshots are from the branch we cloned 1329 * from.) 1330 */ 1331 if (ds->ds_prev != NULL && 1332 ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) 1333 return (EINVAL); 1334 1335 /* 1336 * If we made changes this txg, traverse_dsl_dataset won't find 1337 * them. Try again. 1338 */ 1339 if (ds->ds_phys->ds_bp.blk_birth >= tx->tx_txg) 1340 return (EAGAIN); 1341 1342 /* XXX we should do some i/o error checking... */ 1343 return (0); 1344 } 1345 1346 struct refsarg { 1347 kmutex_t lock; 1348 boolean_t gone; 1349 kcondvar_t cv; 1350 }; 1351 1352 /* ARGSUSED */ 1353 static void 1354 dsl_dataset_refs_gone(dmu_buf_t *db, void *argv) 1355 { 1356 struct refsarg *arg = argv; 1357 1358 mutex_enter(&arg->lock); 1359 arg->gone = TRUE; 1360 cv_signal(&arg->cv); 1361 mutex_exit(&arg->lock); 1362 } 1363 1364 static void 1365 dsl_dataset_drain_refs(dsl_dataset_t *ds, void *tag) 1366 { 1367 struct refsarg arg; 1368 1369 mutex_init(&arg.lock, NULL, MUTEX_DEFAULT, NULL); 1370 cv_init(&arg.cv, NULL, CV_DEFAULT, NULL); 1371 arg.gone = FALSE; 1372 (void) dmu_buf_update_user(ds->ds_dbuf, ds, &arg, &ds->ds_phys, 1373 dsl_dataset_refs_gone); 1374 dmu_buf_rele(ds->ds_dbuf, tag); 1375 mutex_enter(&arg.lock); 1376 while (!arg.gone) 1377 cv_wait(&arg.cv, &arg.lock); 1378 ASSERT(arg.gone); 1379 mutex_exit(&arg.lock); 1380 ds->ds_dbuf = NULL; 1381 ds->ds_phys = NULL; 1382 mutex_destroy(&arg.lock); 1383 cv_destroy(&arg.cv); 1384 } 1385 1386 void 1387 dsl_dataset_destroy_sync(void *arg1, void *tag, cred_t *cr, dmu_tx_t *tx) 1388 { 1389 dsl_dataset_t *ds = arg1; 1390 int64_t used = 0, compressed = 0, uncompressed = 0; 1391 zio_t *zio; 1392 int err; 1393 int after_branch_point = FALSE; 1394 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1395 objset_t *mos = dp->dp_meta_objset; 1396 dsl_dataset_t *ds_prev = NULL; 1397 uint64_t obj; 1398 1399 ASSERT(ds->ds_owner); 1400 ASSERT3U(ds->ds_phys->ds_num_children, <=, 1); 1401 ASSERT(ds->ds_prev == NULL || 1402 ds->ds_prev->ds_phys->ds_next_snap_obj != ds->ds_object); 1403 ASSERT3U(ds->ds_phys->ds_bp.blk_birth, <=, tx->tx_txg); 1404 1405 /* signal any waiters that this dataset is going away */ 1406 mutex_enter(&ds->ds_lock); 1407 ds->ds_owner = dsl_reaper; 1408 cv_broadcast(&ds->ds_exclusive_cv); 1409 mutex_exit(&ds->ds_lock); 1410 1411 /* Remove our reservation */ 1412 if (ds->ds_reserved != 0) { 1413 uint64_t val = 0; 1414 
dsl_dataset_set_reservation_sync(ds, &val, cr, tx); 1415 ASSERT3U(ds->ds_reserved, ==, 0); 1416 } 1417 1418 ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock)); 1419 1420 dsl_pool_ds_destroyed(ds, tx); 1421 1422 obj = ds->ds_object; 1423 1424 if (ds->ds_phys->ds_prev_snap_obj != 0) { 1425 if (ds->ds_prev) { 1426 ds_prev = ds->ds_prev; 1427 } else { 1428 VERIFY(0 == dsl_dataset_hold_obj(dp, 1429 ds->ds_phys->ds_prev_snap_obj, FTAG, &ds_prev)); 1430 } 1431 after_branch_point = 1432 (ds_prev->ds_phys->ds_next_snap_obj != obj); 1433 1434 dmu_buf_will_dirty(ds_prev->ds_dbuf, tx); 1435 if (after_branch_point && 1436 ds_prev->ds_phys->ds_next_clones_obj != 0) { 1437 VERIFY(0 == zap_remove_int(mos, 1438 ds_prev->ds_phys->ds_next_clones_obj, obj, tx)); 1439 if (ds->ds_phys->ds_next_snap_obj != 0) { 1440 VERIFY(0 == zap_add_int(mos, 1441 ds_prev->ds_phys->ds_next_clones_obj, 1442 ds->ds_phys->ds_next_snap_obj, tx)); 1443 } 1444 } 1445 if (after_branch_point && 1446 ds->ds_phys->ds_next_snap_obj == 0) { 1447 /* This clone is toast. */ 1448 ASSERT(ds_prev->ds_phys->ds_num_children > 1); 1449 ds_prev->ds_phys->ds_num_children--; 1450 } else if (!after_branch_point) { 1451 ds_prev->ds_phys->ds_next_snap_obj = 1452 ds->ds_phys->ds_next_snap_obj; 1453 } 1454 } 1455 1456 zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED); 1457 1458 if (ds->ds_phys->ds_next_snap_obj != 0) { 1459 blkptr_t bp; 1460 dsl_dataset_t *ds_next; 1461 uint64_t itor = 0; 1462 uint64_t old_unique; 1463 1464 VERIFY(0 == dsl_dataset_hold_obj(dp, 1465 ds->ds_phys->ds_next_snap_obj, FTAG, &ds_next)); 1466 ASSERT3U(ds_next->ds_phys->ds_prev_snap_obj, ==, obj); 1467 1468 old_unique = dsl_dataset_unique(ds_next); 1469 1470 dmu_buf_will_dirty(ds_next->ds_dbuf, tx); 1471 ds_next->ds_phys->ds_prev_snap_obj = 1472 ds->ds_phys->ds_prev_snap_obj; 1473 ds_next->ds_phys->ds_prev_snap_txg = 1474 ds->ds_phys->ds_prev_snap_txg; 1475 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==, 1476 ds_prev ? ds_prev->ds_phys->ds_creation_txg : 0); 1477 1478 /* 1479 * Transfer to our deadlist (which will become next's 1480 * new deadlist) any entries from next's current 1481 * deadlist which were born before prev, and free the 1482 * other entries. 1483 * 1484 * XXX we're doing this long task with the config lock held 1485 */ 1486 while (bplist_iterate(&ds_next->ds_deadlist, &itor, &bp) == 0) { 1487 if (bp.blk_birth <= ds->ds_phys->ds_prev_snap_txg) { 1488 VERIFY(0 == bplist_enqueue(&ds->ds_deadlist, 1489 &bp, tx)); 1490 if (ds_prev && !after_branch_point && 1491 bp.blk_birth > 1492 ds_prev->ds_phys->ds_prev_snap_txg) { 1493 ds_prev->ds_phys->ds_unique_bytes += 1494 bp_get_dasize(dp->dp_spa, &bp); 1495 } 1496 } else { 1497 used += bp_get_dasize(dp->dp_spa, &bp); 1498 compressed += BP_GET_PSIZE(&bp); 1499 uncompressed += BP_GET_UCSIZE(&bp); 1500 /* XXX check return value? 
 */
				(void) dsl_free(zio, dp, tx->tx_txg,
				    &bp, NULL, NULL, ARC_NOWAIT);
			}
		}

		/* free next's deadlist */
		bplist_close(&ds_next->ds_deadlist);
		bplist_destroy(mos, ds_next->ds_phys->ds_deadlist_obj, tx);

		/* set next's deadlist to our deadlist */
		bplist_close(&ds->ds_deadlist);
		ds_next->ds_phys->ds_deadlist_obj =
		    ds->ds_phys->ds_deadlist_obj;
		VERIFY(0 == bplist_open(&ds_next->ds_deadlist, mos,
		    ds_next->ds_phys->ds_deadlist_obj));
		ds->ds_phys->ds_deadlist_obj = 0;

		if (ds_next->ds_phys->ds_next_snap_obj != 0) {
			/*
			 * Update next's unique to include blocks which
			 * were previously shared by only this snapshot
			 * and it.  Those blocks will be born after the
			 * prev snap and before this snap, and will have
			 * died after the next snap and before the one
			 * after that (i.e., be on the snap after next's
			 * deadlist).
			 *
			 * XXX we're doing this long task with the
			 * config lock held
			 */
			dsl_dataset_t *ds_after_next;

			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds_next->ds_phys->ds_next_snap_obj,
			    FTAG, &ds_after_next));
			itor = 0;
			while (bplist_iterate(&ds_after_next->ds_deadlist,
			    &itor, &bp) == 0) {
				if (bp.blk_birth >
				    ds->ds_phys->ds_prev_snap_txg &&
				    bp.blk_birth <=
				    ds->ds_phys->ds_creation_txg) {
					ds_next->ds_phys->ds_unique_bytes +=
					    bp_get_dasize(dp->dp_spa, &bp);
				}
			}

			dsl_dataset_rele(ds_after_next, FTAG);
			ASSERT3P(ds_next->ds_prev, ==, NULL);
		} else {
			ASSERT3P(ds_next->ds_prev, ==, ds);
			dsl_dataset_drop_ref(ds_next->ds_prev, ds_next);
			ds_next->ds_prev = NULL;
			if (ds_prev) {
				VERIFY(0 == dsl_dataset_get_ref(dp,
				    ds->ds_phys->ds_prev_snap_obj,
				    ds_next, &ds_next->ds_prev));
			}

			dsl_dataset_recalc_head_uniq(ds_next);

			/*
			 * Reduce the amount of our unconsumed refreservation
			 * being charged to our parent by the amount of
			 * new unique data we have gained.
			 */
			if (old_unique < ds_next->ds_reserved) {
				int64_t mrsdelta;
				uint64_t new_unique =
				    ds_next->ds_phys->ds_unique_bytes;

				ASSERT(old_unique <= new_unique);
				mrsdelta = MIN(new_unique - old_unique,
				    ds_next->ds_reserved - old_unique);
				dsl_dir_diduse_space(ds->ds_dir, -mrsdelta,
				    0, 0, tx);
			}
		}
		dsl_dataset_rele(ds_next, FTAG);

		/*
		 * NB: unique_bytes might not be accurate for the head objset.
		 * Before SPA_VERSION 9, we didn't update its value when we
		 * deleted the most recent snapshot.
		 */
		ASSERT3U(used, ==, ds->ds_phys->ds_unique_bytes);
	} else {
		/*
		 * There's no next snapshot, so this is a head dataset.
		 * Destroy the deadlist.  Unless it's a clone, the
		 * deadlist should be empty.  (If it's a clone, it's
		 * safe to ignore the deadlist contents.)
		 */
		struct killarg ka;

		ASSERT(after_branch_point || bplist_empty(&ds->ds_deadlist));
		bplist_close(&ds->ds_deadlist);
		bplist_destroy(mos, ds->ds_phys->ds_deadlist_obj, tx);
		ds->ds_phys->ds_deadlist_obj = 0;

		/*
		 * Free everything that we point to (that's born after
		 * the previous snapshot, if we are a clone)
		 *
		 * XXX we're doing this long task with the config lock held
		 */
		ka.usedp = &used;
		ka.compressedp = &compressed;
		ka.uncompressedp = &uncompressed;
		ka.zio = zio;
		ka.tx = tx;
		err = traverse_dsl_dataset(ds, ds->ds_phys->ds_prev_snap_txg,
		    ADVANCE_POST, kill_blkptr, &ka);
		ASSERT3U(err, ==, 0);
		ASSERT(spa_version(dp->dp_spa) <
		    SPA_VERSION_UNIQUE_ACCURATE ||
		    used == ds->ds_phys->ds_unique_bytes);
	}

	err = zio_wait(zio);
	ASSERT3U(err, ==, 0);

	dsl_dir_diduse_space(ds->ds_dir, -used, -compressed, -uncompressed, tx);

	if (ds->ds_dir->dd_phys->dd_head_dataset_obj == ds->ds_object) {
		/* Erase the link in the dir */
		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_head_dataset_obj = 0;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj != 0);
		err = zap_destroy(mos, ds->ds_phys->ds_snapnames_zapobj, tx);
		ASSERT(err == 0);
	} else {
		/* remove from snapshot namespace */
		dsl_dataset_t *ds_head;
		ASSERT(ds->ds_phys->ds_snapnames_zapobj == 0);
		VERIFY(0 == dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &ds_head));
		VERIFY(0 == dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
		{
			uint64_t val;

			err = dsl_dataset_snap_lookup(ds_head,
			    ds->ds_snapname, &val);
			ASSERT3U(err, ==, 0);
			ASSERT3U(val, ==, obj);
		}
#endif
		err = dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx);
		ASSERT(err == 0);
		dsl_dataset_rele(ds_head, FTAG);
	}

	if (ds_prev && ds->ds_prev != ds_prev)
		dsl_dataset_rele(ds_prev, FTAG);

	spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
	spa_history_internal_log(LOG_DS_DESTROY, dp->dp_spa, tx,
	    cr, "dataset = %llu", ds->ds_object);

	if (ds->ds_phys->ds_next_clones_obj != 0) {
		uint64_t count;
		ASSERT(0 == zap_count(mos,
		    ds->ds_phys->ds_next_clones_obj, &count) && count == 0);
		VERIFY(0 == dmu_object_free(mos,
		    ds->ds_phys->ds_next_clones_obj, tx));
	}
	dsl_dir_close(ds->ds_dir, ds);
	ds->ds_dir = NULL;
	dsl_dataset_drain_refs(ds, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}

static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	uint64_t asize;

	if (!dmu_tx_is_syncing(tx))
		return (0);

	/*
	 * If there's an fs-only reservation, any blocks that might become
	 * owned by the snapshot dataset must be accommodated by space
	 * outside of the reservation.
	 */
	asize = MIN(dsl_dataset_unique(ds), ds->ds_reserved);
	if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, FALSE))
		return (ENOSPC);

	/*
	 * Propagate any reserved space for this snapshot to other
	 * snapshot checks in this sync group.
	 */
	if (asize > 0)
		dsl_dir_willuse_space(ds->ds_dir, asize, tx);

	return (0);
}

/* ARGSUSED */
int
dsl_dataset_snapshot_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	int err;
	uint64_t value;

	/*
	 * We don't allow multiple snapshots of the same txg.  If there
	 * is already one, try again.
	 */
	if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
		return (EAGAIN);

	/*
	 * Check for a conflicting snapshot name.
	 */
	err = dsl_dataset_snap_lookup(ds, snapname, &value);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	/*
	 * Check that the resulting name is not too long.  It consists of
	 * the dataset name's length + 1 for the @-sign + the snapshot
	 * name's length.
	 */
	if (dsl_dataset_namelen(ds) + 1 + strlen(snapname) >= MAXNAMELEN)
		return (ENAMETOOLONG);

	err = dsl_dataset_snapshot_reserve_space(ds, tx);
	if (err)
		return (err);

	ds->ds_trysnap_txg = tx->tx_txg;
	return (0);
}

void
dsl_dataset_snapshot_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	const char *snapname = arg2;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dmu_buf_t *dbuf;
	dsl_dataset_phys_t *dsphys;
	uint64_t dsobj, crtxg;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(RW_WRITE_HELD(&dp->dp_config_rwlock));

	/*
	 * The origin's ds_creation_txg has to be < TXG_INITIAL
	 */
	if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
		crtxg = 1;
	else
		crtxg = tx->tx_txg;

	dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
	    DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
	VERIFY(0 == dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	dsphys = dbuf->db_data;
	bzero(dsphys, sizeof (dsl_dataset_phys_t));
	dsphys->ds_dir_obj = ds->ds_dir->dd_object;
	dsphys->ds_fsid_guid = unique_create();
	(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
	    sizeof (dsphys->ds_guid));
	dsphys->ds_prev_snap_obj = ds->ds_phys->ds_prev_snap_obj;
	dsphys->ds_prev_snap_txg = ds->ds_phys->ds_prev_snap_txg;
	dsphys->ds_next_snap_obj = ds->ds_object;
	dsphys->ds_num_children = 1;
	dsphys->ds_creation_time = gethrestime_sec();
	dsphys->ds_creation_txg = crtxg;
	dsphys->ds_deadlist_obj = ds->ds_phys->ds_deadlist_obj;
	dsphys->ds_used_bytes = ds->ds_phys->ds_used_bytes;
	dsphys->ds_compressed_bytes = ds->ds_phys->ds_compressed_bytes;
	dsphys->ds_uncompressed_bytes = ds->ds_phys->ds_uncompressed_bytes;
	dsphys->ds_flags = ds->ds_phys->ds_flags;
	dsphys->ds_bp = ds->ds_phys->ds_bp;
	dmu_buf_rele(dbuf, FTAG);

	ASSERT3U(ds->ds_prev != 0, ==, ds->ds_phys->ds_prev_snap_obj != 0);
	if (ds->ds_prev) {
		uint64_t next_clones_obj =
		    ds->ds_prev->ds_phys->ds_next_clones_obj;
		ASSERT(ds->ds_prev->ds_phys->ds_next_snap_obj ==
		    ds->ds_object ||
		    ds->ds_prev->ds_phys->ds_num_children > 1);
		if (ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object) {
			dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
			ASSERT3U(ds->ds_phys->ds_prev_snap_txg, ==,
			    ds->ds_prev->ds_phys->ds_creation_txg);
			ds->ds_prev->ds_phys->ds_next_snap_obj = dsobj;
		} else if (next_clones_obj != 0) {
			VERIFY3U(0, ==, zap_remove_int(mos,
next_clones_obj, dsphys->ds_next_snap_obj, tx)); 1802 VERIFY3U(0, ==, zap_add_int(mos, 1803 next_clones_obj, dsobj, tx)); 1804 } 1805 } 1806 1807 /* 1808 * If we have a reference-reservation on this dataset, we will 1809 * need to increase the amount of refreservation being charged 1810 * since our unique space is going to zero. 1811 */ 1812 if (ds->ds_reserved) { 1813 int64_t add = MIN(dsl_dataset_unique(ds), ds->ds_reserved); 1814 dsl_dir_diduse_space(ds->ds_dir, add, 0, 0, tx); 1815 } 1816 1817 bplist_close(&ds->ds_deadlist); 1818 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1819 ASSERT3U(ds->ds_phys->ds_prev_snap_txg, <, tx->tx_txg); 1820 ds->ds_phys->ds_prev_snap_obj = dsobj; 1821 ds->ds_phys->ds_prev_snap_txg = crtxg; 1822 ds->ds_phys->ds_unique_bytes = 0; 1823 if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE) 1824 ds->ds_phys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE; 1825 ds->ds_phys->ds_deadlist_obj = 1826 bplist_create(mos, DSL_DEADLIST_BLOCKSIZE, tx); 1827 VERIFY(0 == bplist_open(&ds->ds_deadlist, mos, 1828 ds->ds_phys->ds_deadlist_obj)); 1829 1830 dprintf("snap '%s' -> obj %llu\n", snapname, dsobj); 1831 err = zap_add(mos, ds->ds_phys->ds_snapnames_zapobj, 1832 snapname, 8, 1, &dsobj, tx); 1833 ASSERT(err == 0); 1834 1835 if (ds->ds_prev) 1836 dsl_dataset_drop_ref(ds->ds_prev, ds); 1837 VERIFY(0 == dsl_dataset_get_ref(dp, 1838 ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev)); 1839 1840 dsl_pool_ds_snapshotted(ds, tx); 1841 1842 spa_history_internal_log(LOG_DS_SNAPSHOT, dp->dp_spa, tx, cr, 1843 "dataset = %llu", dsobj); 1844 } 1845 1846 void 1847 dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx) 1848 { 1849 ASSERT(dmu_tx_is_syncing(tx)); 1850 ASSERT(ds->ds_user_ptr != NULL); 1851 ASSERT(ds->ds_phys->ds_next_snap_obj == 0); 1852 1853 /* 1854 * in case we had to change ds_fsid_guid when we opened it, 1855 * sync it out now. 1856 */ 1857 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1858 ds->ds_phys->ds_fsid_guid = ds->ds_fsid_guid; 1859 1860 dsl_dir_dirty(ds->ds_dir, tx); 1861 dmu_objset_sync(ds->ds_user_ptr, zio, tx); 1862 } 1863 1864 void 1865 dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv) 1866 { 1867 uint64_t refd, avail, uobjs, aobjs; 1868 1869 dsl_dir_stats(ds->ds_dir, nv); 1870 1871 dsl_dataset_space(ds, &refd, &avail, &uobjs, &aobjs); 1872 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE, avail); 1873 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED, refd); 1874 1875 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION, 1876 ds->ds_phys->ds_creation_time); 1877 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG, 1878 ds->ds_phys->ds_creation_txg); 1879 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA, 1880 ds->ds_quota); 1881 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION, 1882 ds->ds_reserved); 1883 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID, 1884 ds->ds_phys->ds_guid); 1885 1886 if (ds->ds_phys->ds_next_snap_obj) { 1887 /* 1888 * This is a snapshot; override the dd's space used with 1889 * our unique space and compression ratio. 1890 */ 1891 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED, 1892 ds->ds_phys->ds_unique_bytes); 1893 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO, 1894 ds->ds_phys->ds_compressed_bytes == 0 ? 
100 : 1895 (ds->ds_phys->ds_uncompressed_bytes * 100 / 1896 ds->ds_phys->ds_compressed_bytes)); 1897 } 1898 } 1899 1900 void 1901 dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat) 1902 { 1903 stat->dds_creation_txg = ds->ds_phys->ds_creation_txg; 1904 stat->dds_inconsistent = ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT; 1905 stat->dds_guid = ds->ds_phys->ds_guid; 1906 if (ds->ds_phys->ds_next_snap_obj) { 1907 stat->dds_is_snapshot = B_TRUE; 1908 stat->dds_num_clones = ds->ds_phys->ds_num_children - 1; 1909 } 1910 1911 /* clone origin is really a dsl_dir thing... */ 1912 rw_enter(&ds->ds_dir->dd_pool->dp_config_rwlock, RW_READER); 1913 if (dsl_dir_is_clone(ds->ds_dir)) { 1914 dsl_dataset_t *ods; 1915 1916 VERIFY(0 == dsl_dataset_get_ref(ds->ds_dir->dd_pool, 1917 ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &ods)); 1918 dsl_dataset_name(ods, stat->dds_origin); 1919 dsl_dataset_drop_ref(ods, FTAG); 1920 } 1921 rw_exit(&ds->ds_dir->dd_pool->dp_config_rwlock); 1922 } 1923 1924 uint64_t 1925 dsl_dataset_fsid_guid(dsl_dataset_t *ds) 1926 { 1927 return (ds->ds_fsid_guid); 1928 } 1929 1930 void 1931 dsl_dataset_space(dsl_dataset_t *ds, 1932 uint64_t *refdbytesp, uint64_t *availbytesp, 1933 uint64_t *usedobjsp, uint64_t *availobjsp) 1934 { 1935 *refdbytesp = ds->ds_phys->ds_used_bytes; 1936 *availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE); 1937 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) 1938 *availbytesp += ds->ds_reserved - ds->ds_phys->ds_unique_bytes; 1939 if (ds->ds_quota != 0) { 1940 /* 1941 * Adjust available bytes according to refquota 1942 */ 1943 if (*refdbytesp < ds->ds_quota) 1944 *availbytesp = MIN(*availbytesp, 1945 ds->ds_quota - *refdbytesp); 1946 else 1947 *availbytesp = 0; 1948 } 1949 *usedobjsp = ds->ds_phys->ds_bp.blk_fill; 1950 *availobjsp = DN_MAX_OBJECT - *usedobjsp; 1951 } 1952 1953 boolean_t 1954 dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds) 1955 { 1956 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1957 1958 ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) || 1959 dsl_pool_sync_context(dp)); 1960 if (ds->ds_prev == NULL) 1961 return (B_FALSE); 1962 if (ds->ds_phys->ds_bp.blk_birth > 1963 ds->ds_prev->ds_phys->ds_creation_txg) 1964 return (B_TRUE); 1965 return (B_FALSE); 1966 } 1967 1968 /* ARGSUSED */ 1969 static int 1970 dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx) 1971 { 1972 dsl_dataset_t *ds = arg1; 1973 char *newsnapname = arg2; 1974 dsl_dir_t *dd = ds->ds_dir; 1975 dsl_dataset_t *hds; 1976 uint64_t val; 1977 int err; 1978 1979 err = dsl_dataset_hold_obj(dd->dd_pool, 1980 dd->dd_phys->dd_head_dataset_obj, FTAG, &hds); 1981 if (err) 1982 return (err); 1983 1984 /* new name better not be in use */ 1985 err = dsl_dataset_snap_lookup(hds, newsnapname, &val); 1986 dsl_dataset_rele(hds, FTAG); 1987 1988 if (err == 0) 1989 err = EEXIST; 1990 else if (err == ENOENT) 1991 err = 0; 1992 1993 /* dataset name + 1 for the "@" + the new snapshot name must fit */ 1994 if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN) 1995 err = ENAMETOOLONG; 1996 1997 return (err); 1998 } 1999 2000 static void 2001 dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, 2002 cred_t *cr, dmu_tx_t *tx) 2003 { 2004 dsl_dataset_t *ds = arg1; 2005 const char *newsnapname = arg2; 2006 dsl_dir_t *dd = ds->ds_dir; 2007 objset_t *mos = dd->dd_pool->dp_meta_objset; 2008 dsl_dataset_t *hds; 2009 int err; 2010 2011 ASSERT(ds->ds_phys->ds_next_snap_obj != 0); 2012 2013 VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool, 2014 
dd->dd_phys->dd_head_dataset_obj, FTAG, &hds)); 2015 2016 VERIFY(0 == dsl_dataset_get_snapname(ds)); 2017 err = dsl_dataset_snap_remove(hds, ds->ds_snapname, tx); 2018 ASSERT3U(err, ==, 0); 2019 mutex_enter(&ds->ds_lock); 2020 (void) strcpy(ds->ds_snapname, newsnapname); 2021 mutex_exit(&ds->ds_lock); 2022 err = zap_add(mos, hds->ds_phys->ds_snapnames_zapobj, 2023 ds->ds_snapname, 8, 1, &ds->ds_object, tx); 2024 ASSERT3U(err, ==, 0); 2025 2026 spa_history_internal_log(LOG_DS_RENAME, dd->dd_pool->dp_spa, tx, 2027 cr, "dataset = %llu", ds->ds_object); 2028 dsl_dataset_rele(hds, FTAG); 2029 } 2030 2031 struct renamesnaparg { 2032 dsl_sync_task_group_t *dstg; 2033 char failed[MAXPATHLEN]; 2034 char *oldsnap; 2035 char *newsnap; 2036 }; 2037 2038 static int 2039 dsl_snapshot_rename_one(char *name, void *arg) 2040 { 2041 struct renamesnaparg *ra = arg; 2042 dsl_dataset_t *ds = NULL; 2043 char *cp; 2044 int err; 2045 2046 cp = name + strlen(name); 2047 *cp = '@'; 2048 (void) strcpy(cp + 1, ra->oldsnap); 2049 2050 /* 2051 * For recursive snapshot renames the parent won't be changing 2052 * so we just pass name for both the to/from argument. 2053 */ 2054 if (err = zfs_secpolicy_rename_perms(name, name, CRED())) { 2055 (void) strcpy(ra->failed, name); 2056 return (err); 2057 } 2058 2059 #ifdef _KERNEL 2060 /* 2061 * For all filesystems undergoing rename, we'll need to unmount it. 2062 */ 2063 (void) zfs_unmount_snap(name, NULL); 2064 #endif 2065 err = dsl_dataset_hold(name, ra->dstg, &ds); 2066 *cp = '\0'; 2067 if (err == ENOENT) { 2068 return (0); 2069 } else if (err) { 2070 (void) strcpy(ra->failed, name); 2071 return (err); 2072 } 2073 2074 dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check, 2075 dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0); 2076 2077 return (0); 2078 } 2079 2080 static int 2081 dsl_recursive_rename(char *oldname, const char *newname) 2082 { 2083 int err; 2084 struct renamesnaparg *ra; 2085 dsl_sync_task_t *dst; 2086 spa_t *spa; 2087 char *cp, *fsname = spa_strdup(oldname); 2088 int len = strlen(oldname); 2089 2090 /* truncate the snapshot name to get the fsname */ 2091 cp = strchr(fsname, '@'); 2092 *cp = '\0'; 2093 2094 err = spa_open(fsname, &spa, FTAG); 2095 if (err) { 2096 kmem_free(fsname, len + 1); 2097 return (err); 2098 } 2099 ra = kmem_alloc(sizeof (struct renamesnaparg), KM_SLEEP); 2100 ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa)); 2101 2102 ra->oldsnap = strchr(oldname, '@') + 1; 2103 ra->newsnap = strchr(newname, '@') + 1; 2104 *ra->failed = '\0'; 2105 2106 err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra, 2107 DS_FIND_CHILDREN); 2108 kmem_free(fsname, len + 1); 2109 2110 if (err == 0) { 2111 err = dsl_sync_task_group_wait(ra->dstg); 2112 } 2113 2114 for (dst = list_head(&ra->dstg->dstg_tasks); dst; 2115 dst = list_next(&ra->dstg->dstg_tasks, dst)) { 2116 dsl_dataset_t *ds = dst->dst_arg1; 2117 if (dst->dst_err) { 2118 dsl_dir_name(ds->ds_dir, ra->failed); 2119 (void) strcat(ra->failed, "@"); 2120 (void) strcat(ra->failed, ra->newsnap); 2121 } 2122 dsl_dataset_rele(ds, ra->dstg); 2123 } 2124 2125 if (err) 2126 (void) strcpy(oldname, ra->failed); 2127 2128 dsl_sync_task_group_destroy(ra->dstg); 2129 kmem_free(ra, sizeof (struct renamesnaparg)); 2130 spa_close(spa, FTAG); 2131 return (err); 2132 } 2133 2134 static int 2135 dsl_valid_rename(char *oldname, void *arg) 2136 { 2137 int delta = *(int *)arg; 2138 2139 if (strlen(oldname) + delta >= MAXNAMELEN) 2140 return (ENAMETOOLONG); 2141 2142 return (0); 2143 } 2144 2145 #pragma 
weak dmu_objset_rename = dsl_dataset_rename 2146 int 2147 dsl_dataset_rename(char *oldname, const char *newname, boolean_t recursive) 2148 { 2149 dsl_dir_t *dd; 2150 dsl_dataset_t *ds; 2151 const char *tail; 2152 int err; 2153 2154 err = dsl_dir_open(oldname, FTAG, &dd, &tail); 2155 if (err) 2156 return (err); 2157 if (tail == NULL) { 2158 int delta = strlen(newname) - strlen(oldname); 2159 2160 /* if we're growing, validate child name lengths */ 2161 if (delta > 0) 2162 err = dmu_objset_find(oldname, dsl_valid_rename, 2163 &delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS); 2164 2165 if (!err) 2166 err = dsl_dir_rename(dd, newname); 2167 dsl_dir_close(dd, FTAG); 2168 return (err); 2169 } 2170 if (tail[0] != '@') { 2171 /* the name ended in a nonexistent component */ 2172 dsl_dir_close(dd, FTAG); 2173 return (ENOENT); 2174 } 2175 2176 dsl_dir_close(dd, FTAG); 2177 2178 /* new name must be snapshot in same filesystem */ 2179 tail = strchr(newname, '@'); 2180 if (tail == NULL) 2181 return (EINVAL); 2182 tail++; 2183 if (strncmp(oldname, newname, tail - newname) != 0) 2184 return (EXDEV); 2185 2186 if (recursive) { 2187 err = dsl_recursive_rename(oldname, newname); 2188 } else { 2189 err = dsl_dataset_hold(oldname, FTAG, &ds); 2190 if (err) 2191 return (err); 2192 2193 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 2194 dsl_dataset_snapshot_rename_check, 2195 dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1); 2196 2197 dsl_dataset_rele(ds, FTAG); 2198 } 2199 2200 return (err); 2201 } 2202 2203 struct promotenode { 2204 list_node_t link; 2205 dsl_dataset_t *ds; 2206 }; 2207 2208 struct promotearg { 2209 list_t snap_list; 2210 dsl_dataset_t *clone_origin, *old_head; 2211 uint64_t used, comp, uncomp, unique; 2212 uint64_t newnext_obj; 2213 }; 2214 2215 /* ARGSUSED */ 2216 static int 2217 dsl_dataset_promote_check(void *arg1, void *arg2, dmu_tx_t *tx) 2218 { 2219 dsl_dataset_t *hds = arg1; 2220 struct promotearg *pa = arg2; 2221 struct promotenode *snap = list_head(&pa->snap_list); 2222 dsl_pool_t *dp = hds->ds_dir->dd_pool; 2223 dsl_dataset_t *origin_ds = snap->ds; 2224 dsl_dataset_t *newnext_ds; 2225 char *name; 2226 uint64_t itor = 0; 2227 blkptr_t bp; 2228 int err; 2229 2230 /* Check that it is a real clone */ 2231 if (!dsl_dir_is_clone(hds->ds_dir)) 2232 return (EINVAL); 2233 2234 /* Since this is so expensive, don't do the preliminary check */ 2235 if (!dmu_tx_is_syncing(tx)) 2236 return (0); 2237 2238 if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) 2239 return (EXDEV); 2240 2241 /* find origin's new next ds */ 2242 newnext_ds = hds; 2243 while (newnext_ds->ds_phys->ds_prev_snap_obj != origin_ds->ds_object) { 2244 dsl_dataset_t *prev; 2245 2246 err = dsl_dataset_hold_obj(dp, 2247 newnext_ds->ds_phys->ds_prev_snap_obj, FTAG, &prev); 2248 if (newnext_ds != hds) 2249 dsl_dataset_rele(newnext_ds, FTAG); 2250 if (err) 2251 return (err); 2252 newnext_ds = prev; 2253 } 2254 pa->newnext_obj = newnext_ds->ds_object; 2255 2256 /* compute origin's new unique space */ 2257 pa->unique = 0; 2258 while ((err = bplist_iterate(&newnext_ds->ds_deadlist, 2259 &itor, &bp)) == 0) { 2260 if (bp.blk_birth > origin_ds->ds_phys->ds_prev_snap_txg) 2261 pa->unique += bp_get_dasize(dp->dp_spa, &bp); 2262 } 2263 if (newnext_ds != hds) 2264 dsl_dataset_rele(newnext_ds, FTAG); 2265 if (err != ENOENT) 2266 return (err); 2267 2268 name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2269 2270 /* 2271 * Walk the snapshots that we are moving 2272 * 2273 * Compute space to transfer.
Each snapshot gave birth to: 2274 * (my used) - (prev's used) + (deadlist's used) 2275 * So a sequence would look like: 2276 * uN - u(N-1) + dN + ... + u1 - u0 + d1 + u0 - 0 + d0 2277 * Which simplifies to: 2278 * uN + dN + ... + d1 + d0 2279 * Note however, if we stop before we reach the ORIGIN we get: 2280 * uN + dN + ... + dM - uM-1 2281 */ 2282 pa->used = origin_ds->ds_phys->ds_used_bytes; 2283 pa->comp = origin_ds->ds_phys->ds_compressed_bytes; 2284 pa->uncomp = origin_ds->ds_phys->ds_uncompressed_bytes; 2285 do { 2286 uint64_t val, dlused, dlcomp, dluncomp; 2287 dsl_dataset_t *ds = snap->ds; 2288 2289 /* Check that the snapshot name does not conflict */ 2290 dsl_dataset_name(ds, name); 2291 err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val); 2292 if (err == 0) 2293 err = EEXIST; 2294 if (err != ENOENT) 2295 break; 2296 err = 0; 2297 2298 /* The very first snapshot does not have a deadlist */ 2299 if (ds->ds_phys->ds_prev_snap_obj != 0) { 2300 if (err = bplist_space(&ds->ds_deadlist, 2301 &dlused, &dlcomp, &dluncomp)) 2302 break; 2303 pa->used += dlused; 2304 pa->comp += dlcomp; 2305 pa->uncomp += dluncomp; 2306 } 2307 } while (snap = list_next(&pa->snap_list, snap)); 2308 2309 /* 2310 * If we are a clone of a clone then we never reached ORIGIN, 2311 * so we need to subtract out the clone origin's used space. 2312 */ 2313 if (pa->clone_origin) { 2314 pa->used -= pa->clone_origin->ds_phys->ds_used_bytes; 2315 pa->comp -= pa->clone_origin->ds_phys->ds_compressed_bytes; 2316 pa->uncomp -= pa->clone_origin->ds_phys->ds_uncompressed_bytes; 2317 } 2318 2319 kmem_free(name, MAXPATHLEN); 2320 2321 /* Check that there is enough space here */ 2322 if (err == 0) { 2323 dsl_dir_t *odd = origin_ds->ds_dir; 2324 err = dsl_dir_transfer_possible(odd, hds->ds_dir, pa->used); 2325 } 2326 2327 return (err); 2328 } 2329 2330 static void 2331 dsl_dataset_promote_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 2332 { 2333 dsl_dataset_t *hds = arg1; 2334 struct promotearg *pa = arg2; 2335 struct promotenode *snap = list_head(&pa->snap_list); 2336 dsl_dataset_t *origin_ds = snap->ds; 2337 dsl_dir_t *dd = hds->ds_dir; 2338 dsl_pool_t *dp = hds->ds_dir->dd_pool; 2339 dsl_dir_t *odd = NULL; 2340 char *name; 2341 uint64_t oldnext_obj; 2342 2343 ASSERT(0 == (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE)); 2344 2345 /* 2346 * We need to explicitly open odd, since origin_ds's dd will be 2347 * changing. 
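 *
 * In outline, the steps below: repoint the origin snapshot's
 * ds_next_snap_obj at the promoted head's branch (and fix up its
 * next-clones zap if present), swap the dd_origin_obj values of the
 * two dsl_dirs, move each inherited snapshot's name entry and
 * ds_dir_obj into the promoted dir, and finally transfer the
 * accumulated used/compressed/uncompressed space from the old dir
 * to the new one and reset the origin's unique bytes.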
2348 */ 2349 VERIFY(0 == dsl_dir_open_obj(dp, origin_ds->ds_dir->dd_object, 2350 NULL, FTAG, &odd)); 2351 2352 /* change origin's next snap */ 2353 dmu_buf_will_dirty(origin_ds->ds_dbuf, tx); 2354 oldnext_obj = origin_ds->ds_phys->ds_next_snap_obj; 2355 origin_ds->ds_phys->ds_next_snap_obj = pa->newnext_obj; 2356 2357 /* change the origin's next clone */ 2358 if (origin_ds->ds_phys->ds_next_clones_obj) { 2359 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2360 origin_ds->ds_phys->ds_next_clones_obj, 2361 pa->newnext_obj, tx)); 2362 VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset, 2363 origin_ds->ds_phys->ds_next_clones_obj, 2364 oldnext_obj, tx)); 2365 } 2366 2367 /* change origin */ 2368 dmu_buf_will_dirty(dd->dd_dbuf, tx); 2369 ASSERT3U(dd->dd_phys->dd_origin_obj, ==, origin_ds->ds_object); 2370 dd->dd_phys->dd_origin_obj = odd->dd_phys->dd_origin_obj; 2371 dmu_buf_will_dirty(odd->dd_dbuf, tx); 2372 odd->dd_phys->dd_origin_obj = origin_ds->ds_object; 2373 2374 /* move snapshots to this dir */ 2375 name = kmem_alloc(MAXPATHLEN, KM_SLEEP); 2376 do { 2377 dsl_dataset_t *ds = snap->ds; 2378 2379 /* move snap name entry */ 2380 dsl_dataset_name(ds, name); 2381 VERIFY(0 == dsl_dataset_snap_remove(pa->old_head, 2382 ds->ds_snapname, tx)); 2383 VERIFY(0 == zap_add(dp->dp_meta_objset, 2384 hds->ds_phys->ds_snapnames_zapobj, ds->ds_snapname, 2385 8, 1, &ds->ds_object, tx)); 2386 2387 /* change containing dsl_dir */ 2388 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2389 ASSERT3U(ds->ds_phys->ds_dir_obj, ==, odd->dd_object); 2390 ds->ds_phys->ds_dir_obj = dd->dd_object; 2391 ASSERT3P(ds->ds_dir, ==, odd); 2392 dsl_dir_close(ds->ds_dir, ds); 2393 VERIFY(0 == dsl_dir_open_obj(dp, dd->dd_object, 2394 NULL, ds, &ds->ds_dir)); 2395 2396 ASSERT3U(dsl_prop_numcb(ds), ==, 0); 2397 } while (snap = list_next(&pa->snap_list, snap)); 2398 2399 /* change space accounting */ 2400 dsl_dir_diduse_space(odd, -pa->used, -pa->comp, -pa->uncomp, tx); 2401 dsl_dir_diduse_space(dd, pa->used, pa->comp, pa->uncomp, tx); 2402 origin_ds->ds_phys->ds_unique_bytes = pa->unique; 2403 2404 /* log history record */ 2405 spa_history_internal_log(LOG_DS_PROMOTE, dd->dd_pool->dp_spa, tx, 2406 cr, "dataset = %llu", hds->ds_object); 2407 2408 dsl_dir_close(odd, FTAG); 2409 kmem_free(name, MAXPATHLEN); 2410 } 2411 2412 int 2413 dsl_dataset_promote(const char *name) 2414 { 2415 dsl_dataset_t *ds; 2416 dsl_dir_t *dd; 2417 dsl_pool_t *dp; 2418 dmu_object_info_t doi; 2419 struct promotearg pa; 2420 struct promotenode *snap; 2421 uint64_t snap_obj; 2422 uint64_t last_snap = 0; 2423 int err; 2424 2425 err = dsl_dataset_hold(name, FTAG, &ds); 2426 if (err) 2427 return (err); 2428 dd = ds->ds_dir; 2429 dp = dd->dd_pool; 2430 2431 err = dmu_object_info(dp->dp_meta_objset, 2432 ds->ds_phys->ds_snapnames_zapobj, &doi); 2433 if (err) { 2434 dsl_dataset_rele(ds, FTAG); 2435 return (err); 2436 } 2437 2438 /* 2439 * We are going to inherit all the snapshots taken before our 2440 * origin (i.e., our new origin will be our parent's origin). 2441 * Take ownership of them so that we can rename them into our 2442 * namespace. 
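 *
 * For illustration (hypothetical names): if 'tank/fs' has snapshots
 * @a and @b and 'tank/clone' was cloned from 'tank/fs@b', promoting
 * 'tank/clone' moves those snapshots so they become 'tank/clone@a'
 * and 'tank/clone@b', after which 'tank/fs' is a clone of
 * 'tank/clone@b'.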
2443 */ 2444 pa.clone_origin = NULL; 2445 list_create(&pa.snap_list, 2446 sizeof (struct promotenode), offsetof(struct promotenode, link)); 2447 rw_enter(&dp->dp_config_rwlock, RW_READER); 2448 ASSERT(dd->dd_phys->dd_origin_obj != 0); 2449 snap_obj = dd->dd_phys->dd_origin_obj; 2450 while (snap_obj) { 2451 dsl_dataset_t *snapds; 2452 2453 /* 2454 * NB: this would be handled by the below check for 2455 * clone of a clone, but then we'd always own_obj() the 2456 * $ORIGIN, thus causing unnecessary EBUSYs. We don't 2457 * need to set pa.clone_origin because the $ORIGIN has 2458 * no data to account for. 2459 */ 2460 if (dp->dp_origin_snap && 2461 snap_obj == dp->dp_origin_snap->ds_object) 2462 break; 2463 2464 err = dsl_dataset_own_obj(dp, snap_obj, 0, FTAG, &snapds); 2465 if (err == ENOENT) { 2466 /* lost race with snapshot destroy */ 2467 struct promotenode *last = list_tail(&pa.snap_list); 2468 ASSERT(snap_obj != last->ds->ds_phys->ds_prev_snap_obj); 2469 snap_obj = last->ds->ds_phys->ds_prev_snap_obj; 2470 continue; 2471 } else if (err) { 2472 rw_exit(&dp->dp_config_rwlock); 2473 goto out; 2474 } 2475 2476 /* 2477 * We could be a clone of a clone. If we reach our 2478 * parent's branch point, we're done. 2479 */ 2480 if (last_snap && 2481 snapds->ds_phys->ds_next_snap_obj != last_snap) { 2482 pa.clone_origin = snapds; 2483 break; 2484 } 2485 2486 snap = kmem_alloc(sizeof (struct promotenode), KM_SLEEP); 2487 snap->ds = snapds; 2488 list_insert_tail(&pa.snap_list, snap); 2489 last_snap = snap_obj; 2490 snap_obj = snap->ds->ds_phys->ds_prev_snap_obj; 2491 } 2492 snap = list_head(&pa.snap_list); 2493 ASSERT(snap != NULL); 2494 err = dsl_dataset_hold_obj(dp, 2495 snap->ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &pa.old_head); 2496 rw_exit(&dp->dp_config_rwlock); 2497 2498 if (err) 2499 goto out; 2500 2501 /* 2502 * Add in 128x the snapnames zapobj size, since we will be moving 2503 * a bunch of snapnames to the promoted ds, and dirtying their 2504 * bonus buffers. 
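 *
 * The trailing argument to dsl_sync_task_do() below is a rough
 * estimate of how many blocks the sync task will dirty; it serves
 * only as a space-reservation hint for the sync task, not as an
 * exact count.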
2505 */ 2506 err = dsl_sync_task_do(dp, dsl_dataset_promote_check, 2507 dsl_dataset_promote_sync, ds, &pa, 2 + 2 * doi.doi_physical_blks); 2508 2509 dsl_dataset_rele(pa.old_head, FTAG); 2510 out: 2511 while ((snap = list_tail(&pa.snap_list)) != NULL) { 2512 list_remove(&pa.snap_list, snap); 2513 dsl_dataset_disown(snap->ds, FTAG); 2514 kmem_free(snap, sizeof (struct promotenode)); 2515 } 2516 list_destroy(&pa.snap_list); 2517 if (pa.clone_origin) 2518 dsl_dataset_disown(pa.clone_origin, FTAG); 2519 dsl_dataset_rele(ds, FTAG); 2520 return (err); 2521 } 2522 2523 struct cloneswaparg { 2524 dsl_dataset_t *cds; /* clone dataset */ 2525 dsl_dataset_t *ohds; /* origin's head dataset */ 2526 boolean_t force; 2527 int64_t unused_refres_delta; /* change in unconsumed refreservation */ 2528 }; 2529 2530 /* ARGSUSED */ 2531 static int 2532 dsl_dataset_clone_swap_check(void *arg1, void *arg2, dmu_tx_t *tx) 2533 { 2534 struct cloneswaparg *csa = arg1; 2535 2536 /* they should both be heads */ 2537 if (dsl_dataset_is_snapshot(csa->cds) || 2538 dsl_dataset_is_snapshot(csa->ohds)) 2539 return (EINVAL); 2540 2541 /* the branch point should be just before them */ 2542 if (csa->cds->ds_prev != csa->ohds->ds_prev) 2543 return (EINVAL); 2544 2545 /* cds should be the clone */ 2546 if (csa->cds->ds_prev->ds_phys->ds_next_snap_obj != 2547 csa->ohds->ds_object) 2548 return (EINVAL); 2549 2550 /* the clone should be a child of the origin */ 2551 if (csa->cds->ds_dir->dd_parent != csa->ohds->ds_dir) 2552 return (EINVAL); 2553 2554 /* ohds shouldn't be modified unless 'force' */ 2555 if (!csa->force && dsl_dataset_modified_since_lastsnap(csa->ohds)) 2556 return (ETXTBSY); 2557 2558 /* adjust amount of any unconsumed refreservation */ 2559 csa->unused_refres_delta = 2560 (int64_t)MIN(csa->ohds->ds_reserved, 2561 csa->ohds->ds_phys->ds_unique_bytes) - 2562 (int64_t)MIN(csa->ohds->ds_reserved, 2563 csa->cds->ds_phys->ds_unique_bytes); 2564 2565 if (csa->unused_refres_delta > 0 && 2566 csa->unused_refres_delta > 2567 dsl_dir_space_available(csa->ohds->ds_dir, NULL, 0, TRUE)) 2568 return (ENOSPC); 2569 2570 return (0); 2571 } 2572 2573 /* ARGSUSED */ 2574 static void 2575 dsl_dataset_clone_swap_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 2576 { 2577 struct cloneswaparg *csa = arg1; 2578 dsl_pool_t *dp = csa->cds->ds_dir->dd_pool; 2579 uint64_t itor = 0; 2580 blkptr_t bp; 2581 uint64_t unique = 0; 2582 int err; 2583 2584 ASSERT(csa->cds->ds_reserved == 0); 2585 ASSERT(csa->cds->ds_quota == csa->ohds->ds_quota); 2586 2587 dmu_buf_will_dirty(csa->cds->ds_dbuf, tx); 2588 dmu_buf_will_dirty(csa->ohds->ds_dbuf, tx); 2589 dmu_buf_will_dirty(csa->cds->ds_prev->ds_dbuf, tx); 2590 2591 if (csa->cds->ds_user_ptr != NULL) { 2592 csa->cds->ds_user_evict_func(csa->cds, csa->cds->ds_user_ptr); 2593 csa->cds->ds_user_ptr = NULL; 2594 } 2595 2596 if (csa->ohds->ds_user_ptr != NULL) { 2597 csa->ohds->ds_user_evict_func(csa->ohds, 2598 csa->ohds->ds_user_ptr); 2599 csa->ohds->ds_user_ptr = NULL; 2600 } 2601 2602 /* compute unique space */ 2603 while ((err = bplist_iterate(&csa->cds->ds_deadlist, 2604 &itor, &bp)) == 0) { 2605 if (bp.blk_birth > csa->cds->ds_prev->ds_phys->ds_prev_snap_txg) 2606 unique += bp_get_dasize(dp->dp_spa, &bp); 2607 } 2608 VERIFY(err == ENOENT); 2609 2610 /* reset origin's unique bytes */ 2611 csa->cds->ds_prev->ds_phys->ds_unique_bytes = unique; 2612 2613 /* swap blkptrs */ 2614 { 2615 blkptr_t tmp; 2616 tmp = csa->ohds->ds_phys->ds_bp; 2617 csa->ohds->ds_phys->ds_bp = csa->cds->ds_phys->ds_bp; 2618 
csa->cds->ds_phys->ds_bp = tmp; 2619 } 2620 2621 /* set dd_*_bytes */ 2622 { 2623 int64_t dused, dcomp, duncomp; 2624 uint64_t cdl_used, cdl_comp, cdl_uncomp; 2625 uint64_t odl_used, odl_comp, odl_uncomp; 2626 2627 VERIFY(0 == bplist_space(&csa->cds->ds_deadlist, &cdl_used, 2628 &cdl_comp, &cdl_uncomp)); 2629 VERIFY(0 == bplist_space(&csa->ohds->ds_deadlist, &odl_used, 2630 &odl_comp, &odl_uncomp)); 2631 dused = csa->cds->ds_phys->ds_used_bytes + cdl_used - 2632 (csa->ohds->ds_phys->ds_used_bytes + odl_used); 2633 dcomp = csa->cds->ds_phys->ds_compressed_bytes + cdl_comp - 2634 (csa->ohds->ds_phys->ds_compressed_bytes + odl_comp); 2635 duncomp = csa->cds->ds_phys->ds_uncompressed_bytes + 2636 cdl_uncomp - 2637 (csa->ohds->ds_phys->ds_uncompressed_bytes + odl_uncomp); 2638 2639 dsl_dir_diduse_space(csa->ohds->ds_dir, 2640 dused, dcomp, duncomp, tx); 2641 dsl_dir_diduse_space(csa->cds->ds_dir, 2642 -dused, -dcomp, -duncomp, tx); 2643 } 2644 2645 #define SWITCH64(x, y) \ 2646 { \ 2647 uint64_t __tmp = (x); \ 2648 (x) = (y); \ 2649 (y) = __tmp; \ 2650 } 2651 2652 /* swap ds_*_bytes */ 2653 SWITCH64(csa->ohds->ds_phys->ds_used_bytes, 2654 csa->cds->ds_phys->ds_used_bytes); 2655 SWITCH64(csa->ohds->ds_phys->ds_compressed_bytes, 2656 csa->cds->ds_phys->ds_compressed_bytes); 2657 SWITCH64(csa->ohds->ds_phys->ds_uncompressed_bytes, 2658 csa->cds->ds_phys->ds_uncompressed_bytes); 2659 SWITCH64(csa->ohds->ds_phys->ds_unique_bytes, 2660 csa->cds->ds_phys->ds_unique_bytes); 2661 2662 /* apply any parent delta for change in unconsumed refreservation */ 2663 dsl_dir_diduse_space(csa->ohds->ds_dir, csa->unused_refres_delta, 2664 0, 0, tx); 2665 2666 /* swap deadlists */ 2667 bplist_close(&csa->cds->ds_deadlist); 2668 bplist_close(&csa->ohds->ds_deadlist); 2669 SWITCH64(csa->ohds->ds_phys->ds_deadlist_obj, 2670 csa->cds->ds_phys->ds_deadlist_obj); 2671 VERIFY(0 == bplist_open(&csa->cds->ds_deadlist, dp->dp_meta_objset, 2672 csa->cds->ds_phys->ds_deadlist_obj)); 2673 VERIFY(0 == bplist_open(&csa->ohds->ds_deadlist, dp->dp_meta_objset, 2674 csa->ohds->ds_phys->ds_deadlist_obj)); 2675 } 2676 2677 /* 2678 * Swap 'clone' with its origin head file system. Used at the end 2679 * of "online recv" to swizzle the file system to the new version. 2680 */ 2681 int 2682 dsl_dataset_clone_swap(dsl_dataset_t *clone, dsl_dataset_t *origin_head, 2683 boolean_t force) 2684 { 2685 struct cloneswaparg csa; 2686 int error; 2687 2688 ASSERT(clone->ds_owner); 2689 ASSERT(origin_head->ds_owner); 2690 retry: 2691 /* Need exclusive access for the swap */ 2692 rw_enter(&clone->ds_rwlock, RW_WRITER); 2693 if (!rw_tryenter(&origin_head->ds_rwlock, RW_WRITER)) { 2694 rw_exit(&clone->ds_rwlock); 2695 rw_enter(&origin_head->ds_rwlock, RW_WRITER); 2696 if (!rw_tryenter(&clone->ds_rwlock, RW_WRITER)) { 2697 rw_exit(&origin_head->ds_rwlock); 2698 goto retry; 2699 } 2700 } 2701 csa.cds = clone; 2702 csa.ohds = origin_head; 2703 csa.force = force; 2704 error = dsl_sync_task_do(clone->ds_dir->dd_pool, 2705 dsl_dataset_clone_swap_check, 2706 dsl_dataset_clone_swap_sync, &csa, NULL, 9); 2707 return (error); 2708 } 2709 2710 /* 2711 * Given a pool name and a dataset object number in that pool, 2712 * return the name of that dataset. 
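 *
 * A minimal illustrative use (hypothetical caller; 'dsobj' stands
 * for some dataset object number).  Dataset names are bounded by
 * MAXNAMELEN elsewhere in this file, so a buffer of at least that
 * size should suffice:
 *
 *	char pool[] = "tank";
 *	char buf[MAXNAMELEN];
 *	if (dsl_dsobj_to_dsname(pool, dsobj, buf) == 0)
 *		buf now holds a full name such as "tank/fs@snap"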
2713 */ 2714 int 2715 dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf) 2716 { 2717 spa_t *spa; 2718 dsl_pool_t *dp; 2719 dsl_dataset_t *ds; 2720 int error; 2721 2722 if ((error = spa_open(pname, &spa, FTAG)) != 0) 2723 return (error); 2724 dp = spa_get_dsl(spa); 2725 rw_enter(&dp->dp_config_rwlock, RW_READER); 2726 if ((error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds)) == 0) { 2727 dsl_dataset_name(ds, buf); 2728 dsl_dataset_rele(ds, FTAG); 2729 } 2730 rw_exit(&dp->dp_config_rwlock); 2731 spa_close(spa, FTAG); 2732 2733 return (error); 2734 } 2735 2736 int 2737 dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota, 2738 uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv) 2739 { 2740 int error = 0; 2741 2742 ASSERT3S(asize, >, 0); 2743 2744 /* 2745 * *ref_rsrv is the portion of asize that will come from any 2746 * unconsumed refreservation space. 2747 */ 2748 *ref_rsrv = 0; 2749 2750 mutex_enter(&ds->ds_lock); 2751 /* 2752 * Make a space adjustment for reserved bytes. 2753 */ 2754 if (ds->ds_reserved > ds->ds_phys->ds_unique_bytes) { 2755 ASSERT3U(*used, >=, 2756 ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 2757 *used -= (ds->ds_reserved - ds->ds_phys->ds_unique_bytes); 2758 *ref_rsrv = 2759 asize - MIN(asize, parent_delta(ds, asize + inflight)); 2760 } 2761 2762 if (!check_quota || ds->ds_quota == 0) { 2763 mutex_exit(&ds->ds_lock); 2764 return (0); 2765 } 2766 /* 2767 * If they are requesting more space, and our current estimate 2768 * is over quota, they get to try again unless the actual 2769 * on-disk is over quota and there are no pending changes (which 2770 * may free up space for us). 2771 */ 2772 if (ds->ds_phys->ds_used_bytes + inflight >= ds->ds_quota) { 2773 if (inflight > 0 || ds->ds_phys->ds_used_bytes < ds->ds_quota) 2774 error = ERESTART; 2775 else 2776 error = EDQUOT; 2777 } 2778 mutex_exit(&ds->ds_lock); 2779 2780 return (error); 2781 } 2782 2783 /* ARGSUSED */ 2784 static int 2785 dsl_dataset_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx) 2786 { 2787 dsl_dataset_t *ds = arg1; 2788 uint64_t *quotap = arg2; 2789 uint64_t new_quota = *quotap; 2790 2791 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < SPA_VERSION_REFQUOTA) 2792 return (ENOTSUP); 2793 2794 if (new_quota == 0) 2795 return (0); 2796 2797 if (new_quota < ds->ds_phys->ds_used_bytes || 2798 new_quota < ds->ds_reserved) 2799 return (ENOSPC); 2800 2801 return (0); 2802 } 2803 2804 /* ARGSUSED */ 2805 void 2806 dsl_dataset_set_quota_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx) 2807 { 2808 dsl_dataset_t *ds = arg1; 2809 uint64_t *quotap = arg2; 2810 uint64_t new_quota = *quotap; 2811 2812 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2813 2814 ds->ds_quota = new_quota; 2815 2816 dsl_prop_set_uint64_sync(ds->ds_dir, "refquota", new_quota, cr, tx); 2817 2818 spa_history_internal_log(LOG_DS_REFQUOTA, ds->ds_dir->dd_pool->dp_spa, 2819 tx, cr, "%lld dataset = %llu ", 2820 (longlong_t)new_quota, ds->ds_object); 2821 } 2822 2823 int 2824 dsl_dataset_set_quota(const char *dsname, uint64_t quota) 2825 { 2826 dsl_dataset_t *ds; 2827 int err; 2828 2829 err = dsl_dataset_hold(dsname, FTAG, &ds); 2830 if (err) 2831 return (err); 2832 2833 if (quota != ds->ds_quota) { 2834 /* 2835 * If someone removes a file, then tries to set the quota, we 2836 * want to make sure the file freeing takes effect. 
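 *
 * Waiting for the next txg to open below ensures the sync task
 * lands in a later txg than those frees, so by the time
 * dsl_dataset_set_quota_check() executes in syncing context,
 * ds_used_bytes should already reflect them.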
2837 */ 2838 txg_wait_open(ds->ds_dir->dd_pool, 0); 2839 2840 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 2841 dsl_dataset_set_quota_check, dsl_dataset_set_quota_sync, 2842 ds, "a, 0); 2843 } 2844 dsl_dataset_rele(ds, FTAG); 2845 return (err); 2846 } 2847 2848 static int 2849 dsl_dataset_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx) 2850 { 2851 dsl_dataset_t *ds = arg1; 2852 uint64_t *reservationp = arg2; 2853 uint64_t new_reservation = *reservationp; 2854 int64_t delta; 2855 uint64_t unique; 2856 2857 if (new_reservation > INT64_MAX) 2858 return (EOVERFLOW); 2859 2860 if (spa_version(ds->ds_dir->dd_pool->dp_spa) < 2861 SPA_VERSION_REFRESERVATION) 2862 return (ENOTSUP); 2863 2864 if (dsl_dataset_is_snapshot(ds)) 2865 return (EINVAL); 2866 2867 /* 2868 * If we are doing the preliminary check in open context, the 2869 * space estimates may be inaccurate. 2870 */ 2871 if (!dmu_tx_is_syncing(tx)) 2872 return (0); 2873 2874 mutex_enter(&ds->ds_lock); 2875 unique = dsl_dataset_unique(ds); 2876 delta = MAX(unique, new_reservation) - MAX(unique, ds->ds_reserved); 2877 mutex_exit(&ds->ds_lock); 2878 2879 if (delta > 0 && 2880 delta > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) 2881 return (ENOSPC); 2882 if (delta > 0 && ds->ds_quota > 0 && 2883 new_reservation > ds->ds_quota) 2884 return (ENOSPC); 2885 2886 return (0); 2887 } 2888 2889 /* ARGSUSED */ 2890 static void 2891 dsl_dataset_set_reservation_sync(void *arg1, void *arg2, cred_t *cr, 2892 dmu_tx_t *tx) 2893 { 2894 dsl_dataset_t *ds = arg1; 2895 uint64_t *reservationp = arg2; 2896 uint64_t new_reservation = *reservationp; 2897 uint64_t unique; 2898 int64_t delta; 2899 2900 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2901 2902 mutex_enter(&ds->ds_lock); 2903 unique = dsl_dataset_unique(ds); 2904 delta = MAX(0, (int64_t)(new_reservation - unique)) - 2905 MAX(0, (int64_t)(ds->ds_reserved - unique)); 2906 ds->ds_reserved = new_reservation; 2907 mutex_exit(&ds->ds_lock); 2908 2909 dsl_prop_set_uint64_sync(ds->ds_dir, "refreservation", 2910 new_reservation, cr, tx); 2911 2912 dsl_dir_diduse_space(ds->ds_dir, delta, 0, 0, tx); 2913 2914 spa_history_internal_log(LOG_DS_REFRESERV, 2915 ds->ds_dir->dd_pool->dp_spa, tx, cr, "%lld dataset = %llu", 2916 (longlong_t)new_reservation, 2917 ds->ds_dir->dd_phys->dd_head_dataset_obj); 2918 } 2919 2920 int 2921 dsl_dataset_set_reservation(const char *dsname, uint64_t reservation) 2922 { 2923 dsl_dataset_t *ds; 2924 int err; 2925 2926 err = dsl_dataset_hold(dsname, FTAG, &ds); 2927 if (err) 2928 return (err); 2929 2930 err = dsl_sync_task_do(ds->ds_dir->dd_pool, 2931 dsl_dataset_set_reservation_check, 2932 dsl_dataset_set_reservation_sync, ds, &reservation, 0); 2933 dsl_dataset_rele(ds, FTAG); 2934 return (err); 2935 } 2936