/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime = 5;			/* target secs to sync a txg */

uint64_t zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
uint64_t zfs_write_limit_max = 0;		/* max data payload per txg */
uint64_t zfs_write_limit_inflated = 0;
uint64_t zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;

static int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));
	list_create(&dp->dp_synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dp->dp_scrub_cancel_lock, NULL, MUTEX_DEFAULT, NULL);

	return (dp);
}

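/*
 * Open the DSL state of an existing pool: open the meta-objset (MOS),
 * the root dsl_dir, the special $MOS directory and, on pools whose
 * version supports it, the $ORIGIN snapshot, then read any in-progress
 * scrub state out of the pool directory.  On failure the partially
 * opened pool is torn down with dsl_pool_close().
 */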
int
dsl_pool_open(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_impl_t *osi;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp, &osi);
	if (err)
		goto out;
	dp->dp_meta_objset = &osi->os;

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    dp, &dp->dp_origin_snap);
		if (err)
			goto out;
		dsl_dataset_rele(ds, FTAG);
		dsl_dir_close(dd, dp);
	}

	/* get scrub status */
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCRUB_FUNC, sizeof (uint32_t), 1,
	    &dp->dp_scrub_func);
	if (err == 0) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_QUEUE, sizeof (uint64_t), 1,
		    &dp->dp_scrub_queue_obj);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MIN_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_min_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_MAX_TXG, sizeof (uint64_t), 1,
		    &dp->dp_scrub_max_txg);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_BOOKMARK, sizeof (uint64_t), 4,
		    &dp->dp_scrub_bookmark);
		if (err)
			goto out;
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCRUB_ERRORS, sizeof (uint64_t), 1,
		    &spa->spa_scrub_errors);
		if (err)
			goto out;
		if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool.  Restart from the beginning, since the
			 * old software may have changed the pool in the
			 * meantime.
			 */
			dsl_pool_scrub_restart(dp);
		}
	} else {
		/*
		 * It's OK if there is no scrub in progress (and if
		 * there was an I/O error, ignore it).
		 */
		err = 0;
	}

out:
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(NULL, dp->dp_meta_objset->os);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_dirs);
	list_destroy(&dp->dp_synced_datasets);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	mutex_destroy(&dp->dp_scrub_cancel_lock);
	kmem_free(dp, sizeof (dsl_pool_t));
}

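/*
 * Create the DSL state for a brand-new pool in the given txg: the MOS,
 * the pool (object) directory, the root dsl_dir, the $MOS directory,
 * the origin dataset (when the pool version supports it), and finally
 * the root dataset and its ZPL objset.
 */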
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_impl_t *osip;
	dsl_dataset_t *ds;
	uint64_t dsobj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = &dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx)->os;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	dsobj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	osip = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
#ifdef _KERNEL
	zfs_create_fs(&osip->os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}

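/*
 * Sync out all dirty state for the given txg: write the dirty datasets,
 * run any pending sync tasks, sync the dirty dsl_dirs, do scrub work on
 * the first sync pass, and finally sync the MOS itself and update the
 * pool's root block pointer.  The observed write time is fed back into
 * the write throttle (dp_write_limit) so that txg sync times stay near
 * zfs_txg_synctime.
 */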
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_sync_task_group_t *dstg;
	objset_impl_t *mosi = dp->dp_meta_objset->os;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
		if (!list_link_active(&ds->ds_synced_link))
			list_insert_tail(&dp->dp_synced_datasets, ds);
		else
			dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);

	start = gethrtime();
	err = zio_wait(zio);
	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg))
		dsl_sync_task_group_sync(dstg, tx);
	DTRACE_PROBE(pool_sync__3task);

	start = gethrtime();
	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	if (spa_sync_pass(dp->dp_spa) == 1)
		dsl_pool_scrub_sync(dp, tx);

	start = gethrtime();
	if (list_head(&mosi->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mosi->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mosi, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	dmu_tx_commit(tx);

	data_written = dp->dp_space_towrite[txg & TXG_MASK];
	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > 0) {
		uint64_t throughput = (data_written * NANOSEC) / write_time;
		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime));
	}
}

void
dsl_pool_zil_clean(dsl_pool_t *dp)
{
	dsl_dataset_t *ds;

	while (ds = list_head(&dp->dp_synced_datasets)) {
		list_remove(&dp->dp_synced_datasets, ds);
		ASSERT(ds->ds_user_ptr != NULL);
		zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
		dmu_buf_rele(ds->ds_dbuf, ds);
	}
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_get_dsl(dp->dp_spa) == NULL);
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

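/*
 * Reserve in-core space for a write in the currently open txg.  This is
 * the write throttle: if the space already accounted to this txg exceeds
 * the current write limit we return ERESTART so the caller backs off and
 * retries in a later txg, and once the txg is 7/8ths full we delay the
 * caller to slow down the fill rate.
 */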
int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit)
			return (ERESTART);
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}

void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

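/*
 * Callback for dsl_pool_upgrade_clones().  Walk the dataset back to the
 * start of its snapshot chain; if it is not a clone, attach it to the
 * $ORIGIN snapshot, and in either case record it in its origin's
 * ds_next_clones_obj ZAP.
 */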
/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	(void) dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN);
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}