/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
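 *
 * As an illustrative sketch (the size and txg number are hypothetical),
 * a write that dirties 8K of buffers in txg 42 is accounted via
 *	dsl_pool_dirty_space(dp, 8192, tx);
 * and, once that data has been written out, dsl_pool_sync() reverses the
 * accounting via
 *	dsl_pool_undirty_space(dp, 8192, 42);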
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden in /etc/system.
 */
uint64_t zfs_dirty_data_max;
uint64_t zfs_dirty_data_max_max = 4ULL * 1024 * 1024 * 1024;
int zfs_dirty_data_max_percent = 10;

/*
 * If there is at least this much dirty data, push out a txg.
 */
uint64_t zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this much dirty data, dmu_tx_delay() kicks in and delays
 * each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
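 *
 * For example, the default of 1000 * 1000 * 1000 / 2000 = 500,000 targets
 * a backend capable of roughly 2,000 operations per second; per the 10x
 * rule above, the delay then behaves smoothly from roughly 200 to 20,000
 * operations per second.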
 */
uint64_t zfs_delay_scale = 1000 * 1000 * 1000 / 2000;

hrtime_t zfs_throttle_delay = MSEC2NSEC(10);
hrtime_t zfs_throttle_resolution = MSEC2NSEC(10);

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	/*
	 * Note: errors ignored, because the leak dir will not exist if we
	 * have not encountered a leak yet.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_vnrele_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total <= zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_dirty_pertxg[] entry for this txg should now be zero.
	 * However, some seldom-used code paths do not adhere to this
	 * (e.g. dbuf_undirty(), also rounding error in
	 * dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 * - move dead blocks from the pending deadlist to the on-disk
	 *   deadlist
	 * - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the MOS while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg)) != NULL) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
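 *
 * Code that must run only in syncing context typically asserts this,
 * e.g. (an illustrative usage, not taken from this file):
 *
 *	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));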
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = spa_get_slop_space(dp->dp_spa);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;
	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_vnrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_vnrele_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
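	 *
	 * Entries in that zap object are keyed by "<dsobj in hex>-<tag>"
	 * (see the kmem_asprintf() below); e.g. a hold with tag "send" on
	 * dataset object 0x36 would be stored under the name "36-send"
	 * (hypothetical values).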
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 *	dsl_pool_hold()
 *	dsl_dataset_hold()
 *	... perform checks ...
 *	dsl_dataset_long_hold()
 *	dsl_pool_rele()
 *	... perform long-running task ...
 *	dsl_dataset_long_rele()
 *	dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
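 *
 * For example, a read-only operation might be structured like this
 * (a sketch only; error handling elided):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *
 *	VERIFY0(dsl_pool_hold(name, FTAG, &dp));
 *	VERIFY0(dsl_dataset_hold(dp, name, FTAG, &ds));
 *	... read information from ds ...
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);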
 */

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't.)
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}