/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2016 Gary Mills
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *);
static boolean_t dsl_scan_restarting(dsl_scan_t *, dmu_tx_t *);

int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level vdev */
int zfs_resilver_delay = 2;		/* number of ticks to delay resilver */
int zfs_scrub_delay = 4;		/* number of ticks to delay scrub */
int zfs_scan_idle = 50;			/* idle window in clock ticks */

int zfs_scan_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_obsolete_min_time_ms = 500;	/* min millisecs to obsolete per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
boolean_t zfs_no_scrub_io = B_FALSE;	/* set to disable scrub i/o */
boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
uint64_t zfs_async_block_max_blocks = UINT64_MAX;

#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

extern int zfs_txg_timeout;

/*
 * Enable/disable the processing of the free_bpobj object.
 */
boolean_t zfs_free_bpobj_enabled = B_TRUE;

/* the order has to match pool_scan_func_t */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};
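
/*
 * Scan callbacks are dispatched through this table, indexed by
 * scn_phys.scn_func; e.g. dsl_scan_visitbp() and the ZIL callbacks
 * below hand each block to dsl_scan_scrub_cb() via
 *
 *	scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb);
 */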

int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, NULL,
			    ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    kmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
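
/*
 * dsl_scan_setup_check() and dsl_scan_setup_sync() are not called
 * directly; they are scheduled as a sync task.  A minimal sketch of
 * the caller (see dsl_scan() at the bottom of this file):
 *
 *	pool_scan_func_t func = POOL_SCAN_SCRUB;
 *	(void) dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
 *	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE);
 */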

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, NULL,
			    scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();
}
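
/*
 * To summarize the state machine above: dsl_scan_done() is the only
 * place a scan leaves DSS_SCANNING, moving to DSS_FINISHED when
 * complete is set and to DSS_CANCELED otherwise.
 */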

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
	spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}

int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
	if (dsl_scan_scrubbing(scn->scn_dp) &&
	    scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED)
		return (B_TRUE);

	return (B_FALSE);
}

static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/* can't pause a scrub when there is no in-progress scrub */
		if (!dsl_scan_scrubbing(dp))
			return (SET_ERROR(ENOENT));

		/* can't pause a paused scrub */
		if (dsl_scan_is_paused_scrub(scn))
			return (SET_ERROR(EBUSY));
	} else if (*cmd != POOL_SCRUB_NORMAL) {
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
	pool_scrub_cmd_t *cmd = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	if (*cmd == POOL_SCRUB_PAUSE) {
		/*
		 * dsl_scrub_pause_resume_check() has already verified
		 * that there is an in-progress scrub to pause.
		 */
		spa->spa_scan_pass_scrub_pause = gethrestime_sec();
		scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
		dsl_scan_sync_state(scn, tx);
		spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
	} else {
		ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
		if (dsl_scan_is_paused_scrub(scn)) {
			/*
			 * We need to keep track of how much time we spend
			 * paused per pass so that we can adjust the scrub rate
			 * shown in the output of 'zpool status'.
			 */
			spa->spa_scan_pass_scrub_spent_paused +=
			    gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
			spa->spa_scan_pass_scrub_pause = 0;
			scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
			dsl_scan_sync_state(scn, tx);
		}
	}
}

/*
 * Set scrub pause/resume state if it makes sense to do so
 */
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
	return (dsl_sync_task(spa_name(dp->dp_spa),
	    dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
	    ZFS_SPACE_CHECK_RESERVED));
}

boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
	dsl_scan_t *scn = dp->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING &&
	    scn->scn_phys.scn_func == POOL_SCAN_SCRUB)
		return (B_TRUE);

	return (B_FALSE);
}
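
/*
 * Pause and resume are driven entirely through
 * dsl_scrub_set_pause_resume(); a sketch, assuming the zpool(1M)
 * front end ("zpool scrub -p" pauses, "zpool scrub" resumes):
 *
 *	(void) dsl_scrub_set_pause_resume(dp, POOL_SCRUB_PAUSE);
 *	(void) dsl_scrub_set_pause_resume(dp, POOL_SCRUB_NORMAL);
 */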

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
static void dsl_scan_visitdnode(dsl_scan_t *, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp, uint64_t object,
    dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

extern int zfs_vdev_async_write_active_min_dirty_percent;

static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_suspending)
		return (B_TRUE); /* we're already suspending */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We suspend if:
	 *  - we have scanned for the maximum time: an entire txg
	 *    timeout (default 5 sec)
	 *  or
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
	int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	uint64_t elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("suspending at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_suspending = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}
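
/*
 * Worked example with the defaults above: a scrub suspends after
 * zfs_txg_timeout (default 5 sec) unconditionally, or after
 * zfs_scan_min_time_ms (1 sec; zfs_resilver_min_time_ms, 3 sec, for
 * resilver) once someone is waiting on the txg or dirty data reaches
 * zfs_vdev_async_write_active_min_dirty_percent of zfs_dirty_data_max.
 */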

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}
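
/*
 * zil_parse() drives the two callbacks above: dsl_scan_zil_block() is
 * called once per log block and dsl_scan_zil_record() once per record,
 * each receiving claim_txg so unclaimed blocks can be skipped.
 */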

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for suspending
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 */
static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
		for (i = 0, cdnp = buf->b_data; i < epb; i++, cdnp++) {
			dsl_scan_visitdnode(scn, ds, ostype,
			    cdnp, zb->zb_blkid * epb + i, tx);
		}

		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}

		osp = buf->b_data;

		dsl_scan_visitdnode(scn, ds, osp->os_type,
		    &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);

		if (OBJSET_BUF_HAS_USERUSED(buf)) {
			/*
			 * We also always visit user/group accounting
			 * objects, and never skip them, even if we are
			 * suspending.  This is necessary so that the space
			 * deltas from this txg get integrated.
			 */
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_groupused_dnode,
			    DMU_GROUPUSED_OBJECT, tx);
			dsl_scan_visitdnode(scn, ds, osp->os_type,
			    &osp->os_userused_dnode,
			    DMU_USERUSED_OBJECT, tx);
		}
		arc_buf_destroy(buf, &buf);
	}

	return (0);
}
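
/*
 * A note on the epb ("entries per block") computations above: an
 * indirect block holds BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT block
 * pointers (e.g. a 128K indirect block holds 1024 128-byte blkptrs),
 * and a dnode block holds BP_GET_LSIZE(bp) >> DNODE_SHIFT dnodes
 * (e.g. 16K >> 9 = 32 dnodes).
 */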

static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(&dnp->dn_spill,
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	arc_buf_t *buf = NULL;
	blkptr_t bp_toread = *bp;

	if (dsl_scan_check_suspend(scn, zb))
		return;

	if (dsl_scan_check_resume(scn, dnp, zb))
		return;

	if (BP_IS_HOLE(bp))
		return;

	scn->scn_visited_this_txg++;

	dprintf_bp(bp,
	    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	    ds, ds ? ds->ds_object : 0,
	    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	    bp);

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, &bp_toread, zb, tx) != 0)
		return;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		ASSERT(buf == NULL);
		return;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
}

static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about.  However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn->scn_phys.scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}
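
/*
 * The scan work queue (scn_phys.scn_queue_obj) is a ZAP mapping
 * dataset object number -> min txg to scan, populated with entries
 * like the one above:
 *
 *	VERIFY(zap_add_int_key(dp->dp_meta_objset,
 *	    scn->scn_phys.scn_queue_obj, ds->ds_object,
 *	    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
 *
 * dsl_scan_visit() later drains it with a zap cursor.
 */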

static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started.  This snapshot
		 * only references blocks with:
		 *
		 *	birth < our ds_creation_txg
		 *	cur_min_txg is no less than ds_creation_txg.
		 *	We have already visited these blocks.
		 * or
		 *	birth > scn_max_txg
		 *	The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots.  This happens when min_txg > 0,
		 * which raises cur_min_txg.  In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg.  Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    dsobj, dsname,
		    scn->scn_phys.scn_cur_min_txg,
		    scn->scn_phys.scn_max_txg);
		kmem_free(dsname, MAXNAMELEN);

		goto out;
	}

	/*
	 * Only the ZIL in the head (non-snapshot) is valid.  Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored.  In addition, $ORIGIN
	 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
	 * need to look for a ZIL in it either.  So we traverse the ZIL here,
	 * rather than in dsl_scan_recurse(), because the regular snapshot
	 * block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !dsl_dataset_is_snapshot(ds) &&
	    ds->ds_dir != dp->dp_origin_snap->ds_dir) {
		objset_t *os;
		if (dmu_objset_from_ds(ds, &os) != 0) {
			goto out;
		}
		dsl_scan_zil(dp, &os->os_zil_header);
	}

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "suspending=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_suspending);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_suspending)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx) == 0);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry.  Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde = { 0 };
	int error;
	uint64_t n = 0;

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_suspend(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
	    "suspending=%u", (longlong_t)n,
	    (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}
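
/*
 * Putting the two phases together: with the default
 * zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE, dsl_scan_ddt() first
 * visits every deduped block (refcnt > 1) exactly once, and the
 * top-down traversal that follows relies on ddt_class_contains() in
 * dsl_scan_visitbp() to skip those same blocks.
 */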

static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t zc;
	zap_attribute_t za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_suspending)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_suspending)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_suspending);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were suspended, continue from here.  Note if the
		 * ds we were suspended on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_suspending)
			return;
	}

	/*
	 * In case we were suspended right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(&zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(&zc, &za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = zfs_strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za.za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za.za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(&zc);
		if (scn->scn_suspending)
			return;
	}
	zap_cursor_fini(&zc);
}

static boolean_t
dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_async_block_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

static int
dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;
	const dva_t *dva = &bp->blk_dva[0];

	if (dsl_scan_async_block_should_pause(scn))
		return (SET_ERROR(ERESTART));

	spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
	    DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
	    DVA_GET_ASIZE(dva), tx);
	scn->scn_visited_this_txg++;
	return (0);
}
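
/*
 * Both callbacks above use ERESTART as a cooperative suspend signal:
 * returning it stops bpobj_iterate()/bptree_iterate() for this txg,
 * and dsl_scan_sync() treats it as "resume next txg" rather than as
 * a failure.
 */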

boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if ((scn->scn_phys.scn_state == DSS_SCANNING &&
	    !dsl_scan_is_paused_scrub(scn)) ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

/* Called whenever a txg syncs. */
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (dsl_scan_restarting(scn, tx)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(dp->dp_spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning.  This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_suspending = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys.  If we suspend, don't do
	 * any scrubbing or resilvering.  This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it.  It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (zfs_free_bpobj_enabled &&
	    spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf.  Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed.  This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir.  Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}

	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
	    0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ));
	if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
		ASSERT(spa_feature_is_active(dp->dp_spa,
		    SPA_FEATURE_OBSOLETE_COUNTS));

		scn->scn_is_bptree = B_FALSE;
		scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
		err = bpobj_iterate(&dp->dp_obsolete_bpobj,
		    dsl_scan_obsolete_block_cb, scn, tx);
		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);

		if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
			dsl_pool_destroy_obsolete_bpobj(dp, tx);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_suspending);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (dsl_scan_is_paused_scrub(scn))
		return;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_suspending) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
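
/*
 * Usage sketch (assumption: callers live outside this file): the SPA
 * invokes dsl_resilver_restart() when a resilver must begin or
 * continue, e.g. after a device attach or replacement, passing either
 * the txg of the config change or 0 to allocate a fresh txg above.
 */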
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;
		zfs_blkstat_t *zb = &zab->zab_type[l][t];
		int equal;

		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

static void
dsl_scan_scrub_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	abd_free(zio->io_abd);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);

	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}
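
/*
 * Per-block callback for scrubs and resilvers.  Blocks outside the
 * scan's txg range are skipped; otherwise the block is tallied and,
 * if a read is warranted (always for a scrub; for a resilver only
 * when a DTL says the data may be missing, or conservatively for
 * gang blocks), a scrub I/O is issued with dsl_scan_scrub_done() as
 * its completion callback.  In-flight scan I/O is capped pool-wide
 * at zfs_top_maxinflight per top-level vdev.
 */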
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int scan_delay = 0;

	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;

		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(scan_delay);

		zio_nowait(zio_read(NULL, spa, bp,
		    abd_alloc_for_io(size, B_FALSE), size, dsl_scan_scrub_done,
		    NULL, ZIO_PRIORITY_SCRUB, zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

/*
 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
 * Can also be called to resume a paused scrub.
 */
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;
	dsl_scan_t *scn = dp->dp_scan;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

	if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
		/* got scrub start cmd, resume paused scrub */
		int err = dsl_scrub_set_pause_resume(scn->scn_dp,
		    POOL_SCRUB_NORMAL);
		if (err == 0) {
			spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
			return (ECANCELED);
		}

		return (SET_ERROR(err));
	}

	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}
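
/*
 * Note: scn_restart_txg, set by dsl_resilver_restart() above, is
 * consulted via dsl_scan_restarting() when a scan sync begins; a
 * pending restart tears down the current scan and sets it up again
 * from scratch.
 */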