1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2021 by Delphix. All rights reserved. 24 * Copyright 2016 Gary Mills 25 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 26 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved. 27 * Copyright 2019 Joyent, Inc. 28 */ 29 30 #include <sys/dsl_scan.h> 31 #include <sys/dsl_pool.h> 32 #include <sys/dsl_dataset.h> 33 #include <sys/dsl_prop.h> 34 #include <sys/dsl_dir.h> 35 #include <sys/dsl_synctask.h> 36 #include <sys/dnode.h> 37 #include <sys/dmu_tx.h> 38 #include <sys/dmu_objset.h> 39 #include <sys/arc.h> 40 #include <sys/arc_impl.h> 41 #include <sys/zap.h> 42 #include <sys/zio.h> 43 #include <sys/zfs_context.h> 44 #include <sys/fs/zfs.h> 45 #include <sys/zfs_znode.h> 46 #include <sys/spa_impl.h> 47 #include <sys/vdev_impl.h> 48 #include <sys/zil_impl.h> 49 #include <sys/zio_checksum.h> 50 #include <sys/brt.h> 51 #include <sys/ddt.h> 52 #include <sys/sa.h> 53 #include <sys/sa_impl.h> 54 #include <sys/zfeature.h> 55 #include <sys/abd.h> 56 #include <sys/range_tree.h> 57 #ifdef _KERNEL 58 #include <sys/zfs_vfsops.h> 59 #endif 60 61 /* 62 * Grand theory statement on scan queue sorting 63 * 64 * Scanning is implemented by recursively traversing all indirection levels 65 * in an object and reading all blocks referenced from said objects. This 66 * results in us approximately traversing the object from lowest logical 67 * offset to the highest. For best performance, we would want the logical 68 * blocks to be physically contiguous. However, this is frequently not the 69 * case with pools given the allocation patterns of copy-on-write filesystems. 70 * So instead, we put the I/Os into a reordering queue and issue them in a 71 * way that will most benefit physical disks (LBA-order). 72 * 73 * Queue management: 74 * 75 * Ideally, we would want to scan all metadata and queue up all block I/O 76 * prior to starting to issue it, because that allows us to do an optimal 77 * sorting job. This can however consume large amounts of memory. Therefore 78 * we continuously monitor the size of the queues and constrain them to 5% 79 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this 80 * limit, we clear out a few of the largest extents at the head of the queues 81 * to make room for more scanning. Hopefully, these extents will be fairly 82 * large and contiguous, allowing us to approach sequential I/O throughput 83 * even without a fully sorted tree. 84 * 85 * Metadata scanning takes place in dsl_scan_visit(), which is called from 86 * dsl_scan_sync() every spa_sync(). 
If we have either fully scanned all 87 * metadata on the pool, or we need to make room in memory because our 88 * queues are too large, dsl_scan_visit() is postponed and 89 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies 90 * that metadata scanning and queued I/O issuing are mutually exclusive. This 91 * allows us to provide maximum sequential I/O throughput for the majority of 92 * I/O's issued since sequential I/O performance is significantly negatively 93 * impacted if it is interleaved with random I/O. 94 * 95 * Implementation Notes 96 * 97 * One side effect of the queued scanning algorithm is that the scanning code 98 * needs to be notified whenever a block is freed. This is needed to allow 99 * the scanning code to remove these I/Os from the issuing queue. Additionally, 100 * we do not attempt to queue gang blocks to be issued sequentially since this 101 * is very hard to do and would have an extremely limited performance benefit. 102 * Instead, we simply issue gang I/Os as soon as we find them using the legacy 103 * algorithm. 104 * 105 * Backwards compatibility 106 * 107 * This new algorithm is backwards compatible with the legacy on-disk data 108 * structures (and therefore does not require a new feature flag). 109 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan 110 * will stop scanning metadata (in logical order) and wait for all outstanding 111 * sorted I/O to complete. Once this is done, we write out a checkpoint 112 * bookmark, indicating that we have scanned everything logically before it. 113 * If the pool is imported on a machine without the new sorting algorithm, 114 * the scan simply resumes from the last checkpoint using the legacy algorithm. 115 */ 116 117 typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, 118 const zbookmark_phys_t *); 119 120 static scan_cb_t dsl_scan_scrub_cb; 121 122 static int scan_ds_queue_compare(const void *a, const void *b); 123 static int scan_prefetch_queue_compare(const void *a, const void *b); 124 static void scan_ds_queue_clear(dsl_scan_t *scn); 125 static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn); 126 static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, 127 uint64_t *txg); 128 static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg); 129 static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj); 130 static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx); 131 static uint64_t dsl_scan_count_data_disks(spa_t *spa); 132 133 extern uint_t zfs_vdev_async_write_active_min_dirty_percent; 134 static int zfs_scan_blkstats = 0; 135 136 /* 137 * 'zpool status' uses bytes processed per pass to report throughput and 138 * estimate time remaining. We define a pass to start when the scanning 139 * phase completes for a sequential resilver. Optionally, this value 140 * may be used to reset the pass statistics every N txgs to provide an 141 * estimated completion time based on currently observed performance. 142 */ 143 static uint_t zfs_scan_report_txgs = 0; 144 145 /* 146 * By default zfs will check to ensure it is not over the hard memory 147 * limit before each txg. If finer-grained control of this is needed 148 * this value can be set to 1 to enable checking before scanning each 149 * block. 150 */ 151 static int zfs_scan_strict_mem_lim = B_FALSE; 152 153 /* 154 * Maximum number of parallelly executed bytes per leaf vdev. 
We attempt 155 * to strike a balance here between keeping the vdev queues full of I/Os 156 * at all times and not overflowing the queues to cause long latency, 157 * which would cause long txg sync times. No matter what, we will not 158 * overload the drives with I/O, since that is protected by 159 * zfs_vdev_scrub_max_active. 160 */ 161 static uint64_t zfs_scan_vdev_limit = 16 << 20; 162 163 static uint_t zfs_scan_issue_strategy = 0; 164 165 /* don't queue & sort zios, go direct */ 166 static int zfs_scan_legacy = B_FALSE; 167 static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ 168 169 /* 170 * fill_weight is non-tunable at runtime, so we copy it at module init from 171 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would 172 * break queue sorting. 173 */ 174 static uint_t zfs_scan_fill_weight = 3; 175 static uint64_t fill_weight; 176 177 /* See dsl_scan_should_clear() for details on the memory limit tunables */ 178 static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ 179 static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ 180 181 182 /* fraction of physmem */ 183 static uint_t zfs_scan_mem_lim_fact = 20; 184 185 /* fraction of mem lim above */ 186 static uint_t zfs_scan_mem_lim_soft_fact = 20; 187 188 /* minimum milliseconds to scrub per txg */ 189 static uint_t zfs_scrub_min_time_ms = 1000; 190 191 /* minimum milliseconds to obsolete per txg */ 192 static uint_t zfs_obsolete_min_time_ms = 500; 193 194 /* minimum milliseconds to free per txg */ 195 static uint_t zfs_free_min_time_ms = 1000; 196 197 /* minimum milliseconds to resilver per txg */ 198 static uint_t zfs_resilver_min_time_ms = 3000; 199 200 static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */ 201 int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */ 202 static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ 203 static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ 204 static const enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; 205 /* max number of blocks to free in a single TXG */ 206 static uint64_t zfs_async_block_max_blocks = UINT64_MAX; 207 /* max number of dedup blocks to free in a single TXG */ 208 static uint64_t zfs_max_async_dedup_frees = 100000; 209 210 /* set to disable resilver deferring */ 211 static int zfs_resilver_disable_defer = B_FALSE; 212 213 /* 214 * We wait a few txgs after importing a pool to begin scanning so that 215 * the import / mounting code isn't held up by scrub / resilver IO. 216 * Unfortunately, it is a bit difficult to determine exactly how long 217 * this will take since userspace will trigger fs mounts asynchronously 218 * and the kernel will create zvol minors asynchronously. As a result, 219 * the value provided here is a bit arbitrary, but represents a 220 * reasonable estimate of how many txgs it will take to finish fully 221 * importing a pool 222 */ 223 #define SCAN_IMPORT_WAIT_TXGS 5 224 225 #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \ 226 ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \ 227 (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER) 228 229 /* 230 * Enable/disable the processing of the free_bpobj object. 231 */ 232 static int zfs_free_bpobj_enabled = 1; 233 234 /* the order has to match pool_scan_type */ 235 static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { 236 NULL, 237 dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */ 238 dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ 239 }; 240 241 /* In core node for the scn->scn_queue. 
Represents a dataset to be scanned */ 242 typedef struct { 243 uint64_t sds_dsobj; 244 uint64_t sds_txg; 245 avl_node_t sds_node; 246 } scan_ds_t; 247 248 /* 249 * This controls what conditions are placed on dsl_scan_sync_state(): 250 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0 251 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0. 252 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise 253 * write out the scn_phys_cached version. 254 * See dsl_scan_sync_state for details. 255 */ 256 typedef enum { 257 SYNC_OPTIONAL, 258 SYNC_MANDATORY, 259 SYNC_CACHED 260 } state_sync_type_t; 261 262 /* 263 * This struct represents the minimum information needed to reconstruct a 264 * zio for sequential scanning. This is useful because many of these will 265 * accumulate in the sequential IO queues before being issued, so saving 266 * memory matters here. 267 */ 268 typedef struct scan_io { 269 /* fields from blkptr_t */ 270 uint64_t sio_blk_prop; 271 uint64_t sio_phys_birth; 272 uint64_t sio_birth; 273 zio_cksum_t sio_cksum; 274 uint32_t sio_nr_dvas; 275 276 /* fields from zio_t */ 277 uint32_t sio_flags; 278 zbookmark_phys_t sio_zb; 279 280 /* members for queue sorting */ 281 union { 282 avl_node_t sio_addr_node; /* link into issuing queue */ 283 list_node_t sio_list_node; /* link for issuing to disk */ 284 } sio_nodes; 285 286 /* 287 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp, 288 * depending on how many were in the original bp. Only the 289 * first DVA is really used for sorting and issuing purposes. 290 * The other DVAs (if provided) simply exist so that the zio 291 * layer can find additional copies to repair from in the 292 * event of an error. This array must go at the end of the 293 * struct to allow this for the variable number of elements. 294 */ 295 dva_t sio_dva[]; 296 } scan_io_t; 297 298 #define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x) 299 #define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x) 300 #define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0]) 301 #define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0]) 302 #define SIO_GET_END_OFFSET(sio) \ 303 (SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio)) 304 #define SIO_GET_MUSED(sio) \ 305 (sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t))) 306 307 struct dsl_scan_io_queue { 308 dsl_scan_t *q_scn; /* associated dsl_scan_t */ 309 vdev_t *q_vd; /* top-level vdev that this queue represents */ 310 zio_t *q_zio; /* scn_zio_root child for waiting on IO */ 311 312 /* trees used for sorting I/Os and extents of I/Os */ 313 range_tree_t *q_exts_by_addr; 314 zfs_btree_t q_exts_by_size; 315 avl_tree_t q_sios_by_addr; 316 uint64_t q_sio_memused; 317 uint64_t q_last_ext_addr; 318 319 /* members for zio rate limiting */ 320 uint64_t q_maxinflight_bytes; 321 uint64_t q_inflight_bytes; 322 kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */ 323 324 /* per txg statistics */ 325 uint64_t q_total_seg_size_this_txg; 326 uint64_t q_segs_this_txg; 327 uint64_t q_total_zio_size_this_txg; 328 uint64_t q_zios_this_txg; 329 }; 330 331 /* private data for dsl_scan_prefetch_cb() */ 332 typedef struct scan_prefetch_ctx { 333 zfs_refcount_t spc_refcnt; /* refcount for memory management */ 334 dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */ 335 boolean_t spc_root; /* is this prefetch for an objset? 
*/ 336 uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */ 337 uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */ 338 } scan_prefetch_ctx_t; 339 340 /* private data for dsl_scan_prefetch() */ 341 typedef struct scan_prefetch_issue_ctx { 342 avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */ 343 scan_prefetch_ctx_t *spic_spc; /* spc for the callback */ 344 blkptr_t spic_bp; /* bp to prefetch */ 345 zbookmark_phys_t spic_zb; /* bookmark to prefetch */ 346 } scan_prefetch_issue_ctx_t; 347 348 static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 349 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue); 350 static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, 351 scan_io_t *sio); 352 353 static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd); 354 static void scan_io_queues_destroy(dsl_scan_t *scn); 355 356 static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP]; 357 358 /* sio->sio_nr_dvas must be set so we know which cache to free from */ 359 static void 360 sio_free(scan_io_t *sio) 361 { 362 ASSERT3U(sio->sio_nr_dvas, >, 0); 363 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 364 365 kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio); 366 } 367 368 /* It is up to the caller to set sio->sio_nr_dvas for freeing */ 369 static scan_io_t * 370 sio_alloc(unsigned short nr_dvas) 371 { 372 ASSERT3U(nr_dvas, >, 0); 373 ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP); 374 375 return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP)); 376 } 377 378 void 379 scan_init(void) 380 { 381 /* 382 * This is used in ext_size_compare() to weight segments 383 * based on how sparse they are. This cannot be changed 384 * mid-scan and the tree comparison functions don't currently 385 * have a mechanism for passing additional context to the 386 * compare functions. 
Thus we store this value globally and 387 * we only allow it to be set at module initialization time 388 */ 389 fill_weight = zfs_scan_fill_weight; 390 391 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 392 char name[36]; 393 394 (void) snprintf(name, sizeof (name), "sio_cache_%d", i); 395 sio_cache[i] = kmem_cache_create(name, 396 (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))), 397 0, NULL, NULL, NULL, NULL, NULL, 0); 398 } 399 } 400 401 void 402 scan_fini(void) 403 { 404 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 405 kmem_cache_destroy(sio_cache[i]); 406 } 407 } 408 409 static inline boolean_t 410 dsl_scan_is_running(const dsl_scan_t *scn) 411 { 412 return (scn->scn_phys.scn_state == DSS_SCANNING); 413 } 414 415 boolean_t 416 dsl_scan_resilvering(dsl_pool_t *dp) 417 { 418 return (dsl_scan_is_running(dp->dp_scan) && 419 dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER); 420 } 421 422 static inline void 423 sio2bp(const scan_io_t *sio, blkptr_t *bp) 424 { 425 memset(bp, 0, sizeof (*bp)); 426 bp->blk_prop = sio->sio_blk_prop; 427 bp->blk_phys_birth = sio->sio_phys_birth; 428 bp->blk_birth = sio->sio_birth; 429 bp->blk_fill = 1; /* we always only work with data pointers */ 430 bp->blk_cksum = sio->sio_cksum; 431 432 ASSERT3U(sio->sio_nr_dvas, >, 0); 433 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 434 435 memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t)); 436 } 437 438 static inline void 439 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) 440 { 441 sio->sio_blk_prop = bp->blk_prop; 442 sio->sio_phys_birth = bp->blk_phys_birth; 443 sio->sio_birth = bp->blk_birth; 444 sio->sio_cksum = bp->blk_cksum; 445 sio->sio_nr_dvas = BP_GET_NDVAS(bp); 446 447 /* 448 * Copy the DVAs to the sio. We need all copies of the block so 449 * that the self healing code can use the alternate copies if the 450 * first is corrupted. We want the DVA at index dva_i to be first 451 * in the sio since this is the primary one that we want to issue. 452 */ 453 for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) { 454 sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas]; 455 } 456 } 457 458 int 459 dsl_scan_init(dsl_pool_t *dp, uint64_t txg) 460 { 461 int err; 462 dsl_scan_t *scn; 463 spa_t *spa = dp->dp_spa; 464 uint64_t f; 465 466 scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP); 467 scn->scn_dp = dp; 468 469 /* 470 * It's possible that we're resuming a scan after a reboot so 471 * make sure that the scan_async_destroying flag is initialized 472 * appropriately. 473 */ 474 ASSERT(!scn->scn_async_destroying); 475 scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa, 476 SPA_FEATURE_ASYNC_DESTROY); 477 478 /* 479 * Calculate the max number of in-flight bytes for pool-wide 480 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max). 481 * Limits for the issuing phase are done per top-level vdev and 482 * are handled separately. 483 */ 484 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20, 485 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa))); 486 487 avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t), 488 offsetof(scan_ds_t, sds_node)); 489 avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare, 490 sizeof (scan_prefetch_issue_ctx_t), 491 offsetof(scan_prefetch_issue_ctx_t, spic_avl_node)); 492 493 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 494 "scrub_func", sizeof (uint64_t), 1, &f); 495 if (err == 0) { 496 /* 497 * There was an old-style scrub in progress. 
Restart a 498 * new-style scrub from the beginning. 499 */ 500 scn->scn_restart_txg = txg; 501 zfs_dbgmsg("old-style scrub was in progress for %s; " 502 "restarting new-style scrub in txg %llu", 503 spa->spa_name, 504 (longlong_t)scn->scn_restart_txg); 505 506 /* 507 * Load the queue obj from the old location so that it 508 * can be freed by dsl_scan_done(). 509 */ 510 (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 511 "scrub_queue", sizeof (uint64_t), 1, 512 &scn->scn_phys.scn_queue_obj); 513 } else { 514 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 515 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 516 &scn->scn_phys); 517 /* 518 * Detect if the pool contains the signature of #2094. If it 519 * does properly update the scn->scn_phys structure and notify 520 * the administrator by setting an errata for the pool. 521 */ 522 if (err == EOVERFLOW) { 523 uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1]; 524 VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24); 525 VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==, 526 (23 * sizeof (uint64_t))); 527 528 err = zap_lookup(dp->dp_meta_objset, 529 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN, 530 sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp); 531 if (err == 0) { 532 uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS]; 533 534 if (overflow & ~DSL_SCAN_FLAGS_MASK || 535 scn->scn_async_destroying) { 536 spa->spa_errata = 537 ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY; 538 return (EOVERFLOW); 539 } 540 541 memcpy(&scn->scn_phys, zaptmp, 542 SCAN_PHYS_NUMINTS * sizeof (uint64_t)); 543 scn->scn_phys.scn_flags = overflow; 544 545 /* Required scrub already in progress. */ 546 if (scn->scn_phys.scn_state == DSS_FINISHED || 547 scn->scn_phys.scn_state == DSS_CANCELED) 548 spa->spa_errata = 549 ZPOOL_ERRATA_ZOL_2094_SCRUB; 550 } 551 } 552 553 if (err == ENOENT) 554 return (0); 555 else if (err) 556 return (err); 557 558 /* 559 * We might be restarting after a reboot, so jump the issued 560 * counter to how far we've scanned. We know we're consistent 561 * up to here. 562 */ 563 scn->scn_issued_before_pass = scn->scn_phys.scn_examined; 564 565 if (dsl_scan_is_running(scn) && 566 spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) { 567 /* 568 * A new-type scrub was in progress on an old 569 * pool, and the pool was accessed by old 570 * software. Restart from the beginning, since 571 * the old software may have changed the pool in 572 * the meantime. 573 */ 574 scn->scn_restart_txg = txg; 575 zfs_dbgmsg("new-style scrub for %s was modified " 576 "by old software; restarting in txg %llu", 577 spa->spa_name, 578 (longlong_t)scn->scn_restart_txg); 579 } else if (dsl_scan_resilvering(dp)) { 580 /* 581 * If a resilver is in progress and there are already 582 * errors, restart it instead of finishing this scan and 583 * then restarting it. If there haven't been any errors 584 * then remember that the incore DTL is valid. 
585 */ 586 if (scn->scn_phys.scn_errors > 0) { 587 scn->scn_restart_txg = txg; 588 zfs_dbgmsg("resilver can't excise DTL_MISSING " 589 "when finished; restarting on %s in txg " 590 "%llu", 591 spa->spa_name, 592 (u_longlong_t)scn->scn_restart_txg); 593 } else { 594 /* it's safe to excise DTL when finished */ 595 spa->spa_scrub_started = B_TRUE; 596 } 597 } 598 } 599 600 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys)); 601 602 /* reload the queue into the in-core state */ 603 if (scn->scn_phys.scn_queue_obj != 0) { 604 zap_cursor_t zc; 605 zap_attribute_t za; 606 607 for (zap_cursor_init(&zc, dp->dp_meta_objset, 608 scn->scn_phys.scn_queue_obj); 609 zap_cursor_retrieve(&zc, &za) == 0; 610 (void) zap_cursor_advance(&zc)) { 611 scan_ds_queue_insert(scn, 612 zfs_strtonum(za.za_name, NULL), 613 za.za_first_integer); 614 } 615 zap_cursor_fini(&zc); 616 } 617 618 spa_scan_stat_init(spa); 619 vdev_scan_stat_init(spa->spa_root_vdev); 620 621 return (0); 622 } 623 624 void 625 dsl_scan_fini(dsl_pool_t *dp) 626 { 627 if (dp->dp_scan != NULL) { 628 dsl_scan_t *scn = dp->dp_scan; 629 630 if (scn->scn_taskq != NULL) 631 taskq_destroy(scn->scn_taskq); 632 633 scan_ds_queue_clear(scn); 634 avl_destroy(&scn->scn_queue); 635 scan_ds_prefetch_queue_clear(scn); 636 avl_destroy(&scn->scn_prefetch_queue); 637 638 kmem_free(dp->dp_scan, sizeof (dsl_scan_t)); 639 dp->dp_scan = NULL; 640 } 641 } 642 643 static boolean_t 644 dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx) 645 { 646 return (scn->scn_restart_txg != 0 && 647 scn->scn_restart_txg <= tx->tx_txg); 648 } 649 650 boolean_t 651 dsl_scan_resilver_scheduled(dsl_pool_t *dp) 652 { 653 return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) || 654 (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER)); 655 } 656 657 boolean_t 658 dsl_scan_scrubbing(const dsl_pool_t *dp) 659 { 660 dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys; 661 662 return (scn_phys->scn_state == DSS_SCANNING && 663 scn_phys->scn_func == POOL_SCAN_SCRUB); 664 } 665 666 boolean_t 667 dsl_scan_is_paused_scrub(const dsl_scan_t *scn) 668 { 669 return (dsl_scan_scrubbing(scn->scn_dp) && 670 scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED); 671 } 672 673 /* 674 * Writes out a persistent dsl_scan_phys_t record to the pool directory. 675 * Because we can be running in the block sorting algorithm, we do not always 676 * want to write out the record, only when it is "safe" to do so. This safety 677 * condition is achieved by making sure that the sorting queues are empty 678 * (scn_queues_pending == 0). When this condition is not true, the sync'd state 679 * is inconsistent with how much actual scanning progress has been made. The 680 * kind of sync to be performed is specified by the sync_type argument. If the 681 * sync is optional, we only sync if the queues are empty. If the sync is 682 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The 683 * third possible state is a "cached" sync. This is done in response to: 684 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been 685 * destroyed, so we wouldn't be able to restart scanning from it. 686 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been 687 * superseded by a newer snapshot. 688 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been 689 * swapped with its clone. 690 * In all cases, a cached sync simply rewrites the last record we've written, 691 * just slightly modified. 
For the modifications that are performed to the 692 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed, 693 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped. 694 */ 695 static void 696 dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type) 697 { 698 int i; 699 spa_t *spa = scn->scn_dp->dp_spa; 700 701 ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0); 702 if (scn->scn_queues_pending == 0) { 703 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 704 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 705 dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue; 706 707 if (q == NULL) 708 continue; 709 710 mutex_enter(&vd->vdev_scan_io_queue_lock); 711 ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL); 712 ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==, 713 NULL); 714 ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL); 715 mutex_exit(&vd->vdev_scan_io_queue_lock); 716 } 717 718 if (scn->scn_phys.scn_queue_obj != 0) 719 scan_ds_queue_sync(scn, tx); 720 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 721 DMU_POOL_DIRECTORY_OBJECT, 722 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 723 &scn->scn_phys, tx)); 724 memcpy(&scn->scn_phys_cached, &scn->scn_phys, 725 sizeof (scn->scn_phys)); 726 727 if (scn->scn_checkpointing) 728 zfs_dbgmsg("finish scan checkpoint for %s", 729 spa->spa_name); 730 731 scn->scn_checkpointing = B_FALSE; 732 scn->scn_last_checkpoint = ddi_get_lbolt(); 733 } else if (sync_type == SYNC_CACHED) { 734 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 735 DMU_POOL_DIRECTORY_OBJECT, 736 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 737 &scn->scn_phys_cached, tx)); 738 } 739 } 740 741 int 742 dsl_scan_setup_check(void *arg, dmu_tx_t *tx) 743 { 744 (void) arg; 745 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 746 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 747 748 if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd)) 749 return (SET_ERROR(EBUSY)); 750 751 return (0); 752 } 753 754 void 755 dsl_scan_setup_sync(void *arg, dmu_tx_t *tx) 756 { 757 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 758 pool_scan_func_t *funcp = arg; 759 dmu_object_type_t ot = 0; 760 dsl_pool_t *dp = scn->scn_dp; 761 spa_t *spa = dp->dp_spa; 762 763 ASSERT(!dsl_scan_is_running(scn)); 764 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS); 765 memset(&scn->scn_phys, 0, sizeof (scn->scn_phys)); 766 scn->scn_phys.scn_func = *funcp; 767 scn->scn_phys.scn_state = DSS_SCANNING; 768 scn->scn_phys.scn_min_txg = 0; 769 scn->scn_phys.scn_max_txg = tx->tx_txg; 770 scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */ 771 scn->scn_phys.scn_start_time = gethrestime_sec(); 772 scn->scn_phys.scn_errors = 0; 773 scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc; 774 scn->scn_issued_before_pass = 0; 775 scn->scn_restart_txg = 0; 776 scn->scn_done_txg = 0; 777 scn->scn_last_checkpoint = 0; 778 scn->scn_checkpointing = B_FALSE; 779 spa_scan_stat_init(spa); 780 vdev_scan_stat_init(spa->spa_root_vdev); 781 782 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 783 scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max; 784 785 /* rewrite all disk labels */ 786 vdev_config_dirty(spa->spa_root_vdev); 787 788 if (vdev_resilver_needed(spa->spa_root_vdev, 789 &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) { 790 nvlist_t *aux = fnvlist_alloc(); 791 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 792 "healing"); 793 spa_event_notify(spa, NULL, aux, 794 ESC_ZFS_RESILVER_START); 795 nvlist_free(aux); 796 } else 
{ 797 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START); 798 } 799 800 spa->spa_scrub_started = B_TRUE; 801 /* 802 * If this is an incremental scrub, limit the DDT scrub phase 803 * to just the auto-ditto class (for correctness); the rest 804 * of the scrub should go faster using top-down pruning. 805 */ 806 if (scn->scn_phys.scn_min_txg > TXG_INITIAL) 807 scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO; 808 809 /* 810 * When starting a resilver clear any existing rebuild state. 811 * This is required to prevent stale rebuild status from 812 * being reported when a rebuild is run, then a resilver and 813 * finally a scrub. In which case only the scrub status 814 * should be reported by 'zpool status'. 815 */ 816 if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) { 817 vdev_t *rvd = spa->spa_root_vdev; 818 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 819 vdev_t *vd = rvd->vdev_child[i]; 820 vdev_rebuild_clear_sync( 821 (void *)(uintptr_t)vd->vdev_id, tx); 822 } 823 } 824 } 825 826 /* back to the generic stuff */ 827 828 if (zfs_scan_blkstats) { 829 if (dp->dp_blkstats == NULL) { 830 dp->dp_blkstats = 831 vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP); 832 } 833 memset(&dp->dp_blkstats->zab_type, 0, 834 sizeof (dp->dp_blkstats->zab_type)); 835 } else { 836 if (dp->dp_blkstats) { 837 vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); 838 dp->dp_blkstats = NULL; 839 } 840 } 841 842 if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) 843 ot = DMU_OT_ZAP_OTHER; 844 845 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, 846 ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx); 847 848 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys)); 849 850 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 851 852 spa_history_log_internal(spa, "scan setup", tx, 853 "func=%u mintxg=%llu maxtxg=%llu", 854 *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg, 855 (u_longlong_t)scn->scn_phys.scn_max_txg); 856 } 857 858 /* 859 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver. 860 * Can also be called to resume a paused scrub. 861 */ 862 int 863 dsl_scan(dsl_pool_t *dp, pool_scan_func_t func) 864 { 865 spa_t *spa = dp->dp_spa; 866 dsl_scan_t *scn = dp->dp_scan; 867 868 /* 869 * Purge all vdev caches and probe all devices. We do this here 870 * rather than in sync context because this requires a writer lock 871 * on the spa_config lock, which we can't do from sync context. The 872 * spa_scrub_reopen flag indicates that vdev_open() should not 873 * attempt to start another scrub. 
874 */ 875 spa_vdev_state_enter(spa, SCL_NONE); 876 spa->spa_scrub_reopen = B_TRUE; 877 vdev_reopen(spa->spa_root_vdev); 878 spa->spa_scrub_reopen = B_FALSE; 879 (void) spa_vdev_state_exit(spa, NULL, 0); 880 881 if (func == POOL_SCAN_RESILVER) { 882 dsl_scan_restart_resilver(spa->spa_dsl_pool, 0); 883 return (0); 884 } 885 886 if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) { 887 /* got scrub start cmd, resume paused scrub */ 888 int err = dsl_scrub_set_pause_resume(scn->scn_dp, 889 POOL_SCRUB_NORMAL); 890 if (err == 0) { 891 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME); 892 return (SET_ERROR(ECANCELED)); 893 } 894 895 return (SET_ERROR(err)); 896 } 897 898 return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check, 899 dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); 900 } 901 902 static void 903 dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) 904 { 905 static const char *old_names[] = { 906 "scrub_bookmark", 907 "scrub_ddt_bookmark", 908 "scrub_ddt_class_max", 909 "scrub_queue", 910 "scrub_min_txg", 911 "scrub_max_txg", 912 "scrub_func", 913 "scrub_errors", 914 NULL 915 }; 916 917 dsl_pool_t *dp = scn->scn_dp; 918 spa_t *spa = dp->dp_spa; 919 int i; 920 921 /* Remove any remnants of an old-style scrub. */ 922 for (i = 0; old_names[i]; i++) { 923 (void) zap_remove(dp->dp_meta_objset, 924 DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx); 925 } 926 927 if (scn->scn_phys.scn_queue_obj != 0) { 928 VERIFY0(dmu_object_free(dp->dp_meta_objset, 929 scn->scn_phys.scn_queue_obj, tx)); 930 scn->scn_phys.scn_queue_obj = 0; 931 } 932 scan_ds_queue_clear(scn); 933 scan_ds_prefetch_queue_clear(scn); 934 935 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 936 937 /* 938 * If we were "restarted" from a stopped state, don't bother 939 * with anything else. 940 */ 941 if (!dsl_scan_is_running(scn)) { 942 ASSERT(!scn->scn_is_sorted); 943 return; 944 } 945 946 if (scn->scn_is_sorted) { 947 scan_io_queues_destroy(scn); 948 scn->scn_is_sorted = B_FALSE; 949 950 if (scn->scn_taskq != NULL) { 951 taskq_destroy(scn->scn_taskq); 952 scn->scn_taskq = NULL; 953 } 954 } 955 956 scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED; 957 958 spa_notify_waiters(spa); 959 960 if (dsl_scan_restarting(scn, tx)) 961 spa_history_log_internal(spa, "scan aborted, restarting", tx, 962 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 963 else if (!complete) 964 spa_history_log_internal(spa, "scan cancelled", tx, 965 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 966 else 967 spa_history_log_internal(spa, "scan done", tx, 968 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 969 970 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 971 spa->spa_scrub_active = B_FALSE; 972 973 /* 974 * If the scrub/resilver completed, update all DTLs to 975 * reflect this. Whether it succeeded or not, vacate 976 * all temporary scrub DTLs. 977 * 978 * As the scrub does not currently support traversing 979 * data that have been freed but are part of a checkpoint, 980 * we don't mark the scrub as done in the DTLs as faults 981 * may still exist in those vdevs. 
982 */ 983 if (complete && 984 !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 985 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 986 scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE); 987 988 if (scn->scn_phys.scn_min_txg) { 989 nvlist_t *aux = fnvlist_alloc(); 990 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 991 "healing"); 992 spa_event_notify(spa, NULL, aux, 993 ESC_ZFS_RESILVER_FINISH); 994 nvlist_free(aux); 995 } else { 996 spa_event_notify(spa, NULL, NULL, 997 ESC_ZFS_SCRUB_FINISH); 998 } 999 } else { 1000 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 1001 0, B_TRUE, B_FALSE); 1002 } 1003 spa_errlog_rotate(spa); 1004 1005 /* 1006 * Don't clear flag until after vdev_dtl_reassess to ensure that 1007 * DTL_MISSING will get updated when possible. 1008 */ 1009 spa->spa_scrub_started = B_FALSE; 1010 1011 /* 1012 * We may have finished replacing a device. 1013 * Let the async thread assess this and handle the detach. 1014 */ 1015 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 1016 1017 /* 1018 * Clear any resilver_deferred flags in the config. 1019 * If there are drives that need resilvering, kick 1020 * off an asynchronous request to start resilver. 1021 * vdev_clear_resilver_deferred() may update the config 1022 * before the resilver can restart. In the event of 1023 * a crash during this period, the spa loading code 1024 * will find the drives that need to be resilvered 1025 * and start the resilver then. 1026 */ 1027 if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) && 1028 vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) { 1029 spa_history_log_internal(spa, 1030 "starting deferred resilver", tx, "errors=%llu", 1031 (u_longlong_t)spa_approx_errlog_size(spa)); 1032 spa_async_request(spa, SPA_ASYNC_RESILVER); 1033 } 1034 1035 /* Clear recent error events (i.e. 
duplicate events tracking) */ 1036 if (complete) 1037 zfs_ereport_clear(spa, NULL); 1038 } 1039 1040 scn->scn_phys.scn_end_time = gethrestime_sec(); 1041 1042 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB) 1043 spa->spa_errata = 0; 1044 1045 ASSERT(!dsl_scan_is_running(scn)); 1046 } 1047 1048 static int 1049 dsl_scan_cancel_check(void *arg, dmu_tx_t *tx) 1050 { 1051 (void) arg; 1052 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1053 1054 if (!dsl_scan_is_running(scn)) 1055 return (SET_ERROR(ENOENT)); 1056 return (0); 1057 } 1058 1059 static void 1060 dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx) 1061 { 1062 (void) arg; 1063 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1064 1065 dsl_scan_done(scn, B_FALSE, tx); 1066 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 1067 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT); 1068 } 1069 1070 int 1071 dsl_scan_cancel(dsl_pool_t *dp) 1072 { 1073 return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check, 1074 dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED)); 1075 } 1076 1077 static int 1078 dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx) 1079 { 1080 pool_scrub_cmd_t *cmd = arg; 1081 dsl_pool_t *dp = dmu_tx_pool(tx); 1082 dsl_scan_t *scn = dp->dp_scan; 1083 1084 if (*cmd == POOL_SCRUB_PAUSE) { 1085 /* can't pause a scrub when there is no in-progress scrub */ 1086 if (!dsl_scan_scrubbing(dp)) 1087 return (SET_ERROR(ENOENT)); 1088 1089 /* can't pause a paused scrub */ 1090 if (dsl_scan_is_paused_scrub(scn)) 1091 return (SET_ERROR(EBUSY)); 1092 } else if (*cmd != POOL_SCRUB_NORMAL) { 1093 return (SET_ERROR(ENOTSUP)); 1094 } 1095 1096 return (0); 1097 } 1098 1099 static void 1100 dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx) 1101 { 1102 pool_scrub_cmd_t *cmd = arg; 1103 dsl_pool_t *dp = dmu_tx_pool(tx); 1104 spa_t *spa = dp->dp_spa; 1105 dsl_scan_t *scn = dp->dp_scan; 1106 1107 if (*cmd == POOL_SCRUB_PAUSE) { 1108 /* can't pause a scrub when there is no in-progress scrub */ 1109 spa->spa_scan_pass_scrub_pause = gethrestime_sec(); 1110 scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED; 1111 scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED; 1112 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1113 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED); 1114 spa_notify_waiters(spa); 1115 } else { 1116 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL); 1117 if (dsl_scan_is_paused_scrub(scn)) { 1118 /* 1119 * We need to keep track of how much time we spend 1120 * paused per pass so that we can adjust the scrub rate 1121 * shown in the output of 'zpool status' 1122 */ 1123 spa->spa_scan_pass_scrub_spent_paused += 1124 gethrestime_sec() - spa->spa_scan_pass_scrub_pause; 1125 spa->spa_scan_pass_scrub_pause = 0; 1126 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 1127 scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED; 1128 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1129 } 1130 } 1131 } 1132 1133 /* 1134 * Set scrub pause/resume state if it makes sense to do so 1135 */ 1136 int 1137 dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd) 1138 { 1139 return (dsl_sync_task(spa_name(dp->dp_spa), 1140 dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3, 1141 ZFS_SPACE_CHECK_RESERVED)); 1142 } 1143 1144 1145 /* start a new scan, or restart an existing one. 
*/ 1146 void 1147 dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg) 1148 { 1149 if (txg == 0) { 1150 dmu_tx_t *tx; 1151 tx = dmu_tx_create_dd(dp->dp_mos_dir); 1152 VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT)); 1153 1154 txg = dmu_tx_get_txg(tx); 1155 dp->dp_scan->scn_restart_txg = txg; 1156 dmu_tx_commit(tx); 1157 } else { 1158 dp->dp_scan->scn_restart_txg = txg; 1159 } 1160 zfs_dbgmsg("restarting resilver for %s at txg=%llu", 1161 dp->dp_spa->spa_name, (longlong_t)txg); 1162 } 1163 1164 void 1165 dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp) 1166 { 1167 zio_free(dp->dp_spa, txg, bp); 1168 } 1169 1170 void 1171 dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp) 1172 { 1173 ASSERT(dsl_pool_sync_context(dp)); 1174 zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags)); 1175 } 1176 1177 static int 1178 scan_ds_queue_compare(const void *a, const void *b) 1179 { 1180 const scan_ds_t *sds_a = a, *sds_b = b; 1181 1182 if (sds_a->sds_dsobj < sds_b->sds_dsobj) 1183 return (-1); 1184 if (sds_a->sds_dsobj == sds_b->sds_dsobj) 1185 return (0); 1186 return (1); 1187 } 1188 1189 static void 1190 scan_ds_queue_clear(dsl_scan_t *scn) 1191 { 1192 void *cookie = NULL; 1193 scan_ds_t *sds; 1194 while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) { 1195 kmem_free(sds, sizeof (*sds)); 1196 } 1197 } 1198 1199 static boolean_t 1200 scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg) 1201 { 1202 scan_ds_t srch, *sds; 1203 1204 srch.sds_dsobj = dsobj; 1205 sds = avl_find(&scn->scn_queue, &srch, NULL); 1206 if (sds != NULL && txg != NULL) 1207 *txg = sds->sds_txg; 1208 return (sds != NULL); 1209 } 1210 1211 static void 1212 scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg) 1213 { 1214 scan_ds_t *sds; 1215 avl_index_t where; 1216 1217 sds = kmem_zalloc(sizeof (*sds), KM_SLEEP); 1218 sds->sds_dsobj = dsobj; 1219 sds->sds_txg = txg; 1220 1221 VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL); 1222 avl_insert(&scn->scn_queue, sds, where); 1223 } 1224 1225 static void 1226 scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj) 1227 { 1228 scan_ds_t srch, *sds; 1229 1230 srch.sds_dsobj = dsobj; 1231 1232 sds = avl_find(&scn->scn_queue, &srch, NULL); 1233 VERIFY(sds != NULL); 1234 avl_remove(&scn->scn_queue, sds); 1235 kmem_free(sds, sizeof (*sds)); 1236 } 1237 1238 static void 1239 scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx) 1240 { 1241 dsl_pool_t *dp = scn->scn_dp; 1242 spa_t *spa = dp->dp_spa; 1243 dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ? 1244 DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER; 1245 1246 ASSERT0(scn->scn_queues_pending); 1247 ASSERT(scn->scn_phys.scn_queue_obj != 0); 1248 1249 VERIFY0(dmu_object_free(dp->dp_meta_objset, 1250 scn->scn_phys.scn_queue_obj, tx)); 1251 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot, 1252 DMU_OT_NONE, 0, tx); 1253 for (scan_ds_t *sds = avl_first(&scn->scn_queue); 1254 sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) { 1255 VERIFY0(zap_add_int_key(dp->dp_meta_objset, 1256 scn->scn_phys.scn_queue_obj, sds->sds_dsobj, 1257 sds->sds_txg, tx)); 1258 } 1259 } 1260 1261 /* 1262 * Computes the memory limit state that we're currently in. A sorted scan 1263 * needs quite a bit of memory to hold the sorting queue, so we need to 1264 * reasonably constrain the size so it doesn't impact overall system 1265 * performance. 
We compute two limits: 1266 * 1) Hard memory limit: if the amount of memory used by the sorting 1267 * queues on a pool gets above this value, we stop the metadata 1268 * scanning portion and start issuing the queued up and sorted 1269 * I/Os to reduce memory usage. 1270 * This limit is calculated as a fraction of physmem (by default 5%). 1271 * We constrain the lower bound of the hard limit to an absolute 1272 * minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain 1273 * the upper bound to 5% of the total pool size - no chance we'll 1274 * ever need that much memory, but just to keep the value in check. 1275 * 2) Soft memory limit: once we hit the hard memory limit, we start 1276 * issuing I/O to reduce queue memory usage, but we don't want to 1277 * completely empty out the queues, since we might be able to find I/Os 1278 * that will fill in the gaps of our non-sequential IOs at some point 1279 * in the future. So we stop the issuing of I/Os once the amount of 1280 * memory used drops below the soft limit (at which point we stop issuing 1281 * I/O and start scanning metadata again). 1282 * 1283 * This limit is calculated by subtracting a fraction of the hard 1284 * limit from the hard limit. By default this fraction is 5%, so 1285 * the soft limit is 95% of the hard limit. We cap the size of the 1286 * difference between the hard and soft limits at an absolute 1287 * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is 1288 * sufficient to not cause too frequent switching between the 1289 * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's 1290 * worth of queues is about 1.2 GiB of on-pool data, so scanning 1291 * that should take at least a decent fraction of a second). 1292 */ 1293 static boolean_t 1294 dsl_scan_should_clear(dsl_scan_t *scn) 1295 { 1296 spa_t *spa = scn->scn_dp->dp_spa; 1297 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 1298 uint64_t alloc, mlim_hard, mlim_soft, mused; 1299 1300 alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 1301 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 1302 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 1303 1304 mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE, 1305 zfs_scan_mem_lim_min); 1306 mlim_hard = MIN(mlim_hard, alloc / 20); 1307 mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact, 1308 zfs_scan_mem_lim_soft_max); 1309 mused = 0; 1310 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 1311 vdev_t *tvd = rvd->vdev_child[i]; 1312 dsl_scan_io_queue_t *queue; 1313 1314 mutex_enter(&tvd->vdev_scan_io_queue_lock); 1315 queue = tvd->vdev_scan_io_queue; 1316 if (queue != NULL) { 1317 /* 1318 * # of extents in exts_by_addr = # in exts_by_size. 1319 * B-tree efficiency is ~75%, but can be as low as 50%. 1320 */ 1321 mused += zfs_btree_numnodes(&queue->q_exts_by_size) * 1322 ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) * 1323 3 / 2) + queue->q_sio_memused; 1324 } 1325 mutex_exit(&tvd->vdev_scan_io_queue_lock); 1326 } 1327 1328 dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused); 1329 1330 if (mused == 0) 1331 ASSERT0(scn->scn_queues_pending); 1332 1333 /* 1334 * If we are above our hard limit, we need to clear out memory. 1335 * If we are below our soft limit, we need to accumulate sequential IOs. 1336 * Otherwise, we should keep doing whatever we are currently doing. 
1337 */ 1338 if (mused >= mlim_hard) 1339 return (B_TRUE); 1340 else if (mused < mlim_soft) 1341 return (B_FALSE); 1342 else 1343 return (scn->scn_clearing); 1344 } 1345 1346 static boolean_t 1347 dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) 1348 { 1349 /* we never skip user/group accounting objects */ 1350 if (zb && (int64_t)zb->zb_object < 0) 1351 return (B_FALSE); 1352 1353 if (scn->scn_suspending) 1354 return (B_TRUE); /* we're already suspending */ 1355 1356 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) 1357 return (B_FALSE); /* we're resuming */ 1358 1359 /* We only know how to resume from level-0 and objset blocks. */ 1360 if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL)) 1361 return (B_FALSE); 1362 1363 /* 1364 * We suspend if: 1365 * - we have scanned for at least the minimum time (default 1 sec 1366 * for scrub, 3 sec for resilver), and either we have sufficient 1367 * dirty data that we are starting to write more quickly 1368 * (default 30%), someone is explicitly waiting for this txg 1369 * to complete, or we have used up all of the time in the txg 1370 * timeout (default 5 sec). 1371 * or 1372 * - the spa is shutting down because this pool is being exported 1373 * or the machine is rebooting. 1374 * or 1375 * - the scan queue has reached its memory use limit 1376 */ 1377 uint64_t curr_time_ns = gethrtime(); 1378 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 1379 uint64_t sync_time_ns = curr_time_ns - 1380 scn->scn_dp->dp_spa->spa_sync_starttime; 1381 uint64_t dirty_min_bytes = zfs_dirty_data_max * 1382 zfs_vdev_async_write_active_min_dirty_percent / 100; 1383 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 1384 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 1385 1386 if ((NSEC2MSEC(scan_time_ns) > mintime && 1387 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes || 1388 txg_sync_waiting(scn->scn_dp) || 1389 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 1390 spa_shutting_down(scn->scn_dp->dp_spa) || 1391 (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) { 1392 if (zb && zb->zb_level == ZB_ROOT_LEVEL) { 1393 dprintf("suspending at first available bookmark " 1394 "%llx/%llx/%llx/%llx\n", 1395 (longlong_t)zb->zb_objset, 1396 (longlong_t)zb->zb_object, 1397 (longlong_t)zb->zb_level, 1398 (longlong_t)zb->zb_blkid); 1399 SET_BOOKMARK(&scn->scn_phys.scn_bookmark, 1400 zb->zb_objset, 0, 0, 0); 1401 } else if (zb != NULL) { 1402 dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n", 1403 (longlong_t)zb->zb_objset, 1404 (longlong_t)zb->zb_object, 1405 (longlong_t)zb->zb_level, 1406 (longlong_t)zb->zb_blkid); 1407 scn->scn_phys.scn_bookmark = *zb; 1408 } else { 1409 #ifdef ZFS_DEBUG 1410 dsl_scan_phys_t *scnp = &scn->scn_phys; 1411 dprintf("suspending at at DDT bookmark " 1412 "%llx/%llx/%llx/%llx\n", 1413 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 1414 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 1415 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 1416 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 1417 #endif 1418 } 1419 scn->scn_suspending = B_TRUE; 1420 return (B_TRUE); 1421 } 1422 return (B_FALSE); 1423 } 1424 1425 typedef struct zil_scan_arg { 1426 dsl_pool_t *zsa_dp; 1427 zil_header_t *zsa_zh; 1428 } zil_scan_arg_t; 1429 1430 static int 1431 dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, 1432 uint64_t claim_txg) 1433 { 1434 (void) zilog; 1435 zil_scan_arg_t *zsa = arg; 1436 dsl_pool_t *dp = zsa->zsa_dp; 1437 dsl_scan_t *scn = dp->dp_scan; 1438 zil_header_t *zh = zsa->zsa_zh; 
1439 zbookmark_phys_t zb; 1440 1441 ASSERT(!BP_IS_REDACTED(bp)); 1442 if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) 1443 return (0); 1444 1445 /* 1446 * One block ("stubby") can be allocated a long time ago; we 1447 * want to visit that one because it has been allocated 1448 * (on-disk) even if it hasn't been claimed (even though for 1449 * scrub there's nothing to do to it). 1450 */ 1451 if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa)) 1452 return (0); 1453 1454 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1455 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 1456 1457 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1458 return (0); 1459 } 1460 1461 static int 1462 dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, 1463 uint64_t claim_txg) 1464 { 1465 (void) zilog; 1466 if (lrc->lrc_txtype == TX_WRITE) { 1467 zil_scan_arg_t *zsa = arg; 1468 dsl_pool_t *dp = zsa->zsa_dp; 1469 dsl_scan_t *scn = dp->dp_scan; 1470 zil_header_t *zh = zsa->zsa_zh; 1471 const lr_write_t *lr = (const lr_write_t *)lrc; 1472 const blkptr_t *bp = &lr->lr_blkptr; 1473 zbookmark_phys_t zb; 1474 1475 ASSERT(!BP_IS_REDACTED(bp)); 1476 if (BP_IS_HOLE(bp) || 1477 bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) 1478 return (0); 1479 1480 /* 1481 * birth can be < claim_txg if this record's txg is 1482 * already txg sync'ed (but this log block contains 1483 * other records that are not synced) 1484 */ 1485 if (claim_txg == 0 || bp->blk_birth < claim_txg) 1486 return (0); 1487 1488 ASSERT3U(BP_GET_LSIZE(bp), !=, 0); 1489 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1490 lr->lr_foid, ZB_ZIL_LEVEL, 1491 lr->lr_offset / BP_GET_LSIZE(bp)); 1492 1493 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1494 } 1495 return (0); 1496 } 1497 1498 static void 1499 dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh) 1500 { 1501 uint64_t claim_txg = zh->zh_claim_txg; 1502 zil_scan_arg_t zsa = { dp, zh }; 1503 zilog_t *zilog; 1504 1505 ASSERT(spa_writeable(dp->dp_spa)); 1506 1507 /* 1508 * We only want to visit blocks that have been claimed but not yet 1509 * replayed (or, in read-only mode, blocks that *would* be claimed). 1510 */ 1511 if (claim_txg == 0) 1512 return; 1513 1514 zilog = zil_alloc(dp->dp_meta_objset, zh); 1515 1516 (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa, 1517 claim_txg, B_FALSE); 1518 1519 zil_free(zilog); 1520 } 1521 1522 /* 1523 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea 1524 * here is to sort the AVL tree by the order each block will be needed. 
1525 */ 1526 static int 1527 scan_prefetch_queue_compare(const void *a, const void *b) 1528 { 1529 const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b; 1530 const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc; 1531 const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc; 1532 1533 return (zbookmark_compare(spc_a->spc_datablkszsec, 1534 spc_a->spc_indblkshift, spc_b->spc_datablkszsec, 1535 spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb)); 1536 } 1537 1538 static void 1539 scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag) 1540 { 1541 if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { 1542 zfs_refcount_destroy(&spc->spc_refcnt); 1543 kmem_free(spc, sizeof (scan_prefetch_ctx_t)); 1544 } 1545 } 1546 1547 static scan_prefetch_ctx_t * 1548 scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag) 1549 { 1550 scan_prefetch_ctx_t *spc; 1551 1552 spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); 1553 zfs_refcount_create(&spc->spc_refcnt); 1554 zfs_refcount_add(&spc->spc_refcnt, tag); 1555 spc->spc_scn = scn; 1556 if (dnp != NULL) { 1557 spc->spc_datablkszsec = dnp->dn_datablkszsec; 1558 spc->spc_indblkshift = dnp->dn_indblkshift; 1559 spc->spc_root = B_FALSE; 1560 } else { 1561 spc->spc_datablkszsec = 0; 1562 spc->spc_indblkshift = 0; 1563 spc->spc_root = B_TRUE; 1564 } 1565 1566 return (spc); 1567 } 1568 1569 static void 1570 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag) 1571 { 1572 zfs_refcount_add(&spc->spc_refcnt, tag); 1573 } 1574 1575 static void 1576 scan_ds_prefetch_queue_clear(dsl_scan_t *scn) 1577 { 1578 spa_t *spa = scn->scn_dp->dp_spa; 1579 void *cookie = NULL; 1580 scan_prefetch_issue_ctx_t *spic = NULL; 1581 1582 mutex_enter(&spa->spa_scrub_lock); 1583 while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, 1584 &cookie)) != NULL) { 1585 scan_prefetch_ctx_rele(spic->spic_spc, scn); 1586 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1587 } 1588 mutex_exit(&spa->spa_scrub_lock); 1589 } 1590 1591 static boolean_t 1592 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, 1593 const zbookmark_phys_t *zb) 1594 { 1595 zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; 1596 dnode_phys_t tmp_dnp; 1597 dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; 1598 1599 if (zb->zb_objset != last_zb->zb_objset) 1600 return (B_TRUE); 1601 if ((int64_t)zb->zb_object < 0) 1602 return (B_FALSE); 1603 1604 tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; 1605 tmp_dnp.dn_indblkshift = spc->spc_indblkshift; 1606 1607 if (zbookmark_subtree_completed(dnp, zb, last_zb)) 1608 return (B_TRUE); 1609 1610 return (B_FALSE); 1611 } 1612 1613 static void 1614 dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) 1615 { 1616 avl_index_t idx; 1617 dsl_scan_t *scn = spc->spc_scn; 1618 spa_t *spa = scn->scn_dp->dp_spa; 1619 scan_prefetch_issue_ctx_t *spic; 1620 1621 if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp)) 1622 return; 1623 1624 if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg || 1625 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && 1626 BP_GET_TYPE(bp) != DMU_OT_OBJSET)) 1627 return; 1628 1629 if (dsl_scan_check_prefetch_resume(spc, zb)) 1630 return; 1631 1632 scan_prefetch_ctx_add_ref(spc, scn); 1633 spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); 1634 spic->spic_spc = spc; 1635 spic->spic_bp = *bp; 1636 spic->spic_zb = *zb; 1637 1638 /* 1639 * Add the IO to the queue of blocks to prefetch. 
This allows us to 1640 * prioritize blocks that we will need first for the main traversal 1641 * thread. 1642 */ 1643 mutex_enter(&spa->spa_scrub_lock); 1644 if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { 1645 /* this block is already queued for prefetch */ 1646 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1647 scan_prefetch_ctx_rele(spc, scn); 1648 mutex_exit(&spa->spa_scrub_lock); 1649 return; 1650 } 1651 1652 avl_insert(&scn->scn_prefetch_queue, spic, idx); 1653 cv_broadcast(&spa->spa_scrub_io_cv); 1654 mutex_exit(&spa->spa_scrub_lock); 1655 } 1656 1657 static void 1658 dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, 1659 uint64_t objset, uint64_t object) 1660 { 1661 int i; 1662 zbookmark_phys_t zb; 1663 scan_prefetch_ctx_t *spc; 1664 1665 if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 1666 return; 1667 1668 SET_BOOKMARK(&zb, objset, object, 0, 0); 1669 1670 spc = scan_prefetch_ctx_create(scn, dnp, FTAG); 1671 1672 for (i = 0; i < dnp->dn_nblkptr; i++) { 1673 zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); 1674 zb.zb_blkid = i; 1675 dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); 1676 } 1677 1678 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1679 zb.zb_level = 0; 1680 zb.zb_blkid = DMU_SPILL_BLKID; 1681 dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); 1682 } 1683 1684 scan_prefetch_ctx_rele(spc, FTAG); 1685 } 1686 1687 static void 1688 dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1689 arc_buf_t *buf, void *private) 1690 { 1691 (void) zio; 1692 scan_prefetch_ctx_t *spc = private; 1693 dsl_scan_t *scn = spc->spc_scn; 1694 spa_t *spa = scn->scn_dp->dp_spa; 1695 1696 /* broadcast that the IO has completed for rate limiting purposes */ 1697 mutex_enter(&spa->spa_scrub_lock); 1698 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 1699 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 1700 cv_broadcast(&spa->spa_scrub_io_cv); 1701 mutex_exit(&spa->spa_scrub_lock); 1702 1703 /* if there was an error or we are done prefetching, just cleanup */ 1704 if (buf == NULL || scn->scn_prefetch_stop) 1705 goto out; 1706 1707 if (BP_GET_LEVEL(bp) > 0) { 1708 int i; 1709 blkptr_t *cbp; 1710 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1711 zbookmark_phys_t czb; 1712 1713 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 1714 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1715 zb->zb_level - 1, zb->zb_blkid * epb + i); 1716 dsl_scan_prefetch(spc, cbp, &czb); 1717 } 1718 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 1719 dnode_phys_t *cdnp; 1720 int i; 1721 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 1722 1723 for (i = 0, cdnp = buf->b_data; i < epb; 1724 i += cdnp->dn_extra_slots + 1, 1725 cdnp += cdnp->dn_extra_slots + 1) { 1726 dsl_scan_prefetch_dnode(scn, cdnp, 1727 zb->zb_objset, zb->zb_blkid * epb + i); 1728 } 1729 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 1730 objset_phys_t *osp = buf->b_data; 1731 1732 dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, 1733 zb->zb_objset, DMU_META_DNODE_OBJECT); 1734 1735 if (OBJSET_BUF_HAS_USERUSED(buf)) { 1736 dsl_scan_prefetch_dnode(scn, 1737 &osp->os_groupused_dnode, zb->zb_objset, 1738 DMU_GROUPUSED_OBJECT); 1739 dsl_scan_prefetch_dnode(scn, 1740 &osp->os_userused_dnode, zb->zb_objset, 1741 DMU_USERUSED_OBJECT); 1742 } 1743 } 1744 1745 out: 1746 if (buf != NULL) 1747 arc_buf_destroy(buf, private); 1748 scan_prefetch_ctx_rele(spc, scn); 1749 } 1750 1751 static void 1752 dsl_scan_prefetch_thread(void *arg) 1753 { 1754 dsl_scan_t *scn = arg; 1755 spa_t 
*spa = scn->scn_dp->dp_spa; 1756 scan_prefetch_issue_ctx_t *spic; 1757 1758 /* loop until we are told to stop */ 1759 while (!scn->scn_prefetch_stop) { 1760 arc_flags_t flags = ARC_FLAG_NOWAIT | 1761 ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; 1762 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 1763 1764 mutex_enter(&spa->spa_scrub_lock); 1765 1766 /* 1767 * Wait until we have an IO to issue and are not above our 1768 * maximum in flight limit. 1769 */ 1770 while (!scn->scn_prefetch_stop && 1771 (avl_numnodes(&scn->scn_prefetch_queue) == 0 || 1772 spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { 1773 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 1774 } 1775 1776 /* recheck if we should stop since we waited for the cv */ 1777 if (scn->scn_prefetch_stop) { 1778 mutex_exit(&spa->spa_scrub_lock); 1779 break; 1780 } 1781 1782 /* remove the prefetch IO from the tree */ 1783 spic = avl_first(&scn->scn_prefetch_queue); 1784 spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); 1785 avl_remove(&scn->scn_prefetch_queue, spic); 1786 1787 mutex_exit(&spa->spa_scrub_lock); 1788 1789 if (BP_IS_PROTECTED(&spic->spic_bp)) { 1790 ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || 1791 BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); 1792 ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); 1793 zio_flags |= ZIO_FLAG_RAW; 1794 } 1795 1796 /* issue the prefetch asynchronously */ 1797 (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, 1798 &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc, 1799 ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb); 1800 1801 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1802 } 1803 1804 ASSERT(scn->scn_prefetch_stop); 1805 1806 /* free any prefetches we didn't get to complete */ 1807 mutex_enter(&spa->spa_scrub_lock); 1808 while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { 1809 avl_remove(&scn->scn_prefetch_queue, spic); 1810 scan_prefetch_ctx_rele(spic->spic_spc, scn); 1811 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1812 } 1813 ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); 1814 mutex_exit(&spa->spa_scrub_lock); 1815 } 1816 1817 static boolean_t 1818 dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp, 1819 const zbookmark_phys_t *zb) 1820 { 1821 /* 1822 * We never skip over user/group accounting objects (obj<0) 1823 */ 1824 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) && 1825 (int64_t)zb->zb_object >= 0) { 1826 /* 1827 * If we already visited this bp & everything below (in 1828 * a prior txg sync), don't bother doing it again. 1829 */ 1830 if (zbookmark_subtree_completed(dnp, zb, 1831 &scn->scn_phys.scn_bookmark)) 1832 return (B_TRUE); 1833 1834 /* 1835 * If we found the block we're trying to resume from, or 1836 * we went past it, zero it out to indicate that it's OK 1837 * to start checking for suspending again. 
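 *
 * Roughly, the two bookmark helpers partition the traversal around the
 * saved resume point: zbookmark_subtree_completed() is true for
 * subtrees that were fully visited before the bookmark was taken, and
 * zbookmark_subtree_tbd() becomes true once the traversal reaches (or
 * passes) the bookmark. A sketch of the logic below (not additional
 * code):
 *
 *	if (zbookmark_subtree_completed(dnp, zb, resume))
 *		return (B_TRUE);	skip; visited in a prior txg
 *	if (zbookmark_subtree_tbd(dnp, zb, resume))
 *		memset(resume, 0, ...);	caught up; clear the resume point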
1838 */ 1839 if (zbookmark_subtree_tbd(dnp, zb, 1840 &scn->scn_phys.scn_bookmark)) { 1841 dprintf("resuming at %llx/%llx/%llx/%llx\n", 1842 (longlong_t)zb->zb_objset, 1843 (longlong_t)zb->zb_object, 1844 (longlong_t)zb->zb_level, 1845 (longlong_t)zb->zb_blkid); 1846 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb)); 1847 } 1848 } 1849 return (B_FALSE); 1850 } 1851 1852 static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, 1853 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 1854 dmu_objset_type_t ostype, dmu_tx_t *tx); 1855 inline __attribute__((always_inline)) static void dsl_scan_visitdnode( 1856 dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype, 1857 dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx); 1858 1859 /* 1860 * Return nonzero on i/o error. 1861 * Return new buf to write out in *bufp. 1862 */ 1863 inline __attribute__((always_inline)) static int 1864 dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, 1865 dnode_phys_t *dnp, const blkptr_t *bp, 1866 const zbookmark_phys_t *zb, dmu_tx_t *tx) 1867 { 1868 dsl_pool_t *dp = scn->scn_dp; 1869 spa_t *spa = dp->dp_spa; 1870 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 1871 int err; 1872 1873 ASSERT(!BP_IS_REDACTED(bp)); 1874 1875 /* 1876 * There is an unlikely case of encountering dnodes with contradicting 1877 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created 1878 * or modified before commit 4254acb was merged. As it is not possible 1879 * to know which of the two is correct, report an error. 1880 */ 1881 if (dnp != NULL && 1882 dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) { 1883 scn->scn_phys.scn_errors++; 1884 spa_log_error(spa, zb, &bp->blk_birth); 1885 return (SET_ERROR(EINVAL)); 1886 } 1887 1888 if (BP_GET_LEVEL(bp) > 0) { 1889 arc_flags_t flags = ARC_FLAG_WAIT; 1890 int i; 1891 blkptr_t *cbp; 1892 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1893 arc_buf_t *buf; 1894 1895 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1896 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1897 if (err) { 1898 scn->scn_phys.scn_errors++; 1899 return (err); 1900 } 1901 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 1902 zbookmark_phys_t czb; 1903 1904 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1905 zb->zb_level - 1, 1906 zb->zb_blkid * epb + i); 1907 dsl_scan_visitbp(cbp, &czb, dnp, 1908 ds, scn, ostype, tx); 1909 } 1910 arc_buf_destroy(buf, &buf); 1911 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 1912 arc_flags_t flags = ARC_FLAG_WAIT; 1913 dnode_phys_t *cdnp; 1914 int i; 1915 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 1916 arc_buf_t *buf; 1917 1918 if (BP_IS_PROTECTED(bp)) { 1919 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 1920 zio_flags |= ZIO_FLAG_RAW; 1921 } 1922 1923 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1924 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1925 if (err) { 1926 scn->scn_phys.scn_errors++; 1927 return (err); 1928 } 1929 for (i = 0, cdnp = buf->b_data; i < epb; 1930 i += cdnp->dn_extra_slots + 1, 1931 cdnp += cdnp->dn_extra_slots + 1) { 1932 dsl_scan_visitdnode(scn, ds, ostype, 1933 cdnp, zb->zb_blkid * epb + i, tx); 1934 } 1935 1936 arc_buf_destroy(buf, &buf); 1937 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 1938 arc_flags_t flags = ARC_FLAG_WAIT; 1939 objset_phys_t *osp; 1940 arc_buf_t *buf; 1941 1942 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 1943 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1944 if (err) { 1945 scn->scn_phys.scn_errors++; 1946 return (err); 1947 } 1948 1949 osp = 
buf->b_data; 1950 1951 dsl_scan_visitdnode(scn, ds, osp->os_type, 1952 &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx); 1953 1954 if (OBJSET_BUF_HAS_USERUSED(buf)) { 1955 /* 1956 * We also always visit user/group/project accounting 1957 * objects, and never skip them, even if we are 1958 * suspending. This is necessary so that the 1959 * space deltas from this txg get integrated. 1960 */ 1961 if (OBJSET_BUF_HAS_PROJECTUSED(buf)) 1962 dsl_scan_visitdnode(scn, ds, osp->os_type, 1963 &osp->os_projectused_dnode, 1964 DMU_PROJECTUSED_OBJECT, tx); 1965 dsl_scan_visitdnode(scn, ds, osp->os_type, 1966 &osp->os_groupused_dnode, 1967 DMU_GROUPUSED_OBJECT, tx); 1968 dsl_scan_visitdnode(scn, ds, osp->os_type, 1969 &osp->os_userused_dnode, 1970 DMU_USERUSED_OBJECT, tx); 1971 } 1972 arc_buf_destroy(buf, &buf); 1973 } else if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) { 1974 /* 1975 * Sanity check the block pointer contents, this is handled 1976 * by arc_read() for the cases above. 1977 */ 1978 scn->scn_phys.scn_errors++; 1979 spa_log_error(spa, zb, &bp->blk_birth); 1980 return (SET_ERROR(EINVAL)); 1981 } 1982 1983 return (0); 1984 } 1985 1986 inline __attribute__((always_inline)) static void 1987 dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds, 1988 dmu_objset_type_t ostype, dnode_phys_t *dnp, 1989 uint64_t object, dmu_tx_t *tx) 1990 { 1991 int j; 1992 1993 for (j = 0; j < dnp->dn_nblkptr; j++) { 1994 zbookmark_phys_t czb; 1995 1996 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 1997 dnp->dn_nlevels - 1, j); 1998 dsl_scan_visitbp(&dnp->dn_blkptr[j], 1999 &czb, dnp, ds, scn, ostype, tx); 2000 } 2001 2002 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 2003 zbookmark_phys_t czb; 2004 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 2005 0, DMU_SPILL_BLKID); 2006 dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp), 2007 &czb, dnp, ds, scn, ostype, tx); 2008 } 2009 } 2010 2011 /* 2012 * The arguments are in this order because mdb can only print the 2013 * first 5; we want them to be useful. 2014 */ 2015 static void 2016 dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, 2017 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 2018 dmu_objset_type_t ostype, dmu_tx_t *tx) 2019 { 2020 dsl_pool_t *dp = scn->scn_dp; 2021 blkptr_t *bp_toread = NULL; 2022 2023 if (dsl_scan_check_suspend(scn, zb)) 2024 return; 2025 2026 if (dsl_scan_check_resume(scn, dnp, zb)) 2027 return; 2028 2029 scn->scn_visited_this_txg++; 2030 2031 if (BP_IS_HOLE(bp)) { 2032 scn->scn_holes_this_txg++; 2033 return; 2034 } 2035 2036 if (BP_IS_REDACTED(bp)) { 2037 ASSERT(dsl_dataset_feature_is_active(ds, 2038 SPA_FEATURE_REDACTED_DATASETS)); 2039 return; 2040 } 2041 2042 /* 2043 * Check if this block contradicts any filesystem flags. 
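 *
 * That is, a block whose size, checksum, or compression algorithm
 * requires a pool feature should only ever be found in a dataset on
 * which that feature is active; the assertions below encode that
 * invariant (e.g. a block with BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE
 * implies SPA_FEATURE_LARGE_BLOCKS is active on the dataset).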
2044 */ 2045 spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS; 2046 if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) 2047 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2048 2049 f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp)); 2050 if (f != SPA_FEATURE_NONE) 2051 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2052 2053 f = zio_compress_to_feature(BP_GET_COMPRESS(bp)); 2054 if (f != SPA_FEATURE_NONE) 2055 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2056 2057 if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) { 2058 scn->scn_lt_min_this_txg++; 2059 return; 2060 } 2061 2062 bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); 2063 *bp_toread = *bp; 2064 2065 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0) 2066 goto out; 2067 2068 /* 2069 * If dsl_scan_ddt() has already visited this block, it will have 2070 * already done any translations or scrubbing, so don't call the 2071 * callback again. 2072 */ 2073 if (ddt_class_contains(dp->dp_spa, 2074 scn->scn_phys.scn_ddt_class_max, bp)) { 2075 scn->scn_ddt_contained_this_txg++; 2076 goto out; 2077 } 2078 2079 /* 2080 * If this block is from the future (after cur_max_txg), then we 2081 * are doing this on behalf of a deleted snapshot, and we will 2082 * revisit the future block on the next pass of this dataset. 2083 * Don't scan it now unless we need to because something 2084 * under it was modified. 2085 */ 2086 if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { 2087 scn->scn_gt_max_this_txg++; 2088 goto out; 2089 } 2090 2091 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); 2092 2093 out: 2094 kmem_free(bp_toread, sizeof (blkptr_t)); 2095 } 2096 2097 static void 2098 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, 2099 dmu_tx_t *tx) 2100 { 2101 zbookmark_phys_t zb; 2102 scan_prefetch_ctx_t *spc; 2103 2104 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, 2105 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 2106 2107 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { 2108 SET_BOOKMARK(&scn->scn_prefetch_bookmark, 2109 zb.zb_objset, 0, 0, 0); 2110 } else { 2111 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; 2112 } 2113 2114 scn->scn_objsets_visited_this_txg++; 2115 2116 spc = scan_prefetch_ctx_create(scn, NULL, FTAG); 2117 dsl_scan_prefetch(spc, bp, &zb); 2118 scan_prefetch_ctx_rele(spc, FTAG); 2119 2120 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); 2121 2122 dprintf_ds(ds, "finished scan%s", ""); 2123 } 2124 2125 static void 2126 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) 2127 { 2128 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { 2129 if (ds->ds_is_snapshot) { 2130 /* 2131 * Note: 2132 * - scn_cur_{min,max}_txg stays the same. 2133 * - Setting the flag is not really necessary if 2134 * scn_cur_max_txg == scn_max_txg, because there 2135 * is nothing after this snapshot that we care 2136 * about. However, we set it anyway and then 2137 * ignore it when we retraverse it in 2138 * dsl_scan_visitds(). 
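 *
 * In other words (illustrative): if the scan is partway through
 * snapshot S when S is destroyed, the bookmark is redirected to S's
 * ds_next_snap_obj, which now references the blocks we still had left
 * to visit, and DSF_VISIT_DS_AGAIN asks dsl_scan_visitds() to schedule
 * one more pass over that substitute dataset.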
2139 */ 2140 scn_phys->scn_bookmark.zb_objset = 2141 dsl_dataset_phys(ds)->ds_next_snap_obj; 2142 zfs_dbgmsg("destroying ds %llu on %s; currently " 2143 "traversing; reset zb_objset to %llu", 2144 (u_longlong_t)ds->ds_object, 2145 ds->ds_dir->dd_pool->dp_spa->spa_name, 2146 (u_longlong_t)dsl_dataset_phys(ds)-> 2147 ds_next_snap_obj); 2148 scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN; 2149 } else { 2150 SET_BOOKMARK(&scn_phys->scn_bookmark, 2151 ZB_DESTROYED_OBJSET, 0, 0, 0); 2152 zfs_dbgmsg("destroying ds %llu on %s; currently " 2153 "traversing; reset bookmark to -1,0,0,0", 2154 (u_longlong_t)ds->ds_object, 2155 ds->ds_dir->dd_pool->dp_spa->spa_name); 2156 } 2157 } 2158 } 2159 2160 /* 2161 * Invoked when a dataset is destroyed. We need to make sure that: 2162 * 2163 * 1) If it is the dataset that is currently being scanned, we write 2164 * a new dsl_scan_phys_t, marking the objset reference in it 2165 * as destroyed. 2166 * 2) Remove it from the work queue, if it was present. 2167 * 2168 * If the dataset is actually a snapshot, instead of marking the dataset 2169 * as destroyed, we substitute the next snapshot in line. 2170 */ 2171 void 2172 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) 2173 { 2174 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2175 dsl_scan_t *scn = dp->dp_scan; 2176 uint64_t mintxg; 2177 2178 if (!dsl_scan_is_running(scn)) 2179 return; 2180 2181 ds_destroyed_scn_phys(ds, &scn->scn_phys); 2182 ds_destroyed_scn_phys(ds, &scn->scn_phys_cached); 2183 2184 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2185 scan_ds_queue_remove(scn, ds->ds_object); 2186 if (ds->ds_is_snapshot) 2187 scan_ds_queue_insert(scn, 2188 dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg); 2189 } 2190 2191 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2192 ds->ds_object, &mintxg) == 0) { 2193 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); 2194 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2195 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2196 if (ds->ds_is_snapshot) { 2197 /* 2198 * We keep the same mintxg; it could be > 2199 * ds_creation_txg if the previous snapshot was 2200 * deleted too. 2201 */ 2202 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2203 scn->scn_phys.scn_queue_obj, 2204 dsl_dataset_phys(ds)->ds_next_snap_obj, 2205 mintxg, tx) == 0); 2206 zfs_dbgmsg("destroying ds %llu on %s; in queue; " 2207 "replacing with %llu", 2208 (u_longlong_t)ds->ds_object, 2209 dp->dp_spa->spa_name, 2210 (u_longlong_t)dsl_dataset_phys(ds)-> 2211 ds_next_snap_obj); 2212 } else { 2213 zfs_dbgmsg("destroying ds %llu on %s; in queue; " 2214 "removing", 2215 (u_longlong_t)ds->ds_object, 2216 dp->dp_spa->spa_name); 2217 } 2218 } 2219 2220 /* 2221 * dsl_scan_sync() should be called after this, and should sync 2222 * out our changed state, but just to be safe, do it here. 2223 */ 2224 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2225 } 2226 2227 static void 2228 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark) 2229 { 2230 if (scn_bookmark->zb_objset == ds->ds_object) { 2231 scn_bookmark->zb_objset = 2232 dsl_dataset_phys(ds)->ds_prev_snap_obj; 2233 zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; " 2234 "reset zb_objset to %llu", 2235 (u_longlong_t)ds->ds_object, 2236 ds->ds_dir->dd_pool->dp_spa->spa_name, 2237 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2238 } 2239 } 2240 2241 /* 2242 * Called when a dataset is snapshotted.
If we were currently traversing 2243 * this snapshot, we reset our bookmark to point at the newly created 2244 * snapshot. We also modify our work queue to remove the old snapshot and 2245 * replace with the new one. 2246 */ 2247 void 2248 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) 2249 { 2250 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2251 dsl_scan_t *scn = dp->dp_scan; 2252 uint64_t mintxg; 2253 2254 if (!dsl_scan_is_running(scn)) 2255 return; 2256 2257 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); 2258 2259 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); 2260 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); 2261 2262 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2263 scan_ds_queue_remove(scn, ds->ds_object); 2264 scan_ds_queue_insert(scn, 2265 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); 2266 } 2267 2268 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2269 ds->ds_object, &mintxg) == 0) { 2270 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2271 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2272 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2273 scn->scn_phys.scn_queue_obj, 2274 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); 2275 zfs_dbgmsg("snapshotting ds %llu on %s; in queue; " 2276 "replacing with %llu", 2277 (u_longlong_t)ds->ds_object, 2278 dp->dp_spa->spa_name, 2279 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2280 } 2281 2282 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2283 } 2284 2285 static void 2286 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, 2287 zbookmark_phys_t *scn_bookmark) 2288 { 2289 if (scn_bookmark->zb_objset == ds1->ds_object) { 2290 scn_bookmark->zb_objset = ds2->ds_object; 2291 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " 2292 "reset zb_objset to %llu", 2293 (u_longlong_t)ds1->ds_object, 2294 ds1->ds_dir->dd_pool->dp_spa->spa_name, 2295 (u_longlong_t)ds2->ds_object); 2296 } else if (scn_bookmark->zb_objset == ds2->ds_object) { 2297 scn_bookmark->zb_objset = ds1->ds_object; 2298 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " 2299 "reset zb_objset to %llu", 2300 (u_longlong_t)ds2->ds_object, 2301 ds2->ds_dir->dd_pool->dp_spa->spa_name, 2302 (u_longlong_t)ds1->ds_object); 2303 } 2304 } 2305 2306 /* 2307 * Called when an origin dataset and its clone are swapped. If we were 2308 * currently traversing the dataset, we need to switch to traversing the 2309 * newly promoted clone. 2310 */ 2311 void 2312 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) 2313 { 2314 dsl_pool_t *dp = ds1->ds_dir->dd_pool; 2315 dsl_scan_t *scn = dp->dp_scan; 2316 uint64_t mintxg1, mintxg2; 2317 boolean_t ds1_queued, ds2_queued; 2318 2319 if (!dsl_scan_is_running(scn)) 2320 return; 2321 2322 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); 2323 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); 2324 2325 /* 2326 * Handle the in-memory scan queue. 2327 */ 2328 ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1); 2329 ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2); 2330 2331 /* Sanity checking. 
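 * At the point of a clone swap both datasets carry the same
 * ds_prev_snap_txg, so whichever of the two is queued must be queued
 * at that common txg; that is what is asserted below.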
*/ 2332 if (ds1_queued) { 2333 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2334 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2335 } 2336 if (ds2_queued) { 2337 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2338 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2339 } 2340 2341 if (ds1_queued && ds2_queued) { 2342 /* 2343 * If both are queued, we don't need to do anything. 2344 * The swapping code below would not handle this case correctly, 2345 * since we can't insert ds2 if it is already there. That's 2346 * because scan_ds_queue_insert() prohibits a duplicate insert 2347 * and panics. 2348 */ 2349 } else if (ds1_queued) { 2350 scan_ds_queue_remove(scn, ds1->ds_object); 2351 scan_ds_queue_insert(scn, ds2->ds_object, mintxg1); 2352 } else if (ds2_queued) { 2353 scan_ds_queue_remove(scn, ds2->ds_object); 2354 scan_ds_queue_insert(scn, ds1->ds_object, mintxg2); 2355 } 2356 2357 /* 2358 * Handle the on-disk scan queue. 2359 * The on-disk state is an out-of-date version of the in-memory state, 2360 * so the in-memory and on-disk values for ds1_queued and ds2_queued may 2361 * be different. Therefore we need to apply the swap logic to the 2362 * on-disk state independently of the in-memory state. 2363 */ 2364 ds1_queued = zap_lookup_int_key(dp->dp_meta_objset, 2365 scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0; 2366 ds2_queued = zap_lookup_int_key(dp->dp_meta_objset, 2367 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0; 2368 2369 /* Sanity checking. */ 2370 if (ds1_queued) { 2371 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2372 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2373 } 2374 if (ds2_queued) { 2375 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2376 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2377 } 2378 2379 if (ds1_queued && ds2_queued) { 2380 /* 2381 * If both are queued, we don't need to do anything. 2382 * Alternatively, we could check for EEXIST from 2383 * zap_add_int_key() and back out to the original state, but 2384 * that would be more work than checking for this case upfront. 
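 *
 * Either way the net effect is the same: each dataset object stays
 * keyed in the queue ZAP with the common mintxg checked above, so
 * leaving both existing entries in place is correct.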
2385 */ 2386 } else if (ds1_queued) { 2387 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2388 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 2389 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2390 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx)); 2391 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; " 2392 "replacing with %llu", 2393 (u_longlong_t)ds1->ds_object, 2394 dp->dp_spa->spa_name, 2395 (u_longlong_t)ds2->ds_object); 2396 } else if (ds2_queued) { 2397 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2398 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 2399 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2400 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx)); 2401 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; " 2402 "replacing with %llu", 2403 (u_longlong_t)ds2->ds_object, 2404 dp->dp_spa->spa_name, 2405 (u_longlong_t)ds1->ds_object); 2406 } 2407 2408 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2409 } 2410 2411 static int 2412 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2413 { 2414 uint64_t originobj = *(uint64_t *)arg; 2415 dsl_dataset_t *ds; 2416 int err; 2417 dsl_scan_t *scn = dp->dp_scan; 2418 2419 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) 2420 return (0); 2421 2422 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2423 if (err) 2424 return (err); 2425 2426 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { 2427 dsl_dataset_t *prev; 2428 err = dsl_dataset_hold_obj(dp, 2429 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2430 2431 dsl_dataset_rele(ds, FTAG); 2432 if (err) 2433 return (err); 2434 ds = prev; 2435 } 2436 scan_ds_queue_insert(scn, ds->ds_object, 2437 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2438 dsl_dataset_rele(ds, FTAG); 2439 return (0); 2440 } 2441 2442 static void 2443 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 2444 { 2445 dsl_pool_t *dp = scn->scn_dp; 2446 dsl_dataset_t *ds; 2447 2448 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2449 2450 if (scn->scn_phys.scn_cur_min_txg >= 2451 scn->scn_phys.scn_max_txg) { 2452 /* 2453 * This can happen if this snapshot was created after the 2454 * scan started, and we already completed a previous snapshot 2455 * that was created after the scan started. This snapshot 2456 * only references blocks with: 2457 * 2458 * birth < our ds_creation_txg 2459 * cur_min_txg is no less than ds_creation_txg. 2460 * We have already visited these blocks. 2461 * or 2462 * birth > scn_max_txg 2463 * The scan requested not to visit these blocks. 2464 * 2465 * Subsequent snapshots (and clones) can reference our 2466 * blocks, or blocks with even higher birth times. 2467 * Therefore we do not need to visit them either, 2468 * so we do not add them to the work queue. 2469 * 2470 * Note that checking for cur_min_txg >= cur_max_txg 2471 * is not sufficient, because in that case we may need to 2472 * visit subsequent snapshots. This happens when min_txg > 0, 2473 * which raises cur_min_txg. In this case we will visit 2474 * this dataset but skip all of its blocks, because the 2475 * rootbp's birth time is < cur_min_txg. Then we will 2476 * add the next snapshots/clones to the work queue. 
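 *
 * Put differently: dsl_scan_visitbp() only issues I/O for blocks
 * whose birth txg lies in (cur_min_txg, cur_max_txg], and cur_max_txg
 * never exceeds scn_max_txg, so once cur_min_txg >= scn_max_txg that
 * window is empty and visiting this dataset (or queueing its
 * descendants) could not produce any work.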
2477 */ 2478 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2479 dsl_dataset_name(ds, dsname); 2480 zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because " 2481 "cur_min_txg (%llu) >= max_txg (%llu)", 2482 (longlong_t)dsobj, dsname, 2483 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2484 (longlong_t)scn->scn_phys.scn_max_txg); 2485 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); 2486 2487 goto out; 2488 } 2489 2490 /* 2491 * Only the ZIL in the head (non-snapshot) is valid. Even though 2492 * snapshots can have ZIL block pointers (which may be the same 2493 * BP as in the head), they must be ignored. In addition, $ORIGIN 2494 * doesn't have an objset (i.e. its ds_bp is a hole) so we don't 2495 * need to look for a ZIL in it either. So we traverse the ZIL here, 2496 * rather than in scan_recurse(), because the regular snapshot 2497 * block-sharing rules don't apply to it. 2498 */ 2499 if (!dsl_dataset_is_snapshot(ds) && 2500 (dp->dp_origin_snap == NULL || 2501 ds->ds_dir != dp->dp_origin_snap->ds_dir)) { 2502 objset_t *os; 2503 if (dmu_objset_from_ds(ds, &os) != 0) { 2504 goto out; 2505 } 2506 dsl_scan_zil(dp, &os->os_zil_header); 2507 } 2508 2509 /* 2510 * Iterate over the bps in this ds. 2511 */ 2512 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2513 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 2514 dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx); 2515 rrw_exit(&ds->ds_bp_rwlock, FTAG); 2516 2517 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2518 dsl_dataset_name(ds, dsname); 2519 zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " 2520 "suspending=%u", 2521 (longlong_t)dsobj, dsname, 2522 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2523 (longlong_t)scn->scn_phys.scn_cur_max_txg, 2524 (int)scn->scn_suspending); 2525 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); 2526 2527 if (scn->scn_suspending) 2528 goto out; 2529 2530 /* 2531 * We've finished this pass over this dataset. 2532 */ 2533 2534 /* 2535 * If we did not completely visit this dataset, do another pass. 2536 */ 2537 if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) { 2538 zfs_dbgmsg("incomplete pass on %s; visiting again", 2539 dp->dp_spa->spa_name); 2540 scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN; 2541 scan_ds_queue_insert(scn, ds->ds_object, 2542 scn->scn_phys.scn_cur_max_txg); 2543 goto out; 2544 } 2545 2546 /* 2547 * Add descendant datasets to work queue. 2548 */ 2549 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { 2550 scan_ds_queue_insert(scn, 2551 dsl_dataset_phys(ds)->ds_next_snap_obj, 2552 dsl_dataset_phys(ds)->ds_creation_txg); 2553 } 2554 if (dsl_dataset_phys(ds)->ds_num_children > 1) { 2555 boolean_t usenext = B_FALSE; 2556 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { 2557 uint64_t count; 2558 /* 2559 * A bug in a previous version of the code could 2560 * cause upgrade_clones_cb() to not set 2561 * ds_next_snap_obj when it should, leading to a 2562 * missing entry. Therefore we can only use the 2563 * next_clones_obj when its count is correct.
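 *
 * When the count does not match (or there is no next_clones_obj at
 * all) we fall back to dmu_objset_find_dp() with enqueue_clones_cb()
 * below, which walks the dataset tree to find this origin's clones
 * the slow but reliable way.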
2564 */ 2565 int err = zap_count(dp->dp_meta_objset, 2566 dsl_dataset_phys(ds)->ds_next_clones_obj, &count); 2567 if (err == 0 && 2568 count == dsl_dataset_phys(ds)->ds_num_children - 1) 2569 usenext = B_TRUE; 2570 } 2571 2572 if (usenext) { 2573 zap_cursor_t zc; 2574 zap_attribute_t za; 2575 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2576 dsl_dataset_phys(ds)->ds_next_clones_obj); 2577 zap_cursor_retrieve(&zc, &za) == 0; 2578 (void) zap_cursor_advance(&zc)) { 2579 scan_ds_queue_insert(scn, 2580 zfs_strtonum(za.za_name, NULL), 2581 dsl_dataset_phys(ds)->ds_creation_txg); 2582 } 2583 zap_cursor_fini(&zc); 2584 } else { 2585 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2586 enqueue_clones_cb, &ds->ds_object, 2587 DS_FIND_CHILDREN)); 2588 } 2589 } 2590 2591 out: 2592 dsl_dataset_rele(ds, FTAG); 2593 } 2594 2595 static int 2596 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2597 { 2598 (void) arg; 2599 dsl_dataset_t *ds; 2600 int err; 2601 dsl_scan_t *scn = dp->dp_scan; 2602 2603 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2604 if (err) 2605 return (err); 2606 2607 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { 2608 dsl_dataset_t *prev; 2609 err = dsl_dataset_hold_obj(dp, 2610 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2611 if (err) { 2612 dsl_dataset_rele(ds, FTAG); 2613 return (err); 2614 } 2615 2616 /* 2617 * If this is a clone, we don't need to worry about it for now. 2618 */ 2619 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { 2620 dsl_dataset_rele(ds, FTAG); 2621 dsl_dataset_rele(prev, FTAG); 2622 return (0); 2623 } 2624 dsl_dataset_rele(ds, FTAG); 2625 ds = prev; 2626 } 2627 2628 scan_ds_queue_insert(scn, ds->ds_object, 2629 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2630 dsl_dataset_rele(ds, FTAG); 2631 return (0); 2632 } 2633 2634 void 2635 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, 2636 ddt_entry_t *dde, dmu_tx_t *tx) 2637 { 2638 (void) tx; 2639 const ddt_key_t *ddk = &dde->dde_key; 2640 ddt_phys_t *ddp = dde->dde_phys; 2641 blkptr_t bp; 2642 zbookmark_phys_t zb = { 0 }; 2643 2644 if (!dsl_scan_is_running(scn)) 2645 return; 2646 2647 /* 2648 * This function is special because it is the only thing 2649 * that can add scan_io_t's to the vdev scan queues from 2650 * outside dsl_scan_sync(). For the most part this is ok 2651 * as long as it is called from within syncing context. 2652 * However, dsl_scan_sync() expects that no new sio's will 2653 * be added between when all the work for a scan is done 2654 * and the next txg when the scan is actually marked as 2655 * completed. This check ensures we do not issue new sio's 2656 * during this period. 2657 */ 2658 if (scn->scn_done_txg != 0) 2659 return; 2660 2661 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2662 if (ddp->ddp_phys_birth == 0 || 2663 ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) 2664 continue; 2665 ddt_bp_create(checksum, ddk, ddp, &bp); 2666 2667 scn->scn_visited_this_txg++; 2668 scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb); 2669 } 2670 } 2671 2672 /* 2673 * Scrub/dedup interaction. 2674 * 2675 * If there are N references to a deduped block, we don't want to scrub it 2676 * N times -- ideally, we should scrub it exactly once. 2677 * 2678 * We leverage the fact that the dde's replication class (enum ddt_class) 2679 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest 2680 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order. 
2681 * 2682 * To prevent excess scrubbing, the scrub begins by walking the DDT 2683 * to find all blocks with refcnt > 1, and scrubs each of these once. 2684 * Since there are two replication classes which contain blocks with 2685 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first. 2686 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1. 2687 * 2688 * There would be nothing more to say if a block's refcnt couldn't change 2689 * during a scrub, but of course it can so we must account for changes 2690 * in a block's replication class. 2691 * 2692 * Here's an example of what can occur: 2693 * 2694 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1 2695 * when visited during the top-down scrub phase, it will be scrubbed twice. 2696 * This negates our scrub optimization, but is otherwise harmless. 2697 * 2698 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1 2699 * on each visit during the top-down scrub phase, it will never be scrubbed. 2700 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's 2701 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to 2702 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1 2703 * while a scrub is in progress, it scrubs the block right then. 2704 */ 2705 static void 2706 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) 2707 { 2708 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; 2709 ddt_entry_t dde = {{{{0}}}}; 2710 int error; 2711 uint64_t n = 0; 2712 2713 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { 2714 ddt_t *ddt; 2715 2716 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) 2717 break; 2718 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", 2719 (longlong_t)ddb->ddb_class, 2720 (longlong_t)ddb->ddb_type, 2721 (longlong_t)ddb->ddb_checksum, 2722 (longlong_t)ddb->ddb_cursor); 2723 2724 /* There should be no pending changes to the dedup table */ 2725 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; 2726 ASSERT(avl_first(&ddt->ddt_tree) == NULL); 2727 2728 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); 2729 n++; 2730 2731 if (dsl_scan_check_suspend(scn, NULL)) 2732 break; 2733 } 2734 2735 zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; " 2736 "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name, 2737 (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); 2738 2739 ASSERT(error == 0 || error == ENOENT); 2740 ASSERT(error != ENOENT || 2741 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); 2742 } 2743 2744 static uint64_t 2745 dsl_scan_ds_maxtxg(dsl_dataset_t *ds) 2746 { 2747 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; 2748 if (ds->ds_is_snapshot) 2749 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); 2750 return (smt); 2751 } 2752 2753 static void 2754 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) 2755 { 2756 scan_ds_t *sds; 2757 dsl_pool_t *dp = scn->scn_dp; 2758 2759 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= 2760 scn->scn_phys.scn_ddt_class_max) { 2761 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2762 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2763 dsl_scan_ddt(scn, tx); 2764 if (scn->scn_suspending) 2765 return; 2766 } 2767 2768 if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { 2769 /* First do the MOS & ORIGIN */ 2770 2771 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2772 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2773 
dsl_scan_visit_rootbp(scn, NULL, 2774 &dp->dp_meta_rootbp, tx); 2775 spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); 2776 if (scn->scn_suspending) 2777 return; 2778 2779 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { 2780 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2781 enqueue_cb, NULL, DS_FIND_CHILDREN)); 2782 } else { 2783 dsl_scan_visitds(scn, 2784 dp->dp_origin_snap->ds_object, tx); 2785 } 2786 ASSERT(!scn->scn_suspending); 2787 } else if (scn->scn_phys.scn_bookmark.zb_objset != 2788 ZB_DESTROYED_OBJSET) { 2789 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; 2790 /* 2791 * If we were suspended, continue from here. Note if the 2792 * ds we were suspended on was deleted, the zb_objset may 2793 * be -1, so we will skip this and find a new objset 2794 * below. 2795 */ 2796 dsl_scan_visitds(scn, dsobj, tx); 2797 if (scn->scn_suspending) 2798 return; 2799 } 2800 2801 /* 2802 * In case we suspended right at the end of the ds, zero the 2803 * bookmark so we don't think that we're still trying to resume. 2804 */ 2805 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t)); 2806 2807 /* 2808 * Keep pulling things out of the dataset avl queue. Updates to the 2809 * persistent zap-object-as-queue happen only at checkpoints. 2810 */ 2811 while ((sds = avl_first(&scn->scn_queue)) != NULL) { 2812 dsl_dataset_t *ds; 2813 uint64_t dsobj = sds->sds_dsobj; 2814 uint64_t txg = sds->sds_txg; 2815 2816 /* dequeue and free the ds from the queue */ 2817 scan_ds_queue_remove(scn, dsobj); 2818 sds = NULL; 2819 2820 /* set up min / max txg */ 2821 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2822 if (txg != 0) { 2823 scn->scn_phys.scn_cur_min_txg = 2824 MAX(scn->scn_phys.scn_min_txg, txg); 2825 } else { 2826 scn->scn_phys.scn_cur_min_txg = 2827 MAX(scn->scn_phys.scn_min_txg, 2828 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2829 } 2830 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); 2831 dsl_dataset_rele(ds, FTAG); 2832 2833 dsl_scan_visitds(scn, dsobj, tx); 2834 if (scn->scn_suspending) 2835 return; 2836 } 2837 2838 /* No more objsets to fetch, we're done */ 2839 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; 2840 ASSERT0(scn->scn_suspending); 2841 } 2842 2843 static uint64_t 2844 dsl_scan_count_data_disks(spa_t *spa) 2845 { 2846 vdev_t *rvd = spa->spa_root_vdev; 2847 uint64_t i, leaves = 0; 2848 2849 for (i = 0; i < rvd->vdev_children; i++) { 2850 vdev_t *vd = rvd->vdev_child[i]; 2851 if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache) 2852 continue; 2853 leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd); 2854 } 2855 return (leaves); 2856 } 2857 2858 static void 2859 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) 2860 { 2861 int i; 2862 uint64_t cur_size = 0; 2863 2864 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 2865 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); 2866 } 2867 2868 q->q_total_zio_size_this_txg += cur_size; 2869 q->q_zios_this_txg++; 2870 } 2871 2872 static void 2873 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, 2874 uint64_t end) 2875 { 2876 q->q_total_seg_size_this_txg += end - start; 2877 q->q_segs_this_txg++; 2878 } 2879 2880 static boolean_t 2881 scan_io_queue_check_suspend(dsl_scan_t *scn) 2882 { 2883 /* See comment in dsl_scan_check_suspend() */ 2884 uint64_t curr_time_ns = gethrtime(); 2885 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 2886 uint64_t sync_time_ns = curr_time_ns - 2887 scn->scn_dp->dp_spa->spa_sync_starttime; 2888 uint64_t 
dirty_min_bytes = zfs_dirty_data_max * 2889 zfs_vdev_async_write_active_min_dirty_percent / 100; 2890 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 2891 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 2892 2893 return ((NSEC2MSEC(scan_time_ns) > mintime && 2894 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes || 2895 txg_sync_waiting(scn->scn_dp) || 2896 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 2897 spa_shutting_down(scn->scn_dp->dp_spa)); 2898 } 2899 2900 /* 2901 * Given a list of scan_io_t's in io_list, this issues the I/Os out to 2902 * disk. This consumes the io_list and frees the scan_io_t's. This is 2903 * called when emptying queues, either when we're up against the memory 2904 * limit or when we have finished scanning. Returns B_TRUE if we stopped 2905 * processing the list before we finished. Any sios that were not issued 2906 * will remain in the io_list. 2907 */ 2908 static boolean_t 2909 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) 2910 { 2911 dsl_scan_t *scn = queue->q_scn; 2912 scan_io_t *sio; 2913 boolean_t suspended = B_FALSE; 2914 2915 while ((sio = list_head(io_list)) != NULL) { 2916 blkptr_t bp; 2917 2918 if (scan_io_queue_check_suspend(scn)) { 2919 suspended = B_TRUE; 2920 break; 2921 } 2922 2923 sio2bp(sio, &bp); 2924 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, 2925 &sio->sio_zb, queue); 2926 (void) list_remove_head(io_list); 2927 scan_io_queues_update_zio_stats(queue, &bp); 2928 sio_free(sio); 2929 } 2930 return (suspended); 2931 } 2932 2933 /* 2934 * This function removes sios from an IO queue which reside within a given 2935 * range_seg_t and inserts them (in offset order) into a list. Note that 2936 * we only ever return a maximum of 32 sios at once. If there are more sios 2937 * to process within this segment that did not make it onto the list we 2938 * return B_TRUE and otherwise B_FALSE. 2939 */ 2940 static boolean_t 2941 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) 2942 { 2943 scan_io_t *srch_sio, *sio, *next_sio; 2944 avl_index_t idx; 2945 uint_t num_sios = 0; 2946 int64_t bytes_issued = 0; 2947 2948 ASSERT(rs != NULL); 2949 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2950 2951 srch_sio = sio_alloc(1); 2952 srch_sio->sio_nr_dvas = 1; 2953 SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr)); 2954 2955 /* 2956 * The exact start of the extent might not contain any matching zios, 2957 * so if that's the case, examine the next one in the tree. 2958 */ 2959 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 2960 sio_free(srch_sio); 2961 2962 if (sio == NULL) 2963 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); 2964 2965 while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 2966 queue->q_exts_by_addr) && num_sios <= 32) { 2967 ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs, 2968 queue->q_exts_by_addr)); 2969 ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs, 2970 queue->q_exts_by_addr)); 2971 2972 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); 2973 avl_remove(&queue->q_sios_by_addr, sio); 2974 if (avl_is_empty(&queue->q_sios_by_addr)) 2975 atomic_add_64(&queue->q_scn->scn_queues_pending, -1); 2976 queue->q_sio_memused -= SIO_GET_MUSED(sio); 2977 2978 bytes_issued += SIO_GET_ASIZE(sio); 2979 num_sios++; 2980 list_insert_tail(list, sio); 2981 sio = next_sio; 2982 } 2983 2984 /* 2985 * We limit the number of sios we process at once to 32 to avoid 2986 * biting off more than we can chew. 
If we didn't take everything 2987 * in the segment we update it to reflect the work we were able to 2988 * complete. Otherwise, we remove it from the range tree entirely. 2989 */ 2990 if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 2991 queue->q_exts_by_addr)) { 2992 range_tree_adjust_fill(queue->q_exts_by_addr, rs, 2993 -bytes_issued); 2994 range_tree_resize_segment(queue->q_exts_by_addr, rs, 2995 SIO_GET_OFFSET(sio), rs_get_end(rs, 2996 queue->q_exts_by_addr) - SIO_GET_OFFSET(sio)); 2997 queue->q_last_ext_addr = SIO_GET_OFFSET(sio); 2998 return (B_TRUE); 2999 } else { 3000 uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr); 3001 uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr); 3002 range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart); 3003 queue->q_last_ext_addr = -1; 3004 return (B_FALSE); 3005 } 3006 } 3007 3008 /* 3009 * This is called from the queue emptying thread and selects the next 3010 * extent from which we are to issue I/Os. The behavior of this function 3011 * depends on the state of the scan, the current memory consumption and 3012 * whether or not we are performing a scan shutdown. 3013 * 1) We select extents in an elevator algorithm (LBA-order) if the scan 3014 * needs to perform a checkpoint 3015 * 2) We select the largest available extent if we are up against the 3016 * memory limit. 3017 * 3) Otherwise we don't select any extents. 3018 */ 3019 static range_seg_t * 3020 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) 3021 { 3022 dsl_scan_t *scn = queue->q_scn; 3023 range_tree_t *rt = queue->q_exts_by_addr; 3024 3025 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3026 ASSERT(scn->scn_is_sorted); 3027 3028 if (!scn->scn_checkpointing && !scn->scn_clearing) 3029 return (NULL); 3030 3031 /* 3032 * During normal clearing, we want to issue our largest segments 3033 * first, keeping IO as sequential as possible, and leaving the 3034 * smaller extents for later with the hope that they might eventually 3035 * grow to larger sequential segments. However, when the scan is 3036 * checkpointing, no new extents will be added to the sorting queue, 3037 * so the way we are sorted now is as good as it will ever get. 3038 * In this case, we instead switch to issuing extents in LBA order. 3039 */ 3040 if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) || 3041 zfs_scan_issue_strategy == 1) 3042 return (range_tree_first(rt)); 3043 3044 /* 3045 * Try to continue previous extent if it is not completed yet. After 3046 * shrink in scan_io_queue_gather() it may no longer be the best, but 3047 * otherwise we leave shorter remnant every txg. 3048 */ 3049 uint64_t start; 3050 uint64_t size = 1ULL << rt->rt_shift; 3051 range_seg_t *addr_rs; 3052 if (queue->q_last_ext_addr != -1) { 3053 start = queue->q_last_ext_addr; 3054 addr_rs = range_tree_find(rt, start, size); 3055 if (addr_rs != NULL) 3056 return (addr_rs); 3057 } 3058 3059 /* 3060 * Nothing to continue, so find new best extent. 3061 */ 3062 uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL); 3063 if (v == NULL) 3064 return (NULL); 3065 queue->q_last_ext_addr = start = *v << rt->rt_shift; 3066 3067 /* 3068 * We need to get the original entry in the by_addr tree so we can 3069 * modify it. 
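 *
 * (The by_size btree stores extents only as shifted start offsets, so
 * the value returned by zfs_btree_first() is converted back to an
 * address with << rt_shift and then looked up in q_exts_by_addr; only
 * the range_seg_t from the by_addr tree can be resized or removed as
 * the gathered sios are issued.)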
3070 */ 3071 addr_rs = range_tree_find(rt, start, size); 3072 ASSERT3P(addr_rs, !=, NULL); 3073 ASSERT3U(rs_get_start(addr_rs, rt), ==, start); 3074 ASSERT3U(rs_get_end(addr_rs, rt), >, start); 3075 return (addr_rs); 3076 } 3077 3078 static void 3079 scan_io_queues_run_one(void *arg) 3080 { 3081 dsl_scan_io_queue_t *queue = arg; 3082 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 3083 boolean_t suspended = B_FALSE; 3084 range_seg_t *rs; 3085 scan_io_t *sio; 3086 zio_t *zio; 3087 list_t sio_list; 3088 3089 ASSERT(queue->q_scn->scn_is_sorted); 3090 3091 list_create(&sio_list, sizeof (scan_io_t), 3092 offsetof(scan_io_t, sio_nodes.sio_list_node)); 3093 zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa, 3094 NULL, NULL, NULL, ZIO_FLAG_CANFAIL); 3095 mutex_enter(q_lock); 3096 queue->q_zio = zio; 3097 3098 /* Calculate maximum in-flight bytes for this vdev. */ 3099 queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit * 3100 (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd))); 3101 3102 /* reset per-queue scan statistics for this txg */ 3103 queue->q_total_seg_size_this_txg = 0; 3104 queue->q_segs_this_txg = 0; 3105 queue->q_total_zio_size_this_txg = 0; 3106 queue->q_zios_this_txg = 0; 3107 3108 /* loop until we run out of time or sios */ 3109 while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) { 3110 uint64_t seg_start = 0, seg_end = 0; 3111 boolean_t more_left; 3112 3113 ASSERT(list_is_empty(&sio_list)); 3114 3115 /* loop while we still have sios left to process in this rs */ 3116 do { 3117 scan_io_t *first_sio, *last_sio; 3118 3119 /* 3120 * We have selected which extent needs to be 3121 * processed next. Gather up the corresponding sios. 3122 */ 3123 more_left = scan_io_queue_gather(queue, rs, &sio_list); 3124 ASSERT(!list_is_empty(&sio_list)); 3125 first_sio = list_head(&sio_list); 3126 last_sio = list_tail(&sio_list); 3127 3128 seg_end = SIO_GET_END_OFFSET(last_sio); 3129 if (seg_start == 0) 3130 seg_start = SIO_GET_OFFSET(first_sio); 3131 3132 /* 3133 * Issuing sios can take a long time so drop the 3134 * queue lock. The sio queue won't be updated by 3135 * other threads since we're in syncing context so 3136 * we can be sure that our trees will remain exactly 3137 * as we left them. 3138 */ 3139 mutex_exit(q_lock); 3140 suspended = scan_io_queue_issue(queue, &sio_list); 3141 mutex_enter(q_lock); 3142 3143 if (suspended) 3144 break; 3145 } while (more_left); 3146 3147 /* update statistics for debugging purposes */ 3148 scan_io_queues_update_seg_stats(queue, seg_start, seg_end); 3149 3150 if (suspended) 3151 break; 3152 } 3153 3154 /* 3155 * If we were suspended in the middle of processing, 3156 * requeue any unfinished sios and exit. 3157 */ 3158 while ((sio = list_head(&sio_list)) != NULL) { 3159 list_remove(&sio_list, sio); 3160 scan_io_queue_insert_impl(queue, sio); 3161 } 3162 3163 queue->q_zio = NULL; 3164 mutex_exit(q_lock); 3165 zio_nowait(zio); 3166 list_destroy(&sio_list); 3167 } 3168 3169 /* 3170 * Performs an emptying run on all scan queues in the pool. This just 3171 * punches out one thread per top-level vdev, each of which processes 3172 * only that vdev's scan queue. We can parallelize the I/O here because 3173 * we know that each queue's I/Os only affect its own top-level vdev. 3174 * 3175 * This function waits for the queue runs to complete, and must be 3176 * called from dsl_scan_sync (or in general, syncing context). 
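 *
 * Roughly, after creating scn_taskq with one thread per top-level
 * vdev, the body below amounts to (sketch of the code that follows,
 * not additional logic):
 *
 *	for each top-level vdev tvd:
 *		if (tvd->vdev_scan_io_queue != NULL)
 *			taskq_dispatch(scn->scn_taskq,
 *			    scan_io_queues_run_one,
 *			    tvd->vdev_scan_io_queue, TQ_SLEEP);
 *	taskq_wait(scn->scn_taskq);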
3177 */ 3178 static void 3179 scan_io_queues_run(dsl_scan_t *scn) 3180 { 3181 spa_t *spa = scn->scn_dp->dp_spa; 3182 3183 ASSERT(scn->scn_is_sorted); 3184 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3185 3186 if (scn->scn_queues_pending == 0) 3187 return; 3188 3189 if (scn->scn_taskq == NULL) { 3190 int nthreads = spa->spa_root_vdev->vdev_children; 3191 3192 /* 3193 * We need to make this taskq *always* execute as many 3194 * threads in parallel as we have top-level vdevs and no 3195 * less, otherwise strange serialization of the calls to 3196 * scan_io_queues_run_one can occur during spa_sync runs 3197 * and that significantly impacts performance. 3198 */ 3199 scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads, 3200 minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE); 3201 } 3202 3203 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3204 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3205 3206 mutex_enter(&vd->vdev_scan_io_queue_lock); 3207 if (vd->vdev_scan_io_queue != NULL) { 3208 VERIFY(taskq_dispatch(scn->scn_taskq, 3209 scan_io_queues_run_one, vd->vdev_scan_io_queue, 3210 TQ_SLEEP) != TASKQID_INVALID); 3211 } 3212 mutex_exit(&vd->vdev_scan_io_queue_lock); 3213 } 3214 3215 /* 3216 * Wait for the queues to finish issuing their IOs for this run 3217 * before we return. There may still be IOs in flight at this 3218 * point. 3219 */ 3220 taskq_wait(scn->scn_taskq); 3221 } 3222 3223 static boolean_t 3224 dsl_scan_async_block_should_pause(dsl_scan_t *scn) 3225 { 3226 uint64_t elapsed_nanosecs; 3227 3228 if (zfs_recover) 3229 return (B_FALSE); 3230 3231 if (zfs_async_block_max_blocks != 0 && 3232 scn->scn_visited_this_txg >= zfs_async_block_max_blocks) { 3233 return (B_TRUE); 3234 } 3235 3236 if (zfs_max_async_dedup_frees != 0 && 3237 scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) { 3238 return (B_TRUE); 3239 } 3240 3241 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; 3242 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || 3243 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && 3244 txg_sync_waiting(scn->scn_dp)) || 3245 spa_shutting_down(scn->scn_dp->dp_spa)); 3246 } 3247 3248 static int 3249 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3250 { 3251 dsl_scan_t *scn = arg; 3252 3253 if (!scn->scn_is_bptree || 3254 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { 3255 if (dsl_scan_async_block_should_pause(scn)) 3256 return (SET_ERROR(ERESTART)); 3257 } 3258 3259 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, 3260 dmu_tx_get_txg(tx), bp, 0)); 3261 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 3262 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), 3263 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 3264 scn->scn_visited_this_txg++; 3265 if (BP_GET_DEDUP(bp)) 3266 scn->scn_dedup_frees_this_txg++; 3267 return (0); 3268 } 3269 3270 static void 3271 dsl_scan_update_stats(dsl_scan_t *scn) 3272 { 3273 spa_t *spa = scn->scn_dp->dp_spa; 3274 uint64_t i; 3275 uint64_t seg_size_total = 0, zio_size_total = 0; 3276 uint64_t seg_count_total = 0, zio_count_total = 0; 3277 3278 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3279 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3280 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; 3281 3282 if (queue == NULL) 3283 continue; 3284 3285 seg_size_total += queue->q_total_seg_size_this_txg; 3286 zio_size_total += queue->q_total_zio_size_this_txg; 3287 seg_count_total += queue->q_segs_this_txg; 3288 zio_count_total += 
queue->q_zios_this_txg; 3289 } 3290 3291 if (seg_count_total == 0 || zio_count_total == 0) { 3292 scn->scn_avg_seg_size_this_txg = 0; 3293 scn->scn_avg_zio_size_this_txg = 0; 3294 scn->scn_segs_this_txg = 0; 3295 scn->scn_zios_this_txg = 0; 3296 return; 3297 } 3298 3299 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; 3300 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; 3301 scn->scn_segs_this_txg = seg_count_total; 3302 scn->scn_zios_this_txg = zio_count_total; 3303 } 3304 3305 static int 3306 bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3307 dmu_tx_t *tx) 3308 { 3309 ASSERT(!bp_freed); 3310 return (dsl_scan_free_block_cb(arg, bp, tx)); 3311 } 3312 3313 static int 3314 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3315 dmu_tx_t *tx) 3316 { 3317 ASSERT(!bp_freed); 3318 dsl_scan_t *scn = arg; 3319 const dva_t *dva = &bp->blk_dva[0]; 3320 3321 if (dsl_scan_async_block_should_pause(scn)) 3322 return (SET_ERROR(ERESTART)); 3323 3324 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, 3325 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), 3326 DVA_GET_ASIZE(dva), tx); 3327 scn->scn_visited_this_txg++; 3328 return (0); 3329 } 3330 3331 boolean_t 3332 dsl_scan_active(dsl_scan_t *scn) 3333 { 3334 spa_t *spa = scn->scn_dp->dp_spa; 3335 uint64_t used = 0, comp, uncomp; 3336 boolean_t clones_left; 3337 3338 if (spa->spa_load_state != SPA_LOAD_NONE) 3339 return (B_FALSE); 3340 if (spa_shutting_down(spa)) 3341 return (B_FALSE); 3342 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || 3343 (scn->scn_async_destroying && !scn->scn_async_stalled)) 3344 return (B_TRUE); 3345 3346 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { 3347 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, 3348 &used, &comp, &uncomp); 3349 } 3350 clones_left = spa_livelist_delete_check(spa); 3351 return ((used != 0) || (clones_left)); 3352 } 3353 3354 static boolean_t 3355 dsl_scan_check_deferred(vdev_t *vd) 3356 { 3357 boolean_t need_resilver = B_FALSE; 3358 3359 for (int c = 0; c < vd->vdev_children; c++) { 3360 need_resilver |= 3361 dsl_scan_check_deferred(vd->vdev_child[c]); 3362 } 3363 3364 if (!vdev_is_concrete(vd) || vd->vdev_aux || 3365 !vd->vdev_ops->vdev_op_leaf) 3366 return (need_resilver); 3367 3368 if (!vd->vdev_resilver_deferred) 3369 need_resilver = B_TRUE; 3370 3371 return (need_resilver); 3372 } 3373 3374 static boolean_t 3375 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, 3376 uint64_t phys_birth) 3377 { 3378 vdev_t *vd; 3379 3380 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 3381 3382 if (vd->vdev_ops == &vdev_indirect_ops) { 3383 /* 3384 * The indirect vdev can point to multiple 3385 * vdevs. For simplicity, always create 3386 * the resilver zio_t. zio_vdev_io_start() 3387 * will bypass the child resilver i/o's if 3388 * they are on vdevs that don't have DTL's. 3389 */ 3390 return (B_TRUE); 3391 } 3392 3393 if (DVA_GET_GANG(dva)) { 3394 /* 3395 * Gang members may be spread across multiple 3396 * vdevs, so the best estimate we have is the 3397 * scrub range, which has already been checked. 3398 * XXX -- it would be better to change our 3399 * allocation policy to ensure that all 3400 * gang members reside on the same vdev. 3401 */ 3402 return (B_TRUE); 3403 } 3404 3405 /* 3406 * Check if the top-level vdev must resilver this offset. 3407 * When the offset does not intersect with a dirty leaf DTL 3408 * then it may be possible to skip the resilver IO. 
The psize 3409 * is provided instead of asize to simplify the check for RAIDZ. 3410 */ 3411 if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth)) 3412 return (B_FALSE); 3413 3414 /* 3415 * Check that this top-level vdev has a device under it which 3416 * is resilvering and is not deferred. 3417 */ 3418 if (!dsl_scan_check_deferred(vd)) 3419 return (B_FALSE); 3420 3421 return (B_TRUE); 3422 } 3423 3424 static int 3425 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) 3426 { 3427 dsl_scan_t *scn = dp->dp_scan; 3428 spa_t *spa = dp->dp_spa; 3429 int err = 0; 3430 3431 if (spa_suspend_async_destroy(spa)) 3432 return (0); 3433 3434 if (zfs_free_bpobj_enabled && 3435 spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3436 scn->scn_is_bptree = B_FALSE; 3437 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; 3438 scn->scn_zio_root = zio_root(spa, NULL, 3439 NULL, ZIO_FLAG_MUSTSUCCEED); 3440 err = bpobj_iterate(&dp->dp_free_bpobj, 3441 bpobj_dsl_scan_free_block_cb, scn, tx); 3442 VERIFY0(zio_wait(scn->scn_zio_root)); 3443 scn->scn_zio_root = NULL; 3444 3445 if (err != 0 && err != ERESTART) 3446 zfs_panic_recover("error %u from bpobj_iterate()", err); 3447 } 3448 3449 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3450 ASSERT(scn->scn_async_destroying); 3451 scn->scn_is_bptree = B_TRUE; 3452 scn->scn_zio_root = zio_root(spa, NULL, 3453 NULL, ZIO_FLAG_MUSTSUCCEED); 3454 err = bptree_iterate(dp->dp_meta_objset, 3455 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); 3456 VERIFY0(zio_wait(scn->scn_zio_root)); 3457 scn->scn_zio_root = NULL; 3458 3459 if (err == EIO || err == ECKSUM) { 3460 err = 0; 3461 } else if (err != 0 && err != ERESTART) { 3462 zfs_panic_recover("error %u from " 3463 "traverse_dataset_destroyed()", err); 3464 } 3465 3466 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { 3467 /* finished; deactivate async destroy feature */ 3468 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); 3469 ASSERT(!spa_feature_is_active(spa, 3470 SPA_FEATURE_ASYNC_DESTROY)); 3471 VERIFY0(zap_remove(dp->dp_meta_objset, 3472 DMU_POOL_DIRECTORY_OBJECT, 3473 DMU_POOL_BPTREE_OBJ, tx)); 3474 VERIFY0(bptree_free(dp->dp_meta_objset, 3475 dp->dp_bptree_obj, tx)); 3476 dp->dp_bptree_obj = 0; 3477 scn->scn_async_destroying = B_FALSE; 3478 scn->scn_async_stalled = B_FALSE; 3479 } else { 3480 /* 3481 * If we didn't make progress, mark the async 3482 * destroy as stalled, so that we will not initiate 3483 * a spa_sync() on its behalf. Note that we only 3484 * check this if we are not finished, because if the 3485 * bptree had no blocks for us to visit, we can 3486 * finish without "making progress". 3487 */ 3488 scn->scn_async_stalled = 3489 (scn->scn_visited_this_txg == 0); 3490 } 3491 } 3492 if (scn->scn_visited_this_txg) { 3493 zfs_dbgmsg("freed %llu blocks in %llums from " 3494 "free_bpobj/bptree on %s in txg %llu; err=%u", 3495 (longlong_t)scn->scn_visited_this_txg, 3496 (longlong_t) 3497 NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), 3498 spa->spa_name, (longlong_t)tx->tx_txg, err); 3499 scn->scn_visited_this_txg = 0; 3500 scn->scn_dedup_frees_this_txg = 0; 3501 3502 /* 3503 * Write out changes to the DDT and the BRT that may be required 3504 * as a result of the blocks freed. This ensures that the DDT 3505 * and the BRT are clean when a scrub/resilver runs. 
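 *
 * (Freeing deduplicated or block-cloned blocks dirties in-core DDT
 * and BRT entries, e.g. by dropping reference counts, so those
 * changes are pushed out with ddt_sync()/brt_sync() before a scrub
 * or resilver walks the tables.)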
3506 */ 3507 ddt_sync(spa, tx->tx_txg); 3508 brt_sync(spa, tx->tx_txg); 3509 } 3510 if (err != 0) 3511 return (err); 3512 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3513 zfs_free_leak_on_eio && 3514 (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || 3515 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || 3516 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { 3517 /* 3518 * We have finished background destroying, but there is still 3519 * some space left in the dp_free_dir. Transfer this leaked 3520 * space to the dp_leak_dir. 3521 */ 3522 if (dp->dp_leak_dir == NULL) { 3523 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 3524 (void) dsl_dir_create_sync(dp, dp->dp_root_dir, 3525 LEAK_DIR_NAME, tx); 3526 VERIFY0(dsl_pool_open_special_dir(dp, 3527 LEAK_DIR_NAME, &dp->dp_leak_dir)); 3528 rrw_exit(&dp->dp_config_rwlock, FTAG); 3529 } 3530 dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD, 3531 dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3532 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3533 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3534 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD, 3535 -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3536 -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3537 -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3538 } 3539 3540 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3541 !spa_livelist_delete_check(spa)) { 3542 /* finished; verify that space accounting went to zero */ 3543 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes); 3544 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes); 3545 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes); 3546 } 3547 3548 spa_notify_waiters(spa); 3549 3550 EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj), 3551 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3552 DMU_POOL_OBSOLETE_BPOBJ)); 3553 if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) { 3554 ASSERT(spa_feature_is_active(dp->dp_spa, 3555 SPA_FEATURE_OBSOLETE_COUNTS)); 3556 3557 scn->scn_is_bptree = B_FALSE; 3558 scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms; 3559 err = bpobj_iterate(&dp->dp_obsolete_bpobj, 3560 dsl_scan_obsolete_block_cb, scn, tx); 3561 if (err != 0 && err != ERESTART) 3562 zfs_panic_recover("error %u from bpobj_iterate()", err); 3563 3564 if (bpobj_is_empty(&dp->dp_obsolete_bpobj)) 3565 dsl_pool_destroy_obsolete_bpobj(dp, tx); 3566 } 3567 return (0); 3568 } 3569 3570 /* 3571 * This is the primary entry point for scans that is called from syncing 3572 * context. Scans must happen entirely during syncing context so that we 3573 * can guarantee that blocks we are currently scanning will not change out 3574 * from under us. While a scan is active, this function controls how quickly 3575 * transaction groups proceed, instead of the normal handling provided by 3576 * txg_sync_thread(). 3577 */ 3578 void 3579 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) 3580 { 3581 int err = 0; 3582 dsl_scan_t *scn = dp->dp_scan; 3583 spa_t *spa = dp->dp_spa; 3584 state_sync_type_t sync_type = SYNC_OPTIONAL; 3585 3586 if (spa->spa_resilver_deferred && 3587 !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 3588 spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx); 3589 3590 /* 3591 * Check for scn_restart_txg before checking spa_load_state, so 3592 * that we can restart an old-style scan while the pool is being 3593 * imported (see dsl_scan_init). 
We also restart scans if there 3594 * is a deferred resilver and the user has manually disabled 3595 * deferred resilvers via the tunable. 3596 */ 3597 if (dsl_scan_restarting(scn, tx) || 3598 (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) { 3599 pool_scan_func_t func = POOL_SCAN_SCRUB; 3600 dsl_scan_done(scn, B_FALSE, tx); 3601 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) 3602 func = POOL_SCAN_RESILVER; 3603 zfs_dbgmsg("restarting scan func=%u on %s txg=%llu", 3604 func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg); 3605 dsl_scan_setup_sync(&func, tx); 3606 } 3607 3608 /* 3609 * Only process scans in sync pass 1. 3610 */ 3611 if (spa_sync_pass(spa) > 1) 3612 return; 3613 3614 /* 3615 * If the spa is shutting down, then stop scanning. This will 3616 * ensure that the scan does not dirty any new data during the 3617 * shutdown phase. 3618 */ 3619 if (spa_shutting_down(spa)) 3620 return; 3621 3622 /* 3623 * If the scan is inactive due to a stalled async destroy, try again. 3624 */ 3625 if (!scn->scn_async_stalled && !dsl_scan_active(scn)) 3626 return; 3627 3628 /* reset scan statistics */ 3629 scn->scn_visited_this_txg = 0; 3630 scn->scn_dedup_frees_this_txg = 0; 3631 scn->scn_holes_this_txg = 0; 3632 scn->scn_lt_min_this_txg = 0; 3633 scn->scn_gt_max_this_txg = 0; 3634 scn->scn_ddt_contained_this_txg = 0; 3635 scn->scn_objsets_visited_this_txg = 0; 3636 scn->scn_avg_seg_size_this_txg = 0; 3637 scn->scn_segs_this_txg = 0; 3638 scn->scn_avg_zio_size_this_txg = 0; 3639 scn->scn_zios_this_txg = 0; 3640 scn->scn_suspending = B_FALSE; 3641 scn->scn_sync_start_time = gethrtime(); 3642 spa->spa_scrub_active = B_TRUE; 3643 3644 /* 3645 * First process the async destroys. If we suspend, don't do 3646 * any scrubbing or resilvering. This ensures that there are no 3647 * async destroys while we are scanning, so the scan code doesn't 3648 * have to worry about traversing it. It is also faster to free the 3649 * blocks than to scrub them. 3650 */ 3651 err = dsl_process_async_destroys(dp, tx); 3652 if (err != 0) 3653 return; 3654 3655 if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) 3656 return; 3657 3658 /* 3659 * Wait a few txgs after importing to begin scanning so that 3660 * we can get the pool imported quickly. 3661 */ 3662 if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) 3663 return; 3664 3665 /* 3666 * zfs_scan_suspend_progress can be set to disable scan progress. 3667 * We don't want to spin the txg_sync thread, so we add a delay 3668 * here to simulate the time spent doing a scan. This is mostly 3669 * useful for testing and debugging. 3670 */ 3671 if (zfs_scan_suspend_progress) { 3672 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; 3673 uint_t mintime = (scn->scn_phys.scn_func == 3674 POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms : 3675 zfs_scrub_min_time_ms; 3676 3677 while (zfs_scan_suspend_progress && 3678 !txg_sync_waiting(scn->scn_dp) && 3679 !spa_shutting_down(scn->scn_dp->dp_spa) && 3680 NSEC2MSEC(scan_time_ns) < mintime) { 3681 delay(hz); 3682 scan_time_ns = gethrtime() - scn->scn_sync_start_time; 3683 } 3684 return; 3685 } 3686 3687 /* 3688 * Disabled by default, set zfs_scan_report_txgs to report 3689 * average performance over the last zfs_scan_report_txgs TXGs. 
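 *
 * For example, setting zfs_scan_report_txgs to 32 resets the pass statistics
 * every 32 TXGs, so the completion estimate shown by 'zpool status' reflects
 * only recently observed scan throughput rather than the whole pass.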
3690 */ 3691 if (!dsl_scan_is_paused_scrub(scn) && zfs_scan_report_txgs != 0 && 3692 tx->tx_txg % zfs_scan_report_txgs == 0) { 3693 scn->scn_issued_before_pass += spa->spa_scan_pass_issued; 3694 spa_scan_stat_init(spa); 3695 } 3696 3697 /* 3698 * It is possible to switch from unsorted to sorted at any time, 3699 * but afterwards the scan will remain sorted unless reloaded from 3700 * a checkpoint after a reboot. 3701 */ 3702 if (!zfs_scan_legacy) { 3703 scn->scn_is_sorted = B_TRUE; 3704 if (scn->scn_last_checkpoint == 0) 3705 scn->scn_last_checkpoint = ddi_get_lbolt(); 3706 } 3707 3708 /* 3709 * For sorted scans, determine what kind of work we will be doing 3710 * this txg based on our memory limitations and whether or not we 3711 * need to perform a checkpoint. 3712 */ 3713 if (scn->scn_is_sorted) { 3714 /* 3715 * If we are over our checkpoint interval, set scn_clearing 3716 * so that we can begin checkpointing immediately. The 3717 * checkpoint allows us to save a consistent bookmark 3718 * representing how much data we have scrubbed so far. 3719 * Otherwise, use the memory limit to determine if we should 3720 * scan for metadata or start issue scrub IOs. We accumulate 3721 * metadata until we hit our hard memory limit at which point 3722 * we issue scrub IOs until we are at our soft memory limit. 3723 */ 3724 if (scn->scn_checkpointing || 3725 ddi_get_lbolt() - scn->scn_last_checkpoint > 3726 SEC_TO_TICK(zfs_scan_checkpoint_intval)) { 3727 if (!scn->scn_checkpointing) 3728 zfs_dbgmsg("begin scan checkpoint for %s", 3729 spa->spa_name); 3730 3731 scn->scn_checkpointing = B_TRUE; 3732 scn->scn_clearing = B_TRUE; 3733 } else { 3734 boolean_t should_clear = dsl_scan_should_clear(scn); 3735 if (should_clear && !scn->scn_clearing) { 3736 zfs_dbgmsg("begin scan clearing for %s", 3737 spa->spa_name); 3738 scn->scn_clearing = B_TRUE; 3739 } else if (!should_clear && scn->scn_clearing) { 3740 zfs_dbgmsg("finish scan clearing for %s", 3741 spa->spa_name); 3742 scn->scn_clearing = B_FALSE; 3743 } 3744 } 3745 } else { 3746 ASSERT0(scn->scn_checkpointing); 3747 ASSERT0(scn->scn_clearing); 3748 } 3749 3750 if (!scn->scn_clearing && scn->scn_done_txg == 0) { 3751 /* Need to scan metadata for more blocks to scrub */ 3752 dsl_scan_phys_t *scnp = &scn->scn_phys; 3753 taskqid_t prefetch_tqid; 3754 3755 /* 3756 * Calculate the max number of in-flight bytes for pool-wide 3757 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max). 3758 * Limits for the issuing phase are done per top-level vdev and 3759 * are handled separately. 
3760 */ 3761 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20, 3762 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa))); 3763 3764 if (scnp->scn_ddt_bookmark.ddb_class <= 3765 scnp->scn_ddt_class_max) { 3766 ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); 3767 zfs_dbgmsg("doing scan sync for %s txg %llu; " 3768 "ddt bm=%llu/%llu/%llu/%llx", 3769 spa->spa_name, 3770 (longlong_t)tx->tx_txg, 3771 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 3772 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 3773 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 3774 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 3775 } else { 3776 zfs_dbgmsg("doing scan sync for %s txg %llu; " 3777 "bm=%llu/%llu/%llu/%llu", 3778 spa->spa_name, 3779 (longlong_t)tx->tx_txg, 3780 (longlong_t)scnp->scn_bookmark.zb_objset, 3781 (longlong_t)scnp->scn_bookmark.zb_object, 3782 (longlong_t)scnp->scn_bookmark.zb_level, 3783 (longlong_t)scnp->scn_bookmark.zb_blkid); 3784 } 3785 3786 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3787 NULL, ZIO_FLAG_CANFAIL); 3788 3789 scn->scn_prefetch_stop = B_FALSE; 3790 prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, 3791 dsl_scan_prefetch_thread, scn, TQ_SLEEP); 3792 ASSERT(prefetch_tqid != TASKQID_INVALID); 3793 3794 dsl_pool_config_enter(dp, FTAG); 3795 dsl_scan_visit(scn, tx); 3796 dsl_pool_config_exit(dp, FTAG); 3797 3798 mutex_enter(&dp->dp_spa->spa_scrub_lock); 3799 scn->scn_prefetch_stop = B_TRUE; 3800 cv_broadcast(&spa->spa_scrub_io_cv); 3801 mutex_exit(&dp->dp_spa->spa_scrub_lock); 3802 3803 taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); 3804 (void) zio_wait(scn->scn_zio_root); 3805 scn->scn_zio_root = NULL; 3806 3807 zfs_dbgmsg("scan visited %llu blocks of %s in %llums " 3808 "(%llu os's, %llu holes, %llu < mintxg, " 3809 "%llu in ddt, %llu > maxtxg)", 3810 (longlong_t)scn->scn_visited_this_txg, 3811 spa->spa_name, 3812 (longlong_t)NSEC2MSEC(gethrtime() - 3813 scn->scn_sync_start_time), 3814 (longlong_t)scn->scn_objsets_visited_this_txg, 3815 (longlong_t)scn->scn_holes_this_txg, 3816 (longlong_t)scn->scn_lt_min_this_txg, 3817 (longlong_t)scn->scn_ddt_contained_this_txg, 3818 (longlong_t)scn->scn_gt_max_this_txg); 3819 3820 if (!scn->scn_suspending) { 3821 ASSERT0(avl_numnodes(&scn->scn_queue)); 3822 scn->scn_done_txg = tx->tx_txg + 1; 3823 if (scn->scn_is_sorted) { 3824 scn->scn_checkpointing = B_TRUE; 3825 scn->scn_clearing = B_TRUE; 3826 scn->scn_issued_before_pass += 3827 spa->spa_scan_pass_issued; 3828 spa_scan_stat_init(spa); 3829 } 3830 zfs_dbgmsg("scan complete for %s txg %llu", 3831 spa->spa_name, 3832 (longlong_t)tx->tx_txg); 3833 } 3834 } else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) { 3835 ASSERT(scn->scn_clearing); 3836 3837 /* need to issue scrubbing IOs from per-vdev queues */ 3838 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3839 NULL, ZIO_FLAG_CANFAIL); 3840 scan_io_queues_run(scn); 3841 (void) zio_wait(scn->scn_zio_root); 3842 scn->scn_zio_root = NULL; 3843 3844 /* calculate and dprintf the current memory usage */ 3845 (void) dsl_scan_should_clear(scn); 3846 dsl_scan_update_stats(scn); 3847 3848 zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) " 3849 "in %llums (avg_block_size = %llu, avg_seg_size = %llu)", 3850 (longlong_t)scn->scn_zios_this_txg, 3851 spa->spa_name, 3852 (longlong_t)scn->scn_segs_this_txg, 3853 (longlong_t)NSEC2MSEC(gethrtime() - 3854 scn->scn_sync_start_time), 3855 (longlong_t)scn->scn_avg_zio_size_this_txg, 3856 (longlong_t)scn->scn_avg_seg_size_this_txg); 3857 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg 
<= tx->tx_txg) { 3858 /* Finished with everything. Mark the scrub as complete */ 3859 zfs_dbgmsg("scan issuing complete txg %llu for %s", 3860 (longlong_t)tx->tx_txg, 3861 spa->spa_name); 3862 ASSERT3U(scn->scn_done_txg, !=, 0); 3863 ASSERT0(spa->spa_scrub_inflight); 3864 ASSERT0(scn->scn_queues_pending); 3865 dsl_scan_done(scn, B_TRUE, tx); 3866 sync_type = SYNC_MANDATORY; 3867 } 3868 3869 dsl_scan_sync_state(scn, tx, sync_type); 3870 } 3871 3872 static void 3873 count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all) 3874 { 3875 /* 3876 * Don't count embedded bp's, since we already did the work of 3877 * scanning these when we scanned the containing block. 3878 */ 3879 if (BP_IS_EMBEDDED(bp)) 3880 return; 3881 3882 /* 3883 * Update the spa's stats on how many bytes we have issued. 3884 * Sequential scrubs create a zio for each DVA of the bp. Each 3885 * of these will include all DVAs for repair purposes, but the 3886 * zio code will only try the first one unless there is an issue. 3887 * Therefore, we should only count the first DVA for these IOs. 3888 */ 3889 atomic_add_64(&spa->spa_scan_pass_issued, 3890 all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0])); 3891 } 3892 3893 static void 3894 count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp) 3895 { 3896 /* 3897 * If we resume after a reboot, zab will be NULL; don't record 3898 * incomplete stats in that case. 3899 */ 3900 if (zab == NULL) 3901 return; 3902 3903 for (int i = 0; i < 4; i++) { 3904 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; 3905 int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; 3906 3907 if (t & DMU_OT_NEWTYPE) 3908 t = DMU_OT_OTHER; 3909 zfs_blkstat_t *zb = &zab->zab_type[l][t]; 3910 int equal; 3911 3912 zb->zb_count++; 3913 zb->zb_asize += BP_GET_ASIZE(bp); 3914 zb->zb_lsize += BP_GET_LSIZE(bp); 3915 zb->zb_psize += BP_GET_PSIZE(bp); 3916 zb->zb_gangs += BP_COUNT_GANG(bp); 3917 3918 switch (BP_GET_NDVAS(bp)) { 3919 case 2: 3920 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3921 DVA_GET_VDEV(&bp->blk_dva[1])) 3922 zb->zb_ditto_2_of_2_samevdev++; 3923 break; 3924 case 3: 3925 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 3926 DVA_GET_VDEV(&bp->blk_dva[1])) + 3927 (DVA_GET_VDEV(&bp->blk_dva[0]) == 3928 DVA_GET_VDEV(&bp->blk_dva[2])) + 3929 (DVA_GET_VDEV(&bp->blk_dva[1]) == 3930 DVA_GET_VDEV(&bp->blk_dva[2])); 3931 if (equal == 1) 3932 zb->zb_ditto_2_of_3_samevdev++; 3933 else if (equal == 3) 3934 zb->zb_ditto_3_of_3_samevdev++; 3935 break; 3936 } 3937 } 3938 } 3939 3940 static void 3941 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) 3942 { 3943 avl_index_t idx; 3944 dsl_scan_t *scn = queue->q_scn; 3945 3946 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3947 3948 if (unlikely(avl_is_empty(&queue->q_sios_by_addr))) 3949 atomic_add_64(&scn->scn_queues_pending, 1); 3950 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { 3951 /* block is already scheduled for reading */ 3952 sio_free(sio); 3953 return; 3954 } 3955 avl_insert(&queue->q_sios_by_addr, sio, idx); 3956 queue->q_sio_memused += SIO_GET_MUSED(sio); 3957 range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), 3958 SIO_GET_ASIZE(sio)); 3959 } 3960 3961 /* 3962 * Given all the info we got from our metadata scanning process, we 3963 * construct a scan_io_t and insert it into the scan sorting queue. The 3964 * I/O must already be suitable for us to process. This is controlled 3965 * by dsl_scan_enqueue(). 
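 *
 * Insertion adds the sio to the per-vdev q_sios_by_addr AVL tree (keyed by
 * LBA) and adds its offset/asize range to the q_exts_by_addr range tree; if
 * the block is already queued, the duplicate sio is simply freed (see
 * scan_io_queue_insert_impl() above).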
3966 */ 3967 static void 3968 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, 3969 int zio_flags, const zbookmark_phys_t *zb) 3970 { 3971 scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp)); 3972 3973 ASSERT0(BP_IS_GANG(bp)); 3974 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3975 3976 bp2sio(bp, sio, dva_i); 3977 sio->sio_flags = zio_flags; 3978 sio->sio_zb = *zb; 3979 3980 queue->q_last_ext_addr = -1; 3981 scan_io_queue_insert_impl(queue, sio); 3982 } 3983 3984 /* 3985 * Given a set of I/O parameters as discovered by the metadata traversal 3986 * process, attempts to place the I/O into the sorted queues (if allowed), 3987 * or immediately executes the I/O. 3988 */ 3989 static void 3990 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 3991 const zbookmark_phys_t *zb) 3992 { 3993 spa_t *spa = dp->dp_spa; 3994 3995 ASSERT(!BP_IS_EMBEDDED(bp)); 3996 3997 /* 3998 * Gang blocks are hard to issue sequentially, so we just issue them 3999 * here immediately instead of queuing them. 4000 */ 4001 if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { 4002 scan_exec_io(dp, bp, zio_flags, zb, NULL); 4003 return; 4004 } 4005 4006 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 4007 dva_t dva; 4008 vdev_t *vdev; 4009 4010 dva = bp->blk_dva[i]; 4011 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); 4012 ASSERT(vdev != NULL); 4013 4014 mutex_enter(&vdev->vdev_scan_io_queue_lock); 4015 if (vdev->vdev_scan_io_queue == NULL) 4016 vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); 4017 ASSERT(dp->dp_scan != NULL); 4018 scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, 4019 i, zio_flags, zb); 4020 mutex_exit(&vdev->vdev_scan_io_queue_lock); 4021 } 4022 } 4023 4024 static int 4025 dsl_scan_scrub_cb(dsl_pool_t *dp, 4026 const blkptr_t *bp, const zbookmark_phys_t *zb) 4027 { 4028 dsl_scan_t *scn = dp->dp_scan; 4029 spa_t *spa = dp->dp_spa; 4030 uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); 4031 size_t psize = BP_GET_PSIZE(bp); 4032 boolean_t needs_io = B_FALSE; 4033 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; 4034 4035 count_block(dp->dp_blkstats, bp); 4036 if (phys_birth <= scn->scn_phys.scn_min_txg || 4037 phys_birth >= scn->scn_phys.scn_max_txg) { 4038 count_block_issued(spa, bp, B_TRUE); 4039 return (0); 4040 } 4041 4042 /* Embedded BP's have phys_birth==0, so we reject them above. */ 4043 ASSERT(!BP_IS_EMBEDDED(bp)); 4044 4045 ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); 4046 if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { 4047 zio_flags |= ZIO_FLAG_SCRUB; 4048 needs_io = B_TRUE; 4049 } else { 4050 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); 4051 zio_flags |= ZIO_FLAG_RESILVER; 4052 needs_io = B_FALSE; 4053 } 4054 4055 /* If it's an intent log block, failure is expected. */ 4056 if (zb->zb_level == ZB_ZIL_LEVEL) 4057 zio_flags |= ZIO_FLAG_SPECULATIVE; 4058 4059 for (int d = 0; d < BP_GET_NDVAS(bp); d++) { 4060 const dva_t *dva = &bp->blk_dva[d]; 4061 4062 /* 4063 * Keep track of how much data we've examined so that 4064 * zpool(8) status can make useful progress reports. 
4065 */ 4066 uint64_t asize = DVA_GET_ASIZE(dva); 4067 scn->scn_phys.scn_examined += asize; 4068 spa->spa_scan_pass_exam += asize; 4069 4070 /* if it's a resilver, this may not be in the target range */ 4071 if (!needs_io) 4072 needs_io = dsl_scan_need_resilver(spa, dva, psize, 4073 phys_birth); 4074 } 4075 4076 if (needs_io && !zfs_no_scrub_io) { 4077 dsl_scan_enqueue(dp, bp, zio_flags, zb); 4078 } else { 4079 count_block_issued(spa, bp, B_TRUE); 4080 } 4081 4082 /* do not relocate this block */ 4083 return (0); 4084 } 4085 4086 static void 4087 dsl_scan_scrub_done(zio_t *zio) 4088 { 4089 spa_t *spa = zio->io_spa; 4090 blkptr_t *bp = zio->io_bp; 4091 dsl_scan_io_queue_t *queue = zio->io_private; 4092 4093 abd_free(zio->io_abd); 4094 4095 if (queue == NULL) { 4096 mutex_enter(&spa->spa_scrub_lock); 4097 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 4098 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 4099 cv_broadcast(&spa->spa_scrub_io_cv); 4100 mutex_exit(&spa->spa_scrub_lock); 4101 } else { 4102 mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); 4103 ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); 4104 queue->q_inflight_bytes -= BP_GET_PSIZE(bp); 4105 cv_broadcast(&queue->q_zio_cv); 4106 mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); 4107 } 4108 4109 if (zio->io_error && (zio->io_error != ECKSUM || 4110 !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { 4111 atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors); 4112 } 4113 } 4114 4115 /* 4116 * Given a scanning zio's information, executes the zio. The zio need 4117 * not necessarily be only sortable, this function simply executes the 4118 * zio, no matter what it is. The optional queue argument allows the 4119 * caller to specify that they want per top level vdev IO rate limiting 4120 * instead of the legacy global limiting. 4121 */ 4122 static void 4123 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 4124 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) 4125 { 4126 spa_t *spa = dp->dp_spa; 4127 dsl_scan_t *scn = dp->dp_scan; 4128 size_t size = BP_GET_PSIZE(bp); 4129 abd_t *data = abd_alloc_for_io(size, B_FALSE); 4130 zio_t *pio; 4131 4132 if (queue == NULL) { 4133 ASSERT3U(scn->scn_maxinflight_bytes, >, 0); 4134 mutex_enter(&spa->spa_scrub_lock); 4135 while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) 4136 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 4137 spa->spa_scrub_inflight += BP_GET_PSIZE(bp); 4138 mutex_exit(&spa->spa_scrub_lock); 4139 pio = scn->scn_zio_root; 4140 } else { 4141 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 4142 4143 ASSERT3U(queue->q_maxinflight_bytes, >, 0); 4144 mutex_enter(q_lock); 4145 while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) 4146 cv_wait(&queue->q_zio_cv, q_lock); 4147 queue->q_inflight_bytes += BP_GET_PSIZE(bp); 4148 pio = queue->q_zio; 4149 mutex_exit(q_lock); 4150 } 4151 4152 ASSERT(pio != NULL); 4153 count_block_issued(spa, bp, queue == NULL); 4154 zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done, 4155 queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); 4156 } 4157 4158 /* 4159 * This is the primary extent sorting algorithm. 
We balance two parameters: 4160 * 1) how many bytes of I/O are in an extent 4161 * 2) how well the extent is filled with I/O (as a fraction of its total size) 4162 * Since we allow extents to have gaps between their constituent I/Os, it's 4163 * possible to have a fairly large extent that contains the same amount of 4164 * I/O bytes as a much smaller extent, which just packs the I/O more tightly. 4165 * The algorithm sorts based on a score calculated from the extent's size, 4166 * the relative fill volume (in %) and a "fill weight" parameter that controls 4167 * the split between whether we prefer larger extents or more well populated 4168 * extents: 4169 * 4170 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) 4171 * 4172 * Example: 4173 * 1) assume extsz = 64 MiB 4174 * 2) assume fill = 32 MiB (extent is half full) 4175 * 3) assume fill_weight = 3 4176 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 4177 * SCORE = 32M + (50 * 3 * 32M) / 100 4178 * SCORE = 32M + (4800M / 100) 4179 * SCORE = 32M + 48M 4180 * ^ ^ 4181 * | +--- final total relative fill-based score 4182 * +--------- final total fill-based score 4183 * SCORE = 80M 4184 * 4185 * As can be seen, at fill_weight=3, the algorithm is slightly biased towards 4186 * extents that are more completely filled (in a 3:2 ratio) vs just larger. 4187 * Note that as an optimization, we replace multiplication and division by 4188 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). 4189 * 4190 * Since we do not care if one extent is only a few percent better than another, 4191 * we compress the score into 6 bits via a binary logarithm (highbit64()) and 4192 * store it in the high bits of the offset, which are otherwise unused due to 4193 * ashift. This reduces q_exts_by_size B-tree elements to only 64 bits, so they 4194 * can be compared with a single operation. It also makes scrubs more sequential 4195 * and reduces the chance that a minor extent change moves it within the B-tree.
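 *
 * For example, under this encoding an extent with score S and start offset O
 * is stored in q_exts_by_size as the single 64-bit key
 * ((64 - highbit64(S)) << 56) | O (see ext_size_value() below). Extents with
 * a larger log2(score) therefore compare as smaller keys, and extents whose
 * scores fall into the same power-of-two bucket are ordered by ascending
 * offset (LBA).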
4196 */ 4197 static int 4198 ext_size_compare(const void *x, const void *y) 4199 { 4200 const uint64_t *a = x, *b = y; 4201 4202 return (TREE_CMP(*a, *b)); 4203 } 4204 4205 static void 4206 ext_size_create(range_tree_t *rt, void *arg) 4207 { 4208 (void) rt; 4209 zfs_btree_t *size_tree = arg; 4210 4211 zfs_btree_create(size_tree, ext_size_compare, sizeof (uint64_t)); 4212 } 4213 4214 static void 4215 ext_size_destroy(range_tree_t *rt, void *arg) 4216 { 4217 (void) rt; 4218 zfs_btree_t *size_tree = arg; 4219 ASSERT0(zfs_btree_numnodes(size_tree)); 4220 4221 zfs_btree_destroy(size_tree); 4222 } 4223 4224 static uint64_t 4225 ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg) 4226 { 4227 (void) rt; 4228 uint64_t size = rsg->rs_end - rsg->rs_start; 4229 uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) * 4230 fill_weight * rsg->rs_fill) >> 7); 4231 ASSERT3U(rt->rt_shift, >=, 8); 4232 return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start); 4233 } 4234 4235 static void 4236 ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg) 4237 { 4238 zfs_btree_t *size_tree = arg; 4239 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); 4240 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); 4241 zfs_btree_add(size_tree, &v); 4242 } 4243 4244 static void 4245 ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 4246 { 4247 zfs_btree_t *size_tree = arg; 4248 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); 4249 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); 4250 zfs_btree_remove(size_tree, &v); 4251 } 4252 4253 static void 4254 ext_size_vacate(range_tree_t *rt, void *arg) 4255 { 4256 zfs_btree_t *size_tree = arg; 4257 zfs_btree_clear(size_tree); 4258 zfs_btree_destroy(size_tree); 4259 4260 ext_size_create(rt, arg); 4261 } 4262 4263 static const range_tree_ops_t ext_size_ops = { 4264 .rtop_create = ext_size_create, 4265 .rtop_destroy = ext_size_destroy, 4266 .rtop_add = ext_size_add, 4267 .rtop_remove = ext_size_remove, 4268 .rtop_vacate = ext_size_vacate 4269 }; 4270 4271 /* 4272 * Comparator for the q_sios_by_addr tree. Sorting is simply performed 4273 * based on LBA-order (from lowest to highest). 4274 */ 4275 static int 4276 sio_addr_compare(const void *x, const void *y) 4277 { 4278 const scan_io_t *a = x, *b = y; 4279 4280 return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b))); 4281 } 4282 4283 /* IO queues are created on demand when they are needed. */ 4284 static dsl_scan_io_queue_t * 4285 scan_io_queue_create(vdev_t *vd) 4286 { 4287 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; 4288 dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); 4289 4290 q->q_scn = scn; 4291 q->q_vd = vd; 4292 q->q_sio_memused = 0; 4293 q->q_last_ext_addr = -1; 4294 cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); 4295 q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP, 4296 &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap); 4297 avl_create(&q->q_sios_by_addr, sio_addr_compare, 4298 sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); 4299 4300 return (q); 4301 } 4302 4303 /* 4304 * Destroys a scan queue and all segments and scan_io_t's contained in it. 4305 * No further execution of I/O occurs, anything pending in the queue is 4306 * simply freed without being executed. 
4307 */ 4308 void 4309 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) 4310 { 4311 dsl_scan_t *scn = queue->q_scn; 4312 scan_io_t *sio; 4313 void *cookie = NULL; 4314 4315 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 4316 4317 if (!avl_is_empty(&queue->q_sios_by_addr)) 4318 atomic_add_64(&scn->scn_queues_pending, -1); 4319 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != 4320 NULL) { 4321 ASSERT(range_tree_contains(queue->q_exts_by_addr, 4322 SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); 4323 queue->q_sio_memused -= SIO_GET_MUSED(sio); 4324 sio_free(sio); 4325 } 4326 4327 ASSERT0(queue->q_sio_memused); 4328 range_tree_vacate(queue->q_exts_by_addr, NULL, queue); 4329 range_tree_destroy(queue->q_exts_by_addr); 4330 avl_destroy(&queue->q_sios_by_addr); 4331 cv_destroy(&queue->q_zio_cv); 4332 4333 kmem_free(queue, sizeof (*queue)); 4334 } 4335 4336 /* 4337 * Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is 4338 * called on behalf of vdev_top_transfer when creating or destroying 4339 * a mirror vdev due to zpool attach/detach. 4340 */ 4341 void 4342 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) 4343 { 4344 mutex_enter(&svd->vdev_scan_io_queue_lock); 4345 mutex_enter(&tvd->vdev_scan_io_queue_lock); 4346 4347 VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); 4348 tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; 4349 svd->vdev_scan_io_queue = NULL; 4350 if (tvd->vdev_scan_io_queue != NULL) 4351 tvd->vdev_scan_io_queue->q_vd = tvd; 4352 4353 mutex_exit(&tvd->vdev_scan_io_queue_lock); 4354 mutex_exit(&svd->vdev_scan_io_queue_lock); 4355 } 4356 4357 static void 4358 scan_io_queues_destroy(dsl_scan_t *scn) 4359 { 4360 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 4361 4362 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 4363 vdev_t *tvd = rvd->vdev_child[i]; 4364 4365 mutex_enter(&tvd->vdev_scan_io_queue_lock); 4366 if (tvd->vdev_scan_io_queue != NULL) 4367 dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); 4368 tvd->vdev_scan_io_queue = NULL; 4369 mutex_exit(&tvd->vdev_scan_io_queue_lock); 4370 } 4371 } 4372 4373 static void 4374 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) 4375 { 4376 dsl_pool_t *dp = spa->spa_dsl_pool; 4377 dsl_scan_t *scn = dp->dp_scan; 4378 vdev_t *vdev; 4379 kmutex_t *q_lock; 4380 dsl_scan_io_queue_t *queue; 4381 scan_io_t *srch_sio, *sio; 4382 avl_index_t idx; 4383 uint64_t start, size; 4384 4385 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); 4386 ASSERT(vdev != NULL); 4387 q_lock = &vdev->vdev_scan_io_queue_lock; 4388 queue = vdev->vdev_scan_io_queue; 4389 4390 mutex_enter(q_lock); 4391 if (queue == NULL) { 4392 mutex_exit(q_lock); 4393 return; 4394 } 4395 4396 srch_sio = sio_alloc(BP_GET_NDVAS(bp)); 4397 bp2sio(bp, srch_sio, dva_i); 4398 start = SIO_GET_OFFSET(srch_sio); 4399 size = SIO_GET_ASIZE(srch_sio); 4400 4401 /* 4402 * We can find the zio in two states: 4403 * 1) Cold, just sitting in the queue of zio's to be issued at 4404 * some point in the future. In this case, all we do is 4405 * remove the zio from the q_sios_by_addr tree, decrement 4406 * its data volume from the containing range_seg_t and 4407 * resort the q_exts_by_size tree to reflect that the 4408 * range_seg_t has lost some of its 'fill'. We don't shorten 4409 * the range_seg_t - this is usually rare enough not to be 4410 * worth the extra hassle of trying keep track of precise 4411 * extent boundaries. 4412 * 2) Hot, where the zio is currently in-flight in 4413 * dsl_scan_issue_ios. 
In this case, we can't simply 4414 * reach in and stop the in-flight zio's, so we instead 4415 * block the caller. Eventually, dsl_scan_issue_ios will 4416 * be done with issuing the zio's it gathered and will 4417 * signal us. 4418 */ 4419 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 4420 sio_free(srch_sio); 4421 4422 if (sio != NULL) { 4423 blkptr_t tmpbp; 4424 4425 /* Got it while it was cold in the queue */ 4426 ASSERT3U(start, ==, SIO_GET_OFFSET(sio)); 4427 ASSERT3U(size, ==, SIO_GET_ASIZE(sio)); 4428 avl_remove(&queue->q_sios_by_addr, sio); 4429 if (avl_is_empty(&queue->q_sios_by_addr)) 4430 atomic_add_64(&scn->scn_queues_pending, -1); 4431 queue->q_sio_memused -= SIO_GET_MUSED(sio); 4432 4433 ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); 4434 range_tree_remove_fill(queue->q_exts_by_addr, start, size); 4435 4436 /* count the block as though we issued it */ 4437 sio2bp(sio, &tmpbp); 4438 count_block_issued(spa, &tmpbp, B_FALSE); 4439 4440 sio_free(sio); 4441 } 4442 mutex_exit(q_lock); 4443 } 4444 4445 /* 4446 * Callback invoked when a zio_free() zio is executing. This needs to be 4447 * intercepted to prevent the zio from deallocating a particular portion 4448 * of disk space and it then getting reallocated and written to, while we 4449 * still have it queued up for processing. 4450 */ 4451 void 4452 dsl_scan_freed(spa_t *spa, const blkptr_t *bp) 4453 { 4454 dsl_pool_t *dp = spa->spa_dsl_pool; 4455 dsl_scan_t *scn = dp->dp_scan; 4456 4457 ASSERT(!BP_IS_EMBEDDED(bp)); 4458 ASSERT(scn != NULL); 4459 if (!dsl_scan_is_running(scn)) 4460 return; 4461 4462 for (int i = 0; i < BP_GET_NDVAS(bp); i++) 4463 dsl_scan_freed_dva(spa, bp, i); 4464 } 4465 4466 /* 4467 * Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has 4468 * not started, start it. Otherwise, only restart if max txg in DTL range is 4469 * greater than the max txg in the current scan. If the DTL max is less than 4470 * the scan max, then the vdev has not missed any new data since the resilver 4471 * started, so a restart is not needed. 
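 *
 * For example (hypothetical txgs): if the current resilver has
 * scn_max_txg = 1000 and the vdev's DTL spans txgs 990-995, no restart is
 * needed; if the DTL instead extends to txg 1020, a restart is requested,
 * deferred via vdev_defer_resilver() when the RESILVER_DEFER feature is
 * enabled, or issued immediately via SPA_ASYNC_RESILVER otherwise.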
4472 */ 4473 void 4474 dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd) 4475 { 4476 uint64_t min, max; 4477 4478 if (!vdev_resilver_needed(vd, &min, &max)) 4479 return; 4480 4481 if (!dsl_scan_resilvering(dp)) { 4482 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 4483 return; 4484 } 4485 4486 if (max <= dp->dp_scan->scn_phys.scn_max_txg) 4487 return; 4488 4489 /* restart is needed, check if it can be deferred */ 4490 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 4491 vdev_defer_resilver(vd); 4492 else 4493 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 4494 } 4495 4496 ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW, 4497 "Max bytes in flight per leaf vdev for scrubs and resilvers"); 4498 4499 ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW, 4500 "Min millisecs to scrub per txg"); 4501 4502 ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW, 4503 "Min millisecs to obsolete per txg"); 4504 4505 ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW, 4506 "Min millisecs to free per txg"); 4507 4508 ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW, 4509 "Min millisecs to resilver per txg"); 4510 4511 ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW, 4512 "Set to prevent scans from progressing"); 4513 4514 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW, 4515 "Set to disable scrub I/O"); 4516 4517 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW, 4518 "Set to disable scrub prefetching"); 4519 4520 ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW, 4521 "Max number of blocks freed in one txg"); 4522 4523 ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW, 4524 "Max number of dedup blocks freed in one txg"); 4525 4526 ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, 4527 "Enable processing of the free_bpobj"); 4528 4529 ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW, 4530 "Enable block statistics calculation during scrub"); 4531 4532 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW, 4533 "Fraction of RAM for scan hard limit"); 4534 4535 ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW, 4536 "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); 4537 4538 ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW, 4539 "Scrub using legacy non-sequential method"); 4540 4541 ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW, 4542 "Scan progress on-disk checkpointing interval"); 4543 4544 ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW, 4545 "Max gap in bytes between sequential scrub / resilver I/Os"); 4546 4547 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW, 4548 "Fraction of hard limit used as soft limit"); 4549 4550 ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW, 4551 "Tunable to attempt to reduce lock contention"); 4552 4553 ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW, 4554 "Tunable to adjust bias towards more filled segments during scans"); 4555 4556 ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW, 4557 "Tunable to report resilver performance over the last N txgs"); 4558 4559 ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW, 4560 "Process all resilvers immediately"); 4561
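
/*
 * Illustrative usage note: on Linux builds the ZFS_MODULE_PARAM() entries
 * above are exposed as module parameters, so a read-write tunable such as
 * zfs_scan_legacy can typically be inspected or changed at runtime via
 * /sys/module/zfs/parameters/zfs_scan_legacy, or set at module load time.
 */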