1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2018 by Delphix. All rights reserved. 24 * Copyright 2016 Gary Mills 25 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 26 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved. 27 * Copyright 2019 Joyent, Inc. 28 */ 29 30 #include <sys/dsl_scan.h> 31 #include <sys/dsl_pool.h> 32 #include <sys/dsl_dataset.h> 33 #include <sys/dsl_prop.h> 34 #include <sys/dsl_dir.h> 35 #include <sys/dsl_synctask.h> 36 #include <sys/dnode.h> 37 #include <sys/dmu_tx.h> 38 #include <sys/dmu_objset.h> 39 #include <sys/arc.h> 40 #include <sys/zap.h> 41 #include <sys/zio.h> 42 #include <sys/zfs_context.h> 43 #include <sys/fs/zfs.h> 44 #include <sys/zfs_znode.h> 45 #include <sys/spa_impl.h> 46 #include <sys/vdev_impl.h> 47 #include <sys/zil_impl.h> 48 #include <sys/zio_checksum.h> 49 #include <sys/ddt.h> 50 #include <sys/sa.h> 51 #include <sys/sa_impl.h> 52 #include <sys/zfeature.h> 53 #include <sys/abd.h> 54 #include <sys/range_tree.h> 55 #ifdef _KERNEL 56 #include <sys/zfs_vfsops.h> 57 #endif 58 59 /* 60 * Grand theory statement on scan queue sorting 61 * 62 * Scanning is implemented by recursively traversing all indirection levels 63 * in an object and reading all blocks referenced from said objects. This 64 * results in us approximately traversing the object from lowest logical 65 * offset to the highest. For best performance, we would want the logical 66 * blocks to be physically contiguous. However, this is frequently not the 67 * case with pools given the allocation patterns of copy-on-write filesystems. 68 * So instead, we put the I/Os into a reordering queue and issue them in a 69 * way that will most benefit physical disks (LBA-order). 70 * 71 * Queue management: 72 * 73 * Ideally, we would want to scan all metadata and queue up all block I/O 74 * prior to starting to issue it, because that allows us to do an optimal 75 * sorting job. This can however consume large amounts of memory. Therefore 76 * we continuously monitor the size of the queues and constrain them to 5% 77 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this 78 * limit, we clear out a few of the largest extents at the head of the queues 79 * to make room for more scanning. Hopefully, these extents will be fairly 80 * large and contiguous, allowing us to approach sequential I/O throughput 81 * even without a fully sorted tree. 82 * 83 * Metadata scanning takes place in dsl_scan_visit(), which is called from 84 * dsl_scan_sync() every spa_sync(). 
If we have either fully scanned all 85 * metadata on the pool, or we need to make room in memory because our 86 * queues are too large, dsl_scan_visit() is postponed and 87 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies 88 * that metadata scanning and queued I/O issuing are mutually exclusive. This 89 * allows us to provide maximum sequential I/O throughput for the majority of 90 * I/O's issued since sequential I/O performance is significantly negatively 91 * impacted if it is interleaved with random I/O. 92 * 93 * Implementation Notes 94 * 95 * One side effect of the queued scanning algorithm is that the scanning code 96 * needs to be notified whenever a block is freed. This is needed to allow 97 * the scanning code to remove these I/Os from the issuing queue. Additionally, 98 * we do not attempt to queue gang blocks to be issued sequentially since this 99 * is very hard to do and would have an extremely limited performance benefit. 100 * Instead, we simply issue gang I/Os as soon as we find them using the legacy 101 * algorithm. 102 * 103 * Backwards compatibility 104 * 105 * This new algorithm is backwards compatible with the legacy on-disk data 106 * structures (and therefore does not require a new feature flag). 107 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan 108 * will stop scanning metadata (in logical order) and wait for all outstanding 109 * sorted I/O to complete. Once this is done, we write out a checkpoint 110 * bookmark, indicating that we have scanned everything logically before it. 111 * If the pool is imported on a machine without the new sorting algorithm, 112 * the scan simply resumes from the last checkpoint using the legacy algorithm. 113 */ 114 115 typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, 116 const zbookmark_phys_t *); 117 118 static scan_cb_t dsl_scan_scrub_cb; 119 120 static int scan_ds_queue_compare(const void *a, const void *b); 121 static int scan_prefetch_queue_compare(const void *a, const void *b); 122 static void scan_ds_queue_clear(dsl_scan_t *scn); 123 static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn); 124 static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, 125 uint64_t *txg); 126 static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg); 127 static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj); 128 static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx); 129 static uint64_t dsl_scan_count_leaves(vdev_t *vd); 130 131 extern int zfs_vdev_async_write_active_min_dirty_percent; 132 133 /* 134 * By default zfs will check to ensure it is not over the hard memory 135 * limit before each txg. If finer-grained control of this is needed 136 * this value can be set to 1 to enable checking before scanning each 137 * block. 138 */ 139 int zfs_scan_strict_mem_lim = B_FALSE; 140 141 /* 142 * Maximum number of parallelly executed bytes per leaf vdev. We attempt 143 * to strike a balance here between keeping the vdev queues full of I/Os 144 * at all times and not overflowing the queues to cause long latency, 145 * which would cause long txg sync times. No matter what, we will not 146 * overload the drives with I/O, since that is protected by 147 * zfs_vdev_scrub_max_active. 
148 */ 149 unsigned long zfs_scan_vdev_limit = 4 << 20; 150 151 int zfs_scan_issue_strategy = 0; 152 int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */ 153 unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ 154 155 /* 156 * fill_weight is non-tunable at runtime, so we copy it at module init from 157 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would 158 * break queue sorting. 159 */ 160 int zfs_scan_fill_weight = 3; 161 static uint64_t fill_weight; 162 163 /* See dsl_scan_should_clear() for details on the memory limit tunables */ 164 uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ 165 uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ 166 int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */ 167 int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */ 168 169 int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */ 170 int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */ 171 int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */ 172 int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */ 173 int zfs_scan_checkpoint_intval = 7200; /* in seconds */ 174 int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */ 175 int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ 176 int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ 177 enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; 178 /* max number of blocks to free in a single TXG */ 179 unsigned long zfs_async_block_max_blocks = ULONG_MAX; 180 /* max number of dedup blocks to free in a single TXG */ 181 unsigned long zfs_max_async_dedup_frees = 100000; 182 183 int zfs_resilver_disable_defer = 0; /* set to disable resilver deferring */ 184 185 /* 186 * We wait a few txgs after importing a pool to begin scanning so that 187 * the import / mounting code isn't held up by scrub / resilver IO. 188 * Unfortunately, it is a bit difficult to determine exactly how long 189 * this will take since userspace will trigger fs mounts asynchronously 190 * and the kernel will create zvol minors asynchronously. As a result, 191 * the value provided here is a bit arbitrary, but represents a 192 * reasonable estimate of how many txgs it will take to finish fully 193 * importing a pool 194 */ 195 #define SCAN_IMPORT_WAIT_TXGS 5 196 197 #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \ 198 ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \ 199 (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER) 200 201 /* 202 * Enable/disable the processing of the free_bpobj object. 203 */ 204 int zfs_free_bpobj_enabled = 1; 205 206 /* the order has to match pool_scan_type */ 207 static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { 208 NULL, 209 dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */ 210 dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ 211 }; 212 213 /* In core node for the scn->scn_queue. Represents a dataset to be scanned */ 214 typedef struct { 215 uint64_t sds_dsobj; 216 uint64_t sds_txg; 217 avl_node_t sds_node; 218 } scan_ds_t; 219 220 /* 221 * This controls what conditions are placed on dsl_scan_sync_state(): 222 * SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0 223 * SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0. 224 * SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise 225 * write out the scn_phys_cached version. 226 * See dsl_scan_sync_state for details. 
227 */ 228 typedef enum { 229 SYNC_OPTIONAL, 230 SYNC_MANDATORY, 231 SYNC_CACHED 232 } state_sync_type_t; 233 234 /* 235 * This struct represents the minimum information needed to reconstruct a 236 * zio for sequential scanning. This is useful because many of these will 237 * accumulate in the sequential IO queues before being issued, so saving 238 * memory matters here. 239 */ 240 typedef struct scan_io { 241 /* fields from blkptr_t */ 242 uint64_t sio_blk_prop; 243 uint64_t sio_phys_birth; 244 uint64_t sio_birth; 245 zio_cksum_t sio_cksum; 246 uint32_t sio_nr_dvas; 247 248 /* fields from zio_t */ 249 uint32_t sio_flags; 250 zbookmark_phys_t sio_zb; 251 252 /* members for queue sorting */ 253 union { 254 avl_node_t sio_addr_node; /* link into issuing queue */ 255 list_node_t sio_list_node; /* link for issuing to disk */ 256 } sio_nodes; 257 258 /* 259 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp, 260 * depending on how many were in the original bp. Only the 261 * first DVA is really used for sorting and issuing purposes. 262 * The other DVAs (if provided) simply exist so that the zio 263 * layer can find additional copies to repair from in the 264 * event of an error. This array must go at the end of the 265 * struct to allow this for the variable number of elements. 266 */ 267 dva_t sio_dva[0]; 268 } scan_io_t; 269 270 #define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x) 271 #define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x) 272 #define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0]) 273 #define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0]) 274 #define SIO_GET_END_OFFSET(sio) \ 275 (SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio)) 276 #define SIO_GET_MUSED(sio) \ 277 (sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t))) 278 279 struct dsl_scan_io_queue { 280 dsl_scan_t *q_scn; /* associated dsl_scan_t */ 281 vdev_t *q_vd; /* top-level vdev that this queue represents */ 282 283 /* trees used for sorting I/Os and extents of I/Os */ 284 range_tree_t *q_exts_by_addr; 285 zfs_btree_t q_exts_by_size; 286 avl_tree_t q_sios_by_addr; 287 uint64_t q_sio_memused; 288 289 /* members for zio rate limiting */ 290 uint64_t q_maxinflight_bytes; 291 uint64_t q_inflight_bytes; 292 kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */ 293 294 /* per txg statistics */ 295 uint64_t q_total_seg_size_this_txg; 296 uint64_t q_segs_this_txg; 297 uint64_t q_total_zio_size_this_txg; 298 uint64_t q_zios_this_txg; 299 }; 300 301 /* private data for dsl_scan_prefetch_cb() */ 302 typedef struct scan_prefetch_ctx { 303 zfs_refcount_t spc_refcnt; /* refcount for memory management */ 304 dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */ 305 boolean_t spc_root; /* is this prefetch for an objset? 
*/ 306 uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */ 307 uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */ 308 } scan_prefetch_ctx_t; 309 310 /* private data for dsl_scan_prefetch() */ 311 typedef struct scan_prefetch_issue_ctx { 312 avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */ 313 scan_prefetch_ctx_t *spic_spc; /* spc for the callback */ 314 blkptr_t spic_bp; /* bp to prefetch */ 315 zbookmark_phys_t spic_zb; /* bookmark to prefetch */ 316 } scan_prefetch_issue_ctx_t; 317 318 static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 319 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue); 320 static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, 321 scan_io_t *sio); 322 323 static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd); 324 static void scan_io_queues_destroy(dsl_scan_t *scn); 325 326 static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP]; 327 328 /* sio->sio_nr_dvas must be set so we know which cache to free from */ 329 static void 330 sio_free(scan_io_t *sio) 331 { 332 ASSERT3U(sio->sio_nr_dvas, >, 0); 333 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 334 335 kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio); 336 } 337 338 /* It is up to the caller to set sio->sio_nr_dvas for freeing */ 339 static scan_io_t * 340 sio_alloc(unsigned short nr_dvas) 341 { 342 ASSERT3U(nr_dvas, >, 0); 343 ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP); 344 345 return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP)); 346 } 347 348 void 349 scan_init(void) 350 { 351 /* 352 * This is used in ext_size_compare() to weight segments 353 * based on how sparse they are. This cannot be changed 354 * mid-scan and the tree comparison functions don't currently 355 * have a mechanism for passing additional context to the 356 * compare functions. 
Thus we store this value globally and 357 * we only allow it to be set at module initialization time 358 */ 359 fill_weight = zfs_scan_fill_weight; 360 361 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 362 char name[36]; 363 364 (void) snprintf(name, sizeof (name), "sio_cache_%d", i); 365 sio_cache[i] = kmem_cache_create(name, 366 (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))), 367 0, NULL, NULL, NULL, NULL, NULL, 0); 368 } 369 } 370 371 void 372 scan_fini(void) 373 { 374 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 375 kmem_cache_destroy(sio_cache[i]); 376 } 377 } 378 379 static inline boolean_t 380 dsl_scan_is_running(const dsl_scan_t *scn) 381 { 382 return (scn->scn_phys.scn_state == DSS_SCANNING); 383 } 384 385 boolean_t 386 dsl_scan_resilvering(dsl_pool_t *dp) 387 { 388 return (dsl_scan_is_running(dp->dp_scan) && 389 dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER); 390 } 391 392 static inline void 393 sio2bp(const scan_io_t *sio, blkptr_t *bp) 394 { 395 bzero(bp, sizeof (*bp)); 396 bp->blk_prop = sio->sio_blk_prop; 397 bp->blk_phys_birth = sio->sio_phys_birth; 398 bp->blk_birth = sio->sio_birth; 399 bp->blk_fill = 1; /* we always only work with data pointers */ 400 bp->blk_cksum = sio->sio_cksum; 401 402 ASSERT3U(sio->sio_nr_dvas, >, 0); 403 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 404 405 bcopy(sio->sio_dva, bp->blk_dva, sio->sio_nr_dvas * sizeof (dva_t)); 406 } 407 408 static inline void 409 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) 410 { 411 sio->sio_blk_prop = bp->blk_prop; 412 sio->sio_phys_birth = bp->blk_phys_birth; 413 sio->sio_birth = bp->blk_birth; 414 sio->sio_cksum = bp->blk_cksum; 415 sio->sio_nr_dvas = BP_GET_NDVAS(bp); 416 417 /* 418 * Copy the DVAs to the sio. We need all copies of the block so 419 * that the self healing code can use the alternate copies if the 420 * first is corrupted. We want the DVA at index dva_i to be first 421 * in the sio since this is the primary one that we want to issue. 422 */ 423 for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) { 424 sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas]; 425 } 426 } 427 428 int 429 dsl_scan_init(dsl_pool_t *dp, uint64_t txg) 430 { 431 int err; 432 dsl_scan_t *scn; 433 spa_t *spa = dp->dp_spa; 434 uint64_t f; 435 436 scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP); 437 scn->scn_dp = dp; 438 439 /* 440 * It's possible that we're resuming a scan after a reboot so 441 * make sure that the scan_async_destroying flag is initialized 442 * appropriately. 443 */ 444 ASSERT(!scn->scn_async_destroying); 445 scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa, 446 SPA_FEATURE_ASYNC_DESTROY); 447 448 /* 449 * Calculate the max number of in-flight bytes for pool-wide 450 * scanning operations (minimum 1MB). Limits for the issuing 451 * phase are done per top-level vdev and are handled separately. 452 */ 453 scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit * 454 dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20); 455 456 avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t), 457 offsetof(scan_ds_t, sds_node)); 458 avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare, 459 sizeof (scan_prefetch_issue_ctx_t), 460 offsetof(scan_prefetch_issue_ctx_t, spic_avl_node)); 461 462 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 463 "scrub_func", sizeof (uint64_t), 1, &f); 464 if (err == 0) { 465 /* 466 * There was an old-style scrub in progress. Restart a 467 * new-style scrub from the beginning. 
468 */ 469 scn->scn_restart_txg = txg; 470 zfs_dbgmsg("old-style scrub was in progress; " 471 "restarting new-style scrub in txg %llu", 472 (longlong_t)scn->scn_restart_txg); 473 474 /* 475 * Load the queue obj from the old location so that it 476 * can be freed by dsl_scan_done(). 477 */ 478 (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 479 "scrub_queue", sizeof (uint64_t), 1, 480 &scn->scn_phys.scn_queue_obj); 481 } else { 482 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 483 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 484 &scn->scn_phys); 485 /* 486 * Detect if the pool contains the signature of #2094. If it 487 * does properly update the scn->scn_phys structure and notify 488 * the administrator by setting an errata for the pool. 489 */ 490 if (err == EOVERFLOW) { 491 uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1]; 492 VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24); 493 VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==, 494 (23 * sizeof (uint64_t))); 495 496 err = zap_lookup(dp->dp_meta_objset, 497 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN, 498 sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp); 499 if (err == 0) { 500 uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS]; 501 502 if (overflow & ~DSL_SCAN_FLAGS_MASK || 503 scn->scn_async_destroying) { 504 spa->spa_errata = 505 ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY; 506 return (EOVERFLOW); 507 } 508 509 bcopy(zaptmp, &scn->scn_phys, 510 SCAN_PHYS_NUMINTS * sizeof (uint64_t)); 511 scn->scn_phys.scn_flags = overflow; 512 513 /* Required scrub already in progress. */ 514 if (scn->scn_phys.scn_state == DSS_FINISHED || 515 scn->scn_phys.scn_state == DSS_CANCELED) 516 spa->spa_errata = 517 ZPOOL_ERRATA_ZOL_2094_SCRUB; 518 } 519 } 520 521 if (err == ENOENT) 522 return (0); 523 else if (err) 524 return (err); 525 526 /* 527 * We might be restarting after a reboot, so jump the issued 528 * counter to how far we've scanned. We know we're consistent 529 * up to here. 530 */ 531 scn->scn_issued_before_pass = scn->scn_phys.scn_examined; 532 533 if (dsl_scan_is_running(scn) && 534 spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) { 535 /* 536 * A new-type scrub was in progress on an old 537 * pool, and the pool was accessed by old 538 * software. Restart from the beginning, since 539 * the old software may have changed the pool in 540 * the meantime. 541 */ 542 scn->scn_restart_txg = txg; 543 zfs_dbgmsg("new-style scrub was modified " 544 "by old software; restarting in txg %llu", 545 (longlong_t)scn->scn_restart_txg); 546 } else if (dsl_scan_resilvering(dp)) { 547 /* 548 * If a resilver is in progress and there are already 549 * errors, restart it instead of finishing this scan and 550 * then restarting it. If there haven't been any errors 551 * then remember that the incore DTL is valid. 
552 */ 553 if (scn->scn_phys.scn_errors > 0) { 554 scn->scn_restart_txg = txg; 555 zfs_dbgmsg("resilver can't excise DTL_MISSING " 556 "when finished; restarting in txg %llu", 557 (u_longlong_t)scn->scn_restart_txg); 558 } else { 559 /* it's safe to excise DTL when finished */ 560 spa->spa_scrub_started = B_TRUE; 561 } 562 } 563 } 564 565 bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys)); 566 567 /* reload the queue into the in-core state */ 568 if (scn->scn_phys.scn_queue_obj != 0) { 569 zap_cursor_t zc; 570 zap_attribute_t za; 571 572 for (zap_cursor_init(&zc, dp->dp_meta_objset, 573 scn->scn_phys.scn_queue_obj); 574 zap_cursor_retrieve(&zc, &za) == 0; 575 (void) zap_cursor_advance(&zc)) { 576 scan_ds_queue_insert(scn, 577 zfs_strtonum(za.za_name, NULL), 578 za.za_first_integer); 579 } 580 zap_cursor_fini(&zc); 581 } 582 583 spa_scan_stat_init(spa); 584 return (0); 585 } 586 587 void 588 dsl_scan_fini(dsl_pool_t *dp) 589 { 590 if (dp->dp_scan != NULL) { 591 dsl_scan_t *scn = dp->dp_scan; 592 593 if (scn->scn_taskq != NULL) 594 taskq_destroy(scn->scn_taskq); 595 596 scan_ds_queue_clear(scn); 597 avl_destroy(&scn->scn_queue); 598 scan_ds_prefetch_queue_clear(scn); 599 avl_destroy(&scn->scn_prefetch_queue); 600 601 kmem_free(dp->dp_scan, sizeof (dsl_scan_t)); 602 dp->dp_scan = NULL; 603 } 604 } 605 606 static boolean_t 607 dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx) 608 { 609 return (scn->scn_restart_txg != 0 && 610 scn->scn_restart_txg <= tx->tx_txg); 611 } 612 613 boolean_t 614 dsl_scan_resilver_scheduled(dsl_pool_t *dp) 615 { 616 return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) || 617 (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER)); 618 } 619 620 boolean_t 621 dsl_scan_scrubbing(const dsl_pool_t *dp) 622 { 623 dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys; 624 625 return (scn_phys->scn_state == DSS_SCANNING && 626 scn_phys->scn_func == POOL_SCAN_SCRUB); 627 } 628 629 boolean_t 630 dsl_scan_is_paused_scrub(const dsl_scan_t *scn) 631 { 632 return (dsl_scan_scrubbing(scn->scn_dp) && 633 scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED); 634 } 635 636 /* 637 * Writes out a persistent dsl_scan_phys_t record to the pool directory. 638 * Because we can be running in the block sorting algorithm, we do not always 639 * want to write out the record, only when it is "safe" to do so. This safety 640 * condition is achieved by making sure that the sorting queues are empty 641 * (scn_bytes_pending == 0). When this condition is not true, the sync'd state 642 * is inconsistent with how much actual scanning progress has been made. The 643 * kind of sync to be performed is specified by the sync_type argument. If the 644 * sync is optional, we only sync if the queues are empty. If the sync is 645 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The 646 * third possible state is a "cached" sync. This is done in response to: 647 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been 648 * destroyed, so we wouldn't be able to restart scanning from it. 649 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been 650 * superseded by a newer snapshot. 651 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been 652 * swapped with its clone. 653 * In all cases, a cached sync simply rewrites the last record we've written, 654 * just slightly modified. 
For the modifications that are performed to the 655 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed, 656 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped. 657 */ 658 static void 659 dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type) 660 { 661 int i; 662 spa_t *spa = scn->scn_dp->dp_spa; 663 664 ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0); 665 if (scn->scn_bytes_pending == 0) { 666 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 667 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 668 dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue; 669 670 if (q == NULL) 671 continue; 672 673 mutex_enter(&vd->vdev_scan_io_queue_lock); 674 ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL); 675 ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==, 676 NULL); 677 ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL); 678 mutex_exit(&vd->vdev_scan_io_queue_lock); 679 } 680 681 if (scn->scn_phys.scn_queue_obj != 0) 682 scan_ds_queue_sync(scn, tx); 683 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 684 DMU_POOL_DIRECTORY_OBJECT, 685 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 686 &scn->scn_phys, tx)); 687 bcopy(&scn->scn_phys, &scn->scn_phys_cached, 688 sizeof (scn->scn_phys)); 689 690 if (scn->scn_checkpointing) 691 zfs_dbgmsg("finish scan checkpoint"); 692 693 scn->scn_checkpointing = B_FALSE; 694 scn->scn_last_checkpoint = ddi_get_lbolt(); 695 } else if (sync_type == SYNC_CACHED) { 696 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 697 DMU_POOL_DIRECTORY_OBJECT, 698 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 699 &scn->scn_phys_cached, tx)); 700 } 701 } 702 703 /* ARGSUSED */ 704 static int 705 dsl_scan_setup_check(void *arg, dmu_tx_t *tx) 706 { 707 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 708 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 709 710 if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd)) 711 return (SET_ERROR(EBUSY)); 712 713 return (0); 714 } 715 716 void 717 dsl_scan_setup_sync(void *arg, dmu_tx_t *tx) 718 { 719 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 720 pool_scan_func_t *funcp = arg; 721 dmu_object_type_t ot = 0; 722 dsl_pool_t *dp = scn->scn_dp; 723 spa_t *spa = dp->dp_spa; 724 725 ASSERT(!dsl_scan_is_running(scn)); 726 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS); 727 bzero(&scn->scn_phys, sizeof (scn->scn_phys)); 728 scn->scn_phys.scn_func = *funcp; 729 scn->scn_phys.scn_state = DSS_SCANNING; 730 scn->scn_phys.scn_min_txg = 0; 731 scn->scn_phys.scn_max_txg = tx->tx_txg; 732 scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */ 733 scn->scn_phys.scn_start_time = gethrestime_sec(); 734 scn->scn_phys.scn_errors = 0; 735 scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc; 736 scn->scn_issued_before_pass = 0; 737 scn->scn_restart_txg = 0; 738 scn->scn_done_txg = 0; 739 scn->scn_last_checkpoint = 0; 740 scn->scn_checkpointing = B_FALSE; 741 spa_scan_stat_init(spa); 742 743 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 744 scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max; 745 746 /* rewrite all disk labels */ 747 vdev_config_dirty(spa->spa_root_vdev); 748 749 if (vdev_resilver_needed(spa->spa_root_vdev, 750 &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) { 751 nvlist_t *aux = fnvlist_alloc(); 752 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 753 "healing"); 754 spa_event_notify(spa, NULL, aux, 755 ESC_ZFS_RESILVER_START); 756 nvlist_free(aux); 757 } else { 758 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START); 759 } 
760 761 spa->spa_scrub_started = B_TRUE; 762 /* 763 * If this is an incremental scrub, limit the DDT scrub phase 764 * to just the auto-ditto class (for correctness); the rest 765 * of the scrub should go faster using top-down pruning. 766 */ 767 if (scn->scn_phys.scn_min_txg > TXG_INITIAL) 768 scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO; 769 770 /* 771 * When starting a resilver clear any existing rebuild state. 772 * This is required to prevent stale rebuild status from 773 * being reported when a rebuild is run, then a resilver and 774 * finally a scrub. In which case only the scrub status 775 * should be reported by 'zpool status'. 776 */ 777 if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) { 778 vdev_t *rvd = spa->spa_root_vdev; 779 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 780 vdev_t *vd = rvd->vdev_child[i]; 781 vdev_rebuild_clear_sync( 782 (void *)(uintptr_t)vd->vdev_id, tx); 783 } 784 } 785 } 786 787 /* back to the generic stuff */ 788 789 if (dp->dp_blkstats == NULL) { 790 dp->dp_blkstats = 791 vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP); 792 mutex_init(&dp->dp_blkstats->zab_lock, NULL, 793 MUTEX_DEFAULT, NULL); 794 } 795 bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type)); 796 797 if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) 798 ot = DMU_OT_ZAP_OTHER; 799 800 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, 801 ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx); 802 803 bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys)); 804 805 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 806 807 spa_history_log_internal(spa, "scan setup", tx, 808 "func=%u mintxg=%llu maxtxg=%llu", 809 *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg, 810 (u_longlong_t)scn->scn_phys.scn_max_txg); 811 } 812 813 /* 814 * Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver. 815 * Can also be called to resume a paused scrub. 816 */ 817 int 818 dsl_scan(dsl_pool_t *dp, pool_scan_func_t func) 819 { 820 spa_t *spa = dp->dp_spa; 821 dsl_scan_t *scn = dp->dp_scan; 822 823 /* 824 * Purge all vdev caches and probe all devices. We do this here 825 * rather than in sync context because this requires a writer lock 826 * on the spa_config lock, which we can't do from sync context. The 827 * spa_scrub_reopen flag indicates that vdev_open() should not 828 * attempt to start another scrub. 
829 */ 830 spa_vdev_state_enter(spa, SCL_NONE); 831 spa->spa_scrub_reopen = B_TRUE; 832 vdev_reopen(spa->spa_root_vdev); 833 spa->spa_scrub_reopen = B_FALSE; 834 (void) spa_vdev_state_exit(spa, NULL, 0); 835 836 if (func == POOL_SCAN_RESILVER) { 837 dsl_scan_restart_resilver(spa->spa_dsl_pool, 0); 838 return (0); 839 } 840 841 if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) { 842 /* got scrub start cmd, resume paused scrub */ 843 int err = dsl_scrub_set_pause_resume(scn->scn_dp, 844 POOL_SCRUB_NORMAL); 845 if (err == 0) { 846 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME); 847 return (SET_ERROR(ECANCELED)); 848 } 849 850 return (SET_ERROR(err)); 851 } 852 853 return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check, 854 dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); 855 } 856 857 /* ARGSUSED */ 858 static void 859 dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) 860 { 861 static const char *old_names[] = { 862 "scrub_bookmark", 863 "scrub_ddt_bookmark", 864 "scrub_ddt_class_max", 865 "scrub_queue", 866 "scrub_min_txg", 867 "scrub_max_txg", 868 "scrub_func", 869 "scrub_errors", 870 NULL 871 }; 872 873 dsl_pool_t *dp = scn->scn_dp; 874 spa_t *spa = dp->dp_spa; 875 int i; 876 877 /* Remove any remnants of an old-style scrub. */ 878 for (i = 0; old_names[i]; i++) { 879 (void) zap_remove(dp->dp_meta_objset, 880 DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx); 881 } 882 883 if (scn->scn_phys.scn_queue_obj != 0) { 884 VERIFY0(dmu_object_free(dp->dp_meta_objset, 885 scn->scn_phys.scn_queue_obj, tx)); 886 scn->scn_phys.scn_queue_obj = 0; 887 } 888 scan_ds_queue_clear(scn); 889 scan_ds_prefetch_queue_clear(scn); 890 891 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 892 893 /* 894 * If we were "restarted" from a stopped state, don't bother 895 * with anything else. 896 */ 897 if (!dsl_scan_is_running(scn)) { 898 ASSERT(!scn->scn_is_sorted); 899 return; 900 } 901 902 if (scn->scn_is_sorted) { 903 scan_io_queues_destroy(scn); 904 scn->scn_is_sorted = B_FALSE; 905 906 if (scn->scn_taskq != NULL) { 907 taskq_destroy(scn->scn_taskq); 908 scn->scn_taskq = NULL; 909 } 910 } 911 912 scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED; 913 914 spa_notify_waiters(spa); 915 916 if (dsl_scan_restarting(scn, tx)) 917 spa_history_log_internal(spa, "scan aborted, restarting", tx, 918 "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa)); 919 else if (!complete) 920 spa_history_log_internal(spa, "scan cancelled", tx, 921 "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa)); 922 else 923 spa_history_log_internal(spa, "scan done", tx, 924 "errors=%llu", (u_longlong_t)spa_get_errlog_size(spa)); 925 926 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 927 spa->spa_scrub_active = B_FALSE; 928 929 /* 930 * If the scrub/resilver completed, update all DTLs to 931 * reflect this. Whether it succeeded or not, vacate 932 * all temporary scrub DTLs. 933 * 934 * As the scrub does not currently support traversing 935 * data that have been freed but are part of a checkpoint, 936 * we don't mark the scrub as done in the DTLs as faults 937 * may still exist in those vdevs. 
938 */ 939 if (complete && 940 !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 941 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 942 scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE); 943 944 if (scn->scn_phys.scn_min_txg) { 945 nvlist_t *aux = fnvlist_alloc(); 946 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 947 "healing"); 948 spa_event_notify(spa, NULL, aux, 949 ESC_ZFS_RESILVER_FINISH); 950 nvlist_free(aux); 951 } else { 952 spa_event_notify(spa, NULL, NULL, 953 ESC_ZFS_SCRUB_FINISH); 954 } 955 } else { 956 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 957 0, B_TRUE, B_FALSE); 958 } 959 spa_errlog_rotate(spa); 960 961 /* 962 * Don't clear flag until after vdev_dtl_reassess to ensure that 963 * DTL_MISSING will get updated when possible. 964 */ 965 spa->spa_scrub_started = B_FALSE; 966 967 /* 968 * We may have finished replacing a device. 969 * Let the async thread assess this and handle the detach. 970 */ 971 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 972 973 /* 974 * Clear any resilver_deferred flags in the config. 975 * If there are drives that need resilvering, kick 976 * off an asynchronous request to start resilver. 977 * vdev_clear_resilver_deferred() may update the config 978 * before the resilver can restart. In the event of 979 * a crash during this period, the spa loading code 980 * will find the drives that need to be resilvered 981 * and start the resilver then. 982 */ 983 if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) && 984 vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) { 985 spa_history_log_internal(spa, 986 "starting deferred resilver", tx, "errors=%llu", 987 (u_longlong_t)spa_get_errlog_size(spa)); 988 spa_async_request(spa, SPA_ASYNC_RESILVER); 989 } 990 } 991 992 scn->scn_phys.scn_end_time = gethrestime_sec(); 993 994 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB) 995 spa->spa_errata = 0; 996 997 ASSERT(!dsl_scan_is_running(scn)); 998 } 999 1000 /* ARGSUSED */ 1001 static int 1002 dsl_scan_cancel_check(void *arg, dmu_tx_t *tx) 1003 { 1004 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1005 1006 if (!dsl_scan_is_running(scn)) 1007 return (SET_ERROR(ENOENT)); 1008 return (0); 1009 } 1010 1011 /* ARGSUSED */ 1012 static void 1013 dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx) 1014 { 1015 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1016 1017 dsl_scan_done(scn, B_FALSE, tx); 1018 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 1019 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT); 1020 } 1021 1022 int 1023 dsl_scan_cancel(dsl_pool_t *dp) 1024 { 1025 return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check, 1026 dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED)); 1027 } 1028 1029 static int 1030 dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx) 1031 { 1032 pool_scrub_cmd_t *cmd = arg; 1033 dsl_pool_t *dp = dmu_tx_pool(tx); 1034 dsl_scan_t *scn = dp->dp_scan; 1035 1036 if (*cmd == POOL_SCRUB_PAUSE) { 1037 /* can't pause a scrub when there is no in-progress scrub */ 1038 if (!dsl_scan_scrubbing(dp)) 1039 return (SET_ERROR(ENOENT)); 1040 1041 /* can't pause a paused scrub */ 1042 if (dsl_scan_is_paused_scrub(scn)) 1043 return (SET_ERROR(EBUSY)); 1044 } else if (*cmd != POOL_SCRUB_NORMAL) { 1045 return (SET_ERROR(ENOTSUP)); 1046 } 1047 1048 return (0); 1049 } 1050 1051 static void 1052 dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx) 1053 { 1054 pool_scrub_cmd_t *cmd = arg; 1055 dsl_pool_t *dp = dmu_tx_pool(tx); 1056 spa_t *spa = dp->dp_spa; 1057 dsl_scan_t *scn = dp->dp_scan; 1058 1059 if 
(*cmd == POOL_SCRUB_PAUSE) { 1060 /* can't pause a scrub when there is no in-progress scrub */ 1061 spa->spa_scan_pass_scrub_pause = gethrestime_sec(); 1062 scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED; 1063 scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED; 1064 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1065 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED); 1066 spa_notify_waiters(spa); 1067 } else { 1068 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL); 1069 if (dsl_scan_is_paused_scrub(scn)) { 1070 /* 1071 * We need to keep track of how much time we spend 1072 * paused per pass so that we can adjust the scrub rate 1073 * shown in the output of 'zpool status' 1074 */ 1075 spa->spa_scan_pass_scrub_spent_paused += 1076 gethrestime_sec() - spa->spa_scan_pass_scrub_pause; 1077 spa->spa_scan_pass_scrub_pause = 0; 1078 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 1079 scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED; 1080 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1081 } 1082 } 1083 } 1084 1085 /* 1086 * Set scrub pause/resume state if it makes sense to do so 1087 */ 1088 int 1089 dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd) 1090 { 1091 return (dsl_sync_task(spa_name(dp->dp_spa), 1092 dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3, 1093 ZFS_SPACE_CHECK_RESERVED)); 1094 } 1095 1096 1097 /* start a new scan, or restart an existing one. */ 1098 void 1099 dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg) 1100 { 1101 if (txg == 0) { 1102 dmu_tx_t *tx; 1103 tx = dmu_tx_create_dd(dp->dp_mos_dir); 1104 VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT)); 1105 1106 txg = dmu_tx_get_txg(tx); 1107 dp->dp_scan->scn_restart_txg = txg; 1108 dmu_tx_commit(tx); 1109 } else { 1110 dp->dp_scan->scn_restart_txg = txg; 1111 } 1112 zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg); 1113 } 1114 1115 void 1116 dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp) 1117 { 1118 zio_free(dp->dp_spa, txg, bp); 1119 } 1120 1121 void 1122 dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp) 1123 { 1124 ASSERT(dsl_pool_sync_context(dp)); 1125 zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags)); 1126 } 1127 1128 static int 1129 scan_ds_queue_compare(const void *a, const void *b) 1130 { 1131 const scan_ds_t *sds_a = a, *sds_b = b; 1132 1133 if (sds_a->sds_dsobj < sds_b->sds_dsobj) 1134 return (-1); 1135 if (sds_a->sds_dsobj == sds_b->sds_dsobj) 1136 return (0); 1137 return (1); 1138 } 1139 1140 static void 1141 scan_ds_queue_clear(dsl_scan_t *scn) 1142 { 1143 void *cookie = NULL; 1144 scan_ds_t *sds; 1145 while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) { 1146 kmem_free(sds, sizeof (*sds)); 1147 } 1148 } 1149 1150 static boolean_t 1151 scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg) 1152 { 1153 scan_ds_t srch, *sds; 1154 1155 srch.sds_dsobj = dsobj; 1156 sds = avl_find(&scn->scn_queue, &srch, NULL); 1157 if (sds != NULL && txg != NULL) 1158 *txg = sds->sds_txg; 1159 return (sds != NULL); 1160 } 1161 1162 static void 1163 scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg) 1164 { 1165 scan_ds_t *sds; 1166 avl_index_t where; 1167 1168 sds = kmem_zalloc(sizeof (*sds), KM_SLEEP); 1169 sds->sds_dsobj = dsobj; 1170 sds->sds_txg = txg; 1171 1172 VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL); 1173 avl_insert(&scn->scn_queue, sds, where); 1174 } 1175 1176 static void 1177 scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj) 1178 { 1179 scan_ds_t srch, *sds; 1180 1181 
srch.sds_dsobj = dsobj; 1182 1183 sds = avl_find(&scn->scn_queue, &srch, NULL); 1184 VERIFY(sds != NULL); 1185 avl_remove(&scn->scn_queue, sds); 1186 kmem_free(sds, sizeof (*sds)); 1187 } 1188 1189 static void 1190 scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx) 1191 { 1192 dsl_pool_t *dp = scn->scn_dp; 1193 spa_t *spa = dp->dp_spa; 1194 dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ? 1195 DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER; 1196 1197 ASSERT0(scn->scn_bytes_pending); 1198 ASSERT(scn->scn_phys.scn_queue_obj != 0); 1199 1200 VERIFY0(dmu_object_free(dp->dp_meta_objset, 1201 scn->scn_phys.scn_queue_obj, tx)); 1202 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot, 1203 DMU_OT_NONE, 0, tx); 1204 for (scan_ds_t *sds = avl_first(&scn->scn_queue); 1205 sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) { 1206 VERIFY0(zap_add_int_key(dp->dp_meta_objset, 1207 scn->scn_phys.scn_queue_obj, sds->sds_dsobj, 1208 sds->sds_txg, tx)); 1209 } 1210 } 1211 1212 /* 1213 * Computes the memory limit state that we're currently in. A sorted scan 1214 * needs quite a bit of memory to hold the sorting queue, so we need to 1215 * reasonably constrain the size so it doesn't impact overall system 1216 * performance. We compute two limits: 1217 * 1) Hard memory limit: if the amount of memory used by the sorting 1218 * queues on a pool gets above this value, we stop the metadata 1219 * scanning portion and start issuing the queued up and sorted 1220 * I/Os to reduce memory usage. 1221 * This limit is calculated as a fraction of physmem (by default 5%). 1222 * We constrain the lower bound of the hard limit to an absolute 1223 * minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain 1224 * the upper bound to 5% of the total pool size - no chance we'll 1225 * ever need that much memory, but just to keep the value in check. 1226 * 2) Soft memory limit: once we hit the hard memory limit, we start 1227 * issuing I/O to reduce queue memory usage, but we don't want to 1228 * completely empty out the queues, since we might be able to find I/Os 1229 * that will fill in the gaps of our non-sequential IOs at some point 1230 * in the future. So we stop the issuing of I/Os once the amount of 1231 * memory used drops below the soft limit (at which point we stop issuing 1232 * I/O and start scanning metadata again). 1233 * 1234 * This limit is calculated by subtracting a fraction of the hard 1235 * limit from the hard limit. By default this fraction is 5%, so 1236 * the soft limit is 95% of the hard limit. We cap the size of the 1237 * difference between the hard and soft limits at an absolute 1238 * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is 1239 * sufficient to not cause too frequent switching between the 1240 * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's 1241 * worth of queues is about 1.2 GiB of on-pool data, so scanning 1242 * that should take at least a decent fraction of a second). 
1243 */ 1244 static boolean_t 1245 dsl_scan_should_clear(dsl_scan_t *scn) 1246 { 1247 spa_t *spa = scn->scn_dp->dp_spa; 1248 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 1249 uint64_t alloc, mlim_hard, mlim_soft, mused; 1250 1251 alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 1252 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 1253 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 1254 1255 mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE, 1256 zfs_scan_mem_lim_min); 1257 mlim_hard = MIN(mlim_hard, alloc / 20); 1258 mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact, 1259 zfs_scan_mem_lim_soft_max); 1260 mused = 0; 1261 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 1262 vdev_t *tvd = rvd->vdev_child[i]; 1263 dsl_scan_io_queue_t *queue; 1264 1265 mutex_enter(&tvd->vdev_scan_io_queue_lock); 1266 queue = tvd->vdev_scan_io_queue; 1267 if (queue != NULL) { 1268 /* # extents in exts_by_size = # in exts_by_addr */ 1269 mused += zfs_btree_numnodes(&queue->q_exts_by_size) * 1270 sizeof (range_seg_gap_t) + queue->q_sio_memused; 1271 } 1272 mutex_exit(&tvd->vdev_scan_io_queue_lock); 1273 } 1274 1275 dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused); 1276 1277 if (mused == 0) 1278 ASSERT0(scn->scn_bytes_pending); 1279 1280 /* 1281 * If we are above our hard limit, we need to clear out memory. 1282 * If we are below our soft limit, we need to accumulate sequential IOs. 1283 * Otherwise, we should keep doing whatever we are currently doing. 1284 */ 1285 if (mused >= mlim_hard) 1286 return (B_TRUE); 1287 else if (mused < mlim_soft) 1288 return (B_FALSE); 1289 else 1290 return (scn->scn_clearing); 1291 } 1292 1293 static boolean_t 1294 dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) 1295 { 1296 /* we never skip user/group accounting objects */ 1297 if (zb && (int64_t)zb->zb_object < 0) 1298 return (B_FALSE); 1299 1300 if (scn->scn_suspending) 1301 return (B_TRUE); /* we're already suspending */ 1302 1303 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) 1304 return (B_FALSE); /* we're resuming */ 1305 1306 /* We only know how to resume from level-0 and objset blocks. */ 1307 if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL)) 1308 return (B_FALSE); 1309 1310 /* 1311 * We suspend if: 1312 * - we have scanned for at least the minimum time (default 1 sec 1313 * for scrub, 3 sec for resilver), and either we have sufficient 1314 * dirty data that we are starting to write more quickly 1315 * (default 30%), someone is explicitly waiting for this txg 1316 * to complete, or we have used up all of the time in the txg 1317 * timeout (default 5 sec). 1318 * or 1319 * - the spa is shutting down because this pool is being exported 1320 * or the machine is rebooting. 1321 * or 1322 * - the scan queue has reached its memory use limit 1323 */ 1324 uint64_t curr_time_ns = gethrtime(); 1325 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 1326 uint64_t sync_time_ns = curr_time_ns - 1327 scn->scn_dp->dp_spa->spa_sync_starttime; 1328 int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; 1329 int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 
1330 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 1331 1332 if ((NSEC2MSEC(scan_time_ns) > mintime && 1333 (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent || 1334 txg_sync_waiting(scn->scn_dp) || 1335 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 1336 spa_shutting_down(scn->scn_dp->dp_spa) || 1337 (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) { 1338 if (zb && zb->zb_level == ZB_ROOT_LEVEL) { 1339 dprintf("suspending at first available bookmark " 1340 "%llx/%llx/%llx/%llx\n", 1341 (longlong_t)zb->zb_objset, 1342 (longlong_t)zb->zb_object, 1343 (longlong_t)zb->zb_level, 1344 (longlong_t)zb->zb_blkid); 1345 SET_BOOKMARK(&scn->scn_phys.scn_bookmark, 1346 zb->zb_objset, 0, 0, 0); 1347 } else if (zb != NULL) { 1348 dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n", 1349 (longlong_t)zb->zb_objset, 1350 (longlong_t)zb->zb_object, 1351 (longlong_t)zb->zb_level, 1352 (longlong_t)zb->zb_blkid); 1353 scn->scn_phys.scn_bookmark = *zb; 1354 } else { 1355 #ifdef ZFS_DEBUG 1356 dsl_scan_phys_t *scnp = &scn->scn_phys; 1357 dprintf("suspending at at DDT bookmark " 1358 "%llx/%llx/%llx/%llx\n", 1359 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 1360 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 1361 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 1362 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 1363 #endif 1364 } 1365 scn->scn_suspending = B_TRUE; 1366 return (B_TRUE); 1367 } 1368 return (B_FALSE); 1369 } 1370 1371 typedef struct zil_scan_arg { 1372 dsl_pool_t *zsa_dp; 1373 zil_header_t *zsa_zh; 1374 } zil_scan_arg_t; 1375 1376 /* ARGSUSED */ 1377 static int 1378 dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, 1379 uint64_t claim_txg) 1380 { 1381 zil_scan_arg_t *zsa = arg; 1382 dsl_pool_t *dp = zsa->zsa_dp; 1383 dsl_scan_t *scn = dp->dp_scan; 1384 zil_header_t *zh = zsa->zsa_zh; 1385 zbookmark_phys_t zb; 1386 1387 ASSERT(!BP_IS_REDACTED(bp)); 1388 if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) 1389 return (0); 1390 1391 /* 1392 * One block ("stubby") can be allocated a long time ago; we 1393 * want to visit that one because it has been allocated 1394 * (on-disk) even if it hasn't been claimed (even though for 1395 * scrub there's nothing to do to it). 
1396 */ 1397 if (claim_txg == 0 && bp->blk_birth >= spa_min_claim_txg(dp->dp_spa)) 1398 return (0); 1399 1400 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1401 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 1402 1403 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1404 return (0); 1405 } 1406 1407 /* ARGSUSED */ 1408 static int 1409 dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, 1410 uint64_t claim_txg) 1411 { 1412 if (lrc->lrc_txtype == TX_WRITE) { 1413 zil_scan_arg_t *zsa = arg; 1414 dsl_pool_t *dp = zsa->zsa_dp; 1415 dsl_scan_t *scn = dp->dp_scan; 1416 zil_header_t *zh = zsa->zsa_zh; 1417 const lr_write_t *lr = (const lr_write_t *)lrc; 1418 const blkptr_t *bp = &lr->lr_blkptr; 1419 zbookmark_phys_t zb; 1420 1421 ASSERT(!BP_IS_REDACTED(bp)); 1422 if (BP_IS_HOLE(bp) || 1423 bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) 1424 return (0); 1425 1426 /* 1427 * birth can be < claim_txg if this record's txg is 1428 * already txg sync'ed (but this log block contains 1429 * other records that are not synced) 1430 */ 1431 if (claim_txg == 0 || bp->blk_birth < claim_txg) 1432 return (0); 1433 1434 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1435 lr->lr_foid, ZB_ZIL_LEVEL, 1436 lr->lr_offset / BP_GET_LSIZE(bp)); 1437 1438 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1439 } 1440 return (0); 1441 } 1442 1443 static void 1444 dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh) 1445 { 1446 uint64_t claim_txg = zh->zh_claim_txg; 1447 zil_scan_arg_t zsa = { dp, zh }; 1448 zilog_t *zilog; 1449 1450 ASSERT(spa_writeable(dp->dp_spa)); 1451 1452 /* 1453 * We only want to visit blocks that have been claimed but not yet 1454 * replayed (or, in read-only mode, blocks that *would* be claimed). 1455 */ 1456 if (claim_txg == 0) 1457 return; 1458 1459 zilog = zil_alloc(dp->dp_meta_objset, zh); 1460 1461 (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa, 1462 claim_txg, B_FALSE); 1463 1464 zil_free(zilog); 1465 } 1466 1467 /* 1468 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea 1469 * here is to sort the AVL tree by the order each block will be needed. 
1470 */ 1471 static int 1472 scan_prefetch_queue_compare(const void *a, const void *b) 1473 { 1474 const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b; 1475 const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc; 1476 const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc; 1477 1478 return (zbookmark_compare(spc_a->spc_datablkszsec, 1479 spc_a->spc_indblkshift, spc_b->spc_datablkszsec, 1480 spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb)); 1481 } 1482 1483 static void 1484 scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag) 1485 { 1486 if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { 1487 zfs_refcount_destroy(&spc->spc_refcnt); 1488 kmem_free(spc, sizeof (scan_prefetch_ctx_t)); 1489 } 1490 } 1491 1492 static scan_prefetch_ctx_t * 1493 scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag) 1494 { 1495 scan_prefetch_ctx_t *spc; 1496 1497 spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); 1498 zfs_refcount_create(&spc->spc_refcnt); 1499 zfs_refcount_add(&spc->spc_refcnt, tag); 1500 spc->spc_scn = scn; 1501 if (dnp != NULL) { 1502 spc->spc_datablkszsec = dnp->dn_datablkszsec; 1503 spc->spc_indblkshift = dnp->dn_indblkshift; 1504 spc->spc_root = B_FALSE; 1505 } else { 1506 spc->spc_datablkszsec = 0; 1507 spc->spc_indblkshift = 0; 1508 spc->spc_root = B_TRUE; 1509 } 1510 1511 return (spc); 1512 } 1513 1514 static void 1515 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag) 1516 { 1517 zfs_refcount_add(&spc->spc_refcnt, tag); 1518 } 1519 1520 static void 1521 scan_ds_prefetch_queue_clear(dsl_scan_t *scn) 1522 { 1523 spa_t *spa = scn->scn_dp->dp_spa; 1524 void *cookie = NULL; 1525 scan_prefetch_issue_ctx_t *spic = NULL; 1526 1527 mutex_enter(&spa->spa_scrub_lock); 1528 while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, 1529 &cookie)) != NULL) { 1530 scan_prefetch_ctx_rele(spic->spic_spc, scn); 1531 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1532 } 1533 mutex_exit(&spa->spa_scrub_lock); 1534 } 1535 1536 static boolean_t 1537 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, 1538 const zbookmark_phys_t *zb) 1539 { 1540 zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; 1541 dnode_phys_t tmp_dnp; 1542 dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; 1543 1544 if (zb->zb_objset != last_zb->zb_objset) 1545 return (B_TRUE); 1546 if ((int64_t)zb->zb_object < 0) 1547 return (B_FALSE); 1548 1549 tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; 1550 tmp_dnp.dn_indblkshift = spc->spc_indblkshift; 1551 1552 if (zbookmark_subtree_completed(dnp, zb, last_zb)) 1553 return (B_TRUE); 1554 1555 return (B_FALSE); 1556 } 1557 1558 static void 1559 dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) 1560 { 1561 avl_index_t idx; 1562 dsl_scan_t *scn = spc->spc_scn; 1563 spa_t *spa = scn->scn_dp->dp_spa; 1564 scan_prefetch_issue_ctx_t *spic; 1565 1566 if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp)) 1567 return; 1568 1569 if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg || 1570 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && 1571 BP_GET_TYPE(bp) != DMU_OT_OBJSET)) 1572 return; 1573 1574 if (dsl_scan_check_prefetch_resume(spc, zb)) 1575 return; 1576 1577 scan_prefetch_ctx_add_ref(spc, scn); 1578 spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); 1579 spic->spic_spc = spc; 1580 spic->spic_bp = *bp; 1581 spic->spic_zb = *zb; 1582 1583 /* 1584 * Add the IO to the queue of blocks to prefetch. 
This allows us to 1585 * prioritize blocks that we will need first for the main traversal 1586 * thread. 1587 */ 1588 mutex_enter(&spa->spa_scrub_lock); 1589 if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { 1590 /* this block is already queued for prefetch */ 1591 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1592 scan_prefetch_ctx_rele(spc, scn); 1593 mutex_exit(&spa->spa_scrub_lock); 1594 return; 1595 } 1596 1597 avl_insert(&scn->scn_prefetch_queue, spic, idx); 1598 cv_broadcast(&spa->spa_scrub_io_cv); 1599 mutex_exit(&spa->spa_scrub_lock); 1600 } 1601 1602 static void 1603 dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, 1604 uint64_t objset, uint64_t object) 1605 { 1606 int i; 1607 zbookmark_phys_t zb; 1608 scan_prefetch_ctx_t *spc; 1609 1610 if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 1611 return; 1612 1613 SET_BOOKMARK(&zb, objset, object, 0, 0); 1614 1615 spc = scan_prefetch_ctx_create(scn, dnp, FTAG); 1616 1617 for (i = 0; i < dnp->dn_nblkptr; i++) { 1618 zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); 1619 zb.zb_blkid = i; 1620 dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); 1621 } 1622 1623 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1624 zb.zb_level = 0; 1625 zb.zb_blkid = DMU_SPILL_BLKID; 1626 dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); 1627 } 1628 1629 scan_prefetch_ctx_rele(spc, FTAG); 1630 } 1631 1632 static void 1633 dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1634 arc_buf_t *buf, void *private) 1635 { 1636 scan_prefetch_ctx_t *spc = private; 1637 dsl_scan_t *scn = spc->spc_scn; 1638 spa_t *spa = scn->scn_dp->dp_spa; 1639 1640 /* broadcast that the IO has completed for rate limiting purposes */ 1641 mutex_enter(&spa->spa_scrub_lock); 1642 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 1643 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 1644 cv_broadcast(&spa->spa_scrub_io_cv); 1645 mutex_exit(&spa->spa_scrub_lock); 1646 1647 /* if there was an error or we are done prefetching, just cleanup */ 1648 if (buf == NULL || scn->scn_prefetch_stop) 1649 goto out; 1650 1651 if (BP_GET_LEVEL(bp) > 0) { 1652 int i; 1653 blkptr_t *cbp; 1654 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1655 zbookmark_phys_t czb; 1656 1657 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 1658 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1659 zb->zb_level - 1, zb->zb_blkid * epb + i); 1660 dsl_scan_prefetch(spc, cbp, &czb); 1661 } 1662 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 1663 dnode_phys_t *cdnp; 1664 int i; 1665 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 1666 1667 for (i = 0, cdnp = buf->b_data; i < epb; 1668 i += cdnp->dn_extra_slots + 1, 1669 cdnp += cdnp->dn_extra_slots + 1) { 1670 dsl_scan_prefetch_dnode(scn, cdnp, 1671 zb->zb_objset, zb->zb_blkid * epb + i); 1672 } 1673 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 1674 objset_phys_t *osp = buf->b_data; 1675 1676 dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, 1677 zb->zb_objset, DMU_META_DNODE_OBJECT); 1678 1679 if (OBJSET_BUF_HAS_USERUSED(buf)) { 1680 dsl_scan_prefetch_dnode(scn, 1681 &osp->os_groupused_dnode, zb->zb_objset, 1682 DMU_GROUPUSED_OBJECT); 1683 dsl_scan_prefetch_dnode(scn, 1684 &osp->os_userused_dnode, zb->zb_objset, 1685 DMU_USERUSED_OBJECT); 1686 } 1687 } 1688 1689 out: 1690 if (buf != NULL) 1691 arc_buf_destroy(buf, private); 1692 scan_prefetch_ctx_rele(spc, scn); 1693 } 1694 1695 /* ARGSUSED */ 1696 static void 1697 dsl_scan_prefetch_thread(void *arg) 1698 { 1699 dsl_scan_t *scn = arg; 1700 spa_t 
*spa = scn->scn_dp->dp_spa; 1701 scan_prefetch_issue_ctx_t *spic; 1702 1703 /* loop until we are told to stop */ 1704 while (!scn->scn_prefetch_stop) { 1705 arc_flags_t flags = ARC_FLAG_NOWAIT | 1706 ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; 1707 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 1708 1709 mutex_enter(&spa->spa_scrub_lock); 1710 1711 /* 1712 * Wait until we have an IO to issue and are not above our 1713 * maximum in flight limit. 1714 */ 1715 while (!scn->scn_prefetch_stop && 1716 (avl_numnodes(&scn->scn_prefetch_queue) == 0 || 1717 spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { 1718 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 1719 } 1720 1721 /* recheck if we should stop since we waited for the cv */ 1722 if (scn->scn_prefetch_stop) { 1723 mutex_exit(&spa->spa_scrub_lock); 1724 break; 1725 } 1726 1727 /* remove the prefetch IO from the tree */ 1728 spic = avl_first(&scn->scn_prefetch_queue); 1729 spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); 1730 avl_remove(&scn->scn_prefetch_queue, spic); 1731 1732 mutex_exit(&spa->spa_scrub_lock); 1733 1734 if (BP_IS_PROTECTED(&spic->spic_bp)) { 1735 ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || 1736 BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); 1737 ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); 1738 zio_flags |= ZIO_FLAG_RAW; 1739 } 1740 1741 /* issue the prefetch asynchronously */ 1742 (void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, 1743 &spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc, 1744 ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb); 1745 1746 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1747 } 1748 1749 ASSERT(scn->scn_prefetch_stop); 1750 1751 /* free any prefetches we didn't get to complete */ 1752 mutex_enter(&spa->spa_scrub_lock); 1753 while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { 1754 avl_remove(&scn->scn_prefetch_queue, spic); 1755 scan_prefetch_ctx_rele(spic->spic_spc, scn); 1756 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1757 } 1758 ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); 1759 mutex_exit(&spa->spa_scrub_lock); 1760 } 1761 1762 static boolean_t 1763 dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp, 1764 const zbookmark_phys_t *zb) 1765 { 1766 /* 1767 * We never skip over user/group accounting objects (obj<0) 1768 */ 1769 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) && 1770 (int64_t)zb->zb_object >= 0) { 1771 /* 1772 * If we already visited this bp & everything below (in 1773 * a prior txg sync), don't bother doing it again. 1774 */ 1775 if (zbookmark_subtree_completed(dnp, zb, 1776 &scn->scn_phys.scn_bookmark)) 1777 return (B_TRUE); 1778 1779 /* 1780 * If we found the block we're trying to resume from, or 1781 * we went past it to a different object, zero it out to 1782 * indicate that it's OK to start checking for suspending 1783 * again. 
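 * (Until then, the non-zero bookmark tells us that everything logically
 * before it was already handled in a prior txg sync, so we skip those
 * blocks without re-reading them.)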
1784 */ 1785 if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 || 1786 zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) { 1787 dprintf("resuming at %llx/%llx/%llx/%llx\n", 1788 (longlong_t)zb->zb_objset, 1789 (longlong_t)zb->zb_object, 1790 (longlong_t)zb->zb_level, 1791 (longlong_t)zb->zb_blkid); 1792 bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb)); 1793 } 1794 } 1795 return (B_FALSE); 1796 } 1797 1798 static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, 1799 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 1800 dmu_objset_type_t ostype, dmu_tx_t *tx); 1801 inline __attribute__((always_inline)) static void dsl_scan_visitdnode( 1802 dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype, 1803 dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx); 1804 1805 /* 1806 * Return nonzero on i/o error. 1807 * Return new buf to write out in *bufp. 1808 */ 1809 inline __attribute__((always_inline)) static int 1810 dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, 1811 dnode_phys_t *dnp, const blkptr_t *bp, 1812 const zbookmark_phys_t *zb, dmu_tx_t *tx) 1813 { 1814 dsl_pool_t *dp = scn->scn_dp; 1815 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 1816 int err; 1817 1818 ASSERT(!BP_IS_REDACTED(bp)); 1819 1820 if (BP_GET_LEVEL(bp) > 0) { 1821 arc_flags_t flags = ARC_FLAG_WAIT; 1822 int i; 1823 blkptr_t *cbp; 1824 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1825 arc_buf_t *buf; 1826 1827 err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, 1828 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1829 if (err) { 1830 scn->scn_phys.scn_errors++; 1831 return (err); 1832 } 1833 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 1834 zbookmark_phys_t czb; 1835 1836 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 1837 zb->zb_level - 1, 1838 zb->zb_blkid * epb + i); 1839 dsl_scan_visitbp(cbp, &czb, dnp, 1840 ds, scn, ostype, tx); 1841 } 1842 arc_buf_destroy(buf, &buf); 1843 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 1844 arc_flags_t flags = ARC_FLAG_WAIT; 1845 dnode_phys_t *cdnp; 1846 int i; 1847 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 1848 arc_buf_t *buf; 1849 1850 if (BP_IS_PROTECTED(bp)) { 1851 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 1852 zio_flags |= ZIO_FLAG_RAW; 1853 } 1854 1855 err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, 1856 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1857 if (err) { 1858 scn->scn_phys.scn_errors++; 1859 return (err); 1860 } 1861 for (i = 0, cdnp = buf->b_data; i < epb; 1862 i += cdnp->dn_extra_slots + 1, 1863 cdnp += cdnp->dn_extra_slots + 1) { 1864 dsl_scan_visitdnode(scn, ds, ostype, 1865 cdnp, zb->zb_blkid * epb + i, tx); 1866 } 1867 1868 arc_buf_destroy(buf, &buf); 1869 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 1870 arc_flags_t flags = ARC_FLAG_WAIT; 1871 objset_phys_t *osp; 1872 arc_buf_t *buf; 1873 1874 err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf, 1875 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 1876 if (err) { 1877 scn->scn_phys.scn_errors++; 1878 return (err); 1879 } 1880 1881 osp = buf->b_data; 1882 1883 dsl_scan_visitdnode(scn, ds, osp->os_type, 1884 &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx); 1885 1886 if (OBJSET_BUF_HAS_USERUSED(buf)) { 1887 /* 1888 * We also always visit user/group/project accounting 1889 * objects, and never skip them, even if we are 1890 * suspending. This is necessary so that the 1891 * space deltas from this txg get integrated. 
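 * These accounting dnodes are embedded in the objset_phys_t itself
 * (they are not reachable through the meta-dnode), so this is the only
 * place they can be picked up.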
1892 */ 1893 if (OBJSET_BUF_HAS_PROJECTUSED(buf)) 1894 dsl_scan_visitdnode(scn, ds, osp->os_type, 1895 &osp->os_projectused_dnode, 1896 DMU_PROJECTUSED_OBJECT, tx); 1897 dsl_scan_visitdnode(scn, ds, osp->os_type, 1898 &osp->os_groupused_dnode, 1899 DMU_GROUPUSED_OBJECT, tx); 1900 dsl_scan_visitdnode(scn, ds, osp->os_type, 1901 &osp->os_userused_dnode, 1902 DMU_USERUSED_OBJECT, tx); 1903 } 1904 arc_buf_destroy(buf, &buf); 1905 } 1906 1907 return (0); 1908 } 1909 1910 inline __attribute__((always_inline)) static void 1911 dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds, 1912 dmu_objset_type_t ostype, dnode_phys_t *dnp, 1913 uint64_t object, dmu_tx_t *tx) 1914 { 1915 int j; 1916 1917 for (j = 0; j < dnp->dn_nblkptr; j++) { 1918 zbookmark_phys_t czb; 1919 1920 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 1921 dnp->dn_nlevels - 1, j); 1922 dsl_scan_visitbp(&dnp->dn_blkptr[j], 1923 &czb, dnp, ds, scn, ostype, tx); 1924 } 1925 1926 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1927 zbookmark_phys_t czb; 1928 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 1929 0, DMU_SPILL_BLKID); 1930 dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp), 1931 &czb, dnp, ds, scn, ostype, tx); 1932 } 1933 } 1934 1935 /* 1936 * The arguments are in this order because mdb can only print the 1937 * first 5; we want them to be useful. 1938 */ 1939 static void 1940 dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb, 1941 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 1942 dmu_objset_type_t ostype, dmu_tx_t *tx) 1943 { 1944 dsl_pool_t *dp = scn->scn_dp; 1945 blkptr_t *bp_toread = NULL; 1946 1947 if (dsl_scan_check_suspend(scn, zb)) 1948 return; 1949 1950 if (dsl_scan_check_resume(scn, dnp, zb)) 1951 return; 1952 1953 scn->scn_visited_this_txg++; 1954 1955 /* 1956 * This debugging is commented out to conserve stack space. This 1957 * function is called recursively and the debugging adds several 1958 * bytes to the stack for each call. It can be commented back in 1959 * if required to debug an issue in dsl_scan_visitbp(). 1960 * 1961 * dprintf_bp(bp, 1962 * "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p", 1963 * ds, ds ? ds->ds_object : 0, 1964 * zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid, 1965 * bp); 1966 */ 1967 1968 if (BP_IS_HOLE(bp)) { 1969 scn->scn_holes_this_txg++; 1970 return; 1971 } 1972 1973 if (BP_IS_REDACTED(bp)) { 1974 ASSERT(dsl_dataset_feature_is_active(ds, 1975 SPA_FEATURE_REDACTED_DATASETS)); 1976 return; 1977 } 1978 1979 if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) { 1980 scn->scn_lt_min_this_txg++; 1981 return; 1982 } 1983 1984 bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); 1985 *bp_toread = *bp; 1986 1987 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0) 1988 goto out; 1989 1990 /* 1991 * If dsl_scan_ddt() has already visited this block, it will have 1992 * already done any translations or scrubbing, so don't call the 1993 * callback again. 1994 */ 1995 if (ddt_class_contains(dp->dp_spa, 1996 scn->scn_phys.scn_ddt_class_max, bp)) { 1997 scn->scn_ddt_contained_this_txg++; 1998 goto out; 1999 } 2000 2001 /* 2002 * If this block is from the future (after cur_max_txg), then we 2003 * are doing this on behalf of a deleted snapshot, and we will 2004 * revisit the future block on the next pass of this dataset. 2005 * Don't scan it now unless we need to because something 2006 * under it was modified. 
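 * (For a snapshot, scn_cur_max_txg is its creation txg (see
 * dsl_scan_ds_maxtxg()), so "from the future" means the block was born
 * after the snapshot we are nominally traversing.)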
2007 */ 2008 if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { 2009 scn->scn_gt_max_this_txg++; 2010 goto out; 2011 } 2012 2013 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); 2014 2015 out: 2016 kmem_free(bp_toread, sizeof (blkptr_t)); 2017 } 2018 2019 static void 2020 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, 2021 dmu_tx_t *tx) 2022 { 2023 zbookmark_phys_t zb; 2024 scan_prefetch_ctx_t *spc; 2025 2026 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, 2027 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 2028 2029 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { 2030 SET_BOOKMARK(&scn->scn_prefetch_bookmark, 2031 zb.zb_objset, 0, 0, 0); 2032 } else { 2033 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; 2034 } 2035 2036 scn->scn_objsets_visited_this_txg++; 2037 2038 spc = scan_prefetch_ctx_create(scn, NULL, FTAG); 2039 dsl_scan_prefetch(spc, bp, &zb); 2040 scan_prefetch_ctx_rele(spc, FTAG); 2041 2042 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); 2043 2044 dprintf_ds(ds, "finished scan%s", ""); 2045 } 2046 2047 static void 2048 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) 2049 { 2050 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { 2051 if (ds->ds_is_snapshot) { 2052 /* 2053 * Note: 2054 * - scn_cur_{min,max}_txg stays the same. 2055 * - Setting the flag is not really necessary if 2056 * scn_cur_max_txg == scn_max_txg, because there 2057 * is nothing after this snapshot that we care 2058 * about. However, we set it anyway and then 2059 * ignore it when we retraverse it in 2060 * dsl_scan_visitds(). 2061 */ 2062 scn_phys->scn_bookmark.zb_objset = 2063 dsl_dataset_phys(ds)->ds_next_snap_obj; 2064 zfs_dbgmsg("destroying ds %llu; currently traversing; " 2065 "reset zb_objset to %llu", 2066 (u_longlong_t)ds->ds_object, 2067 (u_longlong_t)dsl_dataset_phys(ds)-> 2068 ds_next_snap_obj); 2069 scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN; 2070 } else { 2071 SET_BOOKMARK(&scn_phys->scn_bookmark, 2072 ZB_DESTROYED_OBJSET, 0, 0, 0); 2073 zfs_dbgmsg("destroying ds %llu; currently traversing; " 2074 "reset bookmark to -1,0,0,0", 2075 (u_longlong_t)ds->ds_object); 2076 } 2077 } 2078 } 2079 2080 /* 2081 * Invoked when a dataset is destroyed. We need to make sure that: 2082 * 2083 * 1) If it is the dataset that was currently being scanned, we write 2084 * a new dsl_scan_phys_t and marking the objset reference in it 2085 * as destroyed. 2086 * 2) Remove it from the work queue, if it was present. 2087 * 2088 * If the dataset was actually a snapshot, instead of marking the dataset 2089 * as destroyed, we instead substitute the next snapshot in line. 
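 * For example (with an illustrative objset number): if the bookmark
 * currently points at snapshot objset 55 and that snapshot is destroyed,
 * ds_destroyed_scn_phys() redirects zb_objset to 55's ds_next_snap_obj
 * and sets DSF_VISIT_DS_AGAIN so that dataset gets another pass.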
2090 */ 2091 void 2092 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) 2093 { 2094 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2095 dsl_scan_t *scn = dp->dp_scan; 2096 uint64_t mintxg; 2097 2098 if (!dsl_scan_is_running(scn)) 2099 return; 2100 2101 ds_destroyed_scn_phys(ds, &scn->scn_phys); 2102 ds_destroyed_scn_phys(ds, &scn->scn_phys_cached); 2103 2104 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2105 scan_ds_queue_remove(scn, ds->ds_object); 2106 if (ds->ds_is_snapshot) 2107 scan_ds_queue_insert(scn, 2108 dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg); 2109 } 2110 2111 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2112 ds->ds_object, &mintxg) == 0) { 2113 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); 2114 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2115 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2116 if (ds->ds_is_snapshot) { 2117 /* 2118 * We keep the same mintxg; it could be > 2119 * ds_creation_txg if the previous snapshot was 2120 * deleted too. 2121 */ 2122 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2123 scn->scn_phys.scn_queue_obj, 2124 dsl_dataset_phys(ds)->ds_next_snap_obj, 2125 mintxg, tx) == 0); 2126 zfs_dbgmsg("destroying ds %llu; in queue; " 2127 "replacing with %llu", 2128 (u_longlong_t)ds->ds_object, 2129 (u_longlong_t)dsl_dataset_phys(ds)-> 2130 ds_next_snap_obj); 2131 } else { 2132 zfs_dbgmsg("destroying ds %llu; in queue; removing", 2133 (u_longlong_t)ds->ds_object); 2134 } 2135 } 2136 2137 /* 2138 * dsl_scan_sync() should be called after this, and should sync 2139 * out our changed state, but just to be safe, do it here. 2140 */ 2141 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2142 } 2143 2144 static void 2145 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark) 2146 { 2147 if (scn_bookmark->zb_objset == ds->ds_object) { 2148 scn_bookmark->zb_objset = 2149 dsl_dataset_phys(ds)->ds_prev_snap_obj; 2150 zfs_dbgmsg("snapshotting ds %llu; currently traversing; " 2151 "reset zb_objset to %llu", 2152 (u_longlong_t)ds->ds_object, 2153 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2154 } 2155 } 2156 2157 /* 2158 * Called when a dataset is snapshotted. If we were currently traversing 2159 * this snapshot, we reset our bookmark to point at the newly created 2160 * snapshot. We also modify our work queue to remove the old snapshot and 2161 * replace with the new one. 
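 * (At this point the newly created snapshot is the head dataset's
 * ds_prev_snap_obj, which is why both the bookmark and the queue entry
 * are redirected to that object.)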
2162 */ 2163 void 2164 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) 2165 { 2166 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2167 dsl_scan_t *scn = dp->dp_scan; 2168 uint64_t mintxg; 2169 2170 if (!dsl_scan_is_running(scn)) 2171 return; 2172 2173 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); 2174 2175 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); 2176 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); 2177 2178 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2179 scan_ds_queue_remove(scn, ds->ds_object); 2180 scan_ds_queue_insert(scn, 2181 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); 2182 } 2183 2184 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2185 ds->ds_object, &mintxg) == 0) { 2186 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2187 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2188 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2189 scn->scn_phys.scn_queue_obj, 2190 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); 2191 zfs_dbgmsg("snapshotting ds %llu; in queue; " 2192 "replacing with %llu", 2193 (u_longlong_t)ds->ds_object, 2194 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2195 } 2196 2197 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2198 } 2199 2200 static void 2201 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, 2202 zbookmark_phys_t *scn_bookmark) 2203 { 2204 if (scn_bookmark->zb_objset == ds1->ds_object) { 2205 scn_bookmark->zb_objset = ds2->ds_object; 2206 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 2207 "reset zb_objset to %llu", 2208 (u_longlong_t)ds1->ds_object, 2209 (u_longlong_t)ds2->ds_object); 2210 } else if (scn_bookmark->zb_objset == ds2->ds_object) { 2211 scn_bookmark->zb_objset = ds1->ds_object; 2212 zfs_dbgmsg("clone_swap ds %llu; currently traversing; " 2213 "reset zb_objset to %llu", 2214 (u_longlong_t)ds2->ds_object, 2215 (u_longlong_t)ds1->ds_object); 2216 } 2217 } 2218 2219 /* 2220 * Called when an origin dataset and its clone are swapped. If we were 2221 * currently traversing the dataset, we need to switch to traversing the 2222 * newly promoted clone. 2223 */ 2224 void 2225 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) 2226 { 2227 dsl_pool_t *dp = ds1->ds_dir->dd_pool; 2228 dsl_scan_t *scn = dp->dp_scan; 2229 uint64_t mintxg1, mintxg2; 2230 boolean_t ds1_queued, ds2_queued; 2231 2232 if (!dsl_scan_is_running(scn)) 2233 return; 2234 2235 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); 2236 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); 2237 2238 /* 2239 * Handle the in-memory scan queue. 2240 */ 2241 ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1); 2242 ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2); 2243 2244 /* Sanity checking. */ 2245 if (ds1_queued) { 2246 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2247 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2248 } 2249 if (ds2_queued) { 2250 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2251 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2252 } 2253 2254 if (ds1_queued && ds2_queued) { 2255 /* 2256 * If both are queued, we don't need to do anything. 2257 * The swapping code below would not handle this case correctly, 2258 * since we can't insert ds2 if it is already there. That's 2259 * because scan_ds_queue_insert() prohibits a duplicate insert 2260 * and panics. 
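 * Leaving the queue untouched is correct here: the sanity checks above
 * guarantee that both entries carry the same mintxg, so swapping them
 * would produce an identical queue anyway.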
2261 */ 2262 } else if (ds1_queued) { 2263 scan_ds_queue_remove(scn, ds1->ds_object); 2264 scan_ds_queue_insert(scn, ds2->ds_object, mintxg1); 2265 } else if (ds2_queued) { 2266 scan_ds_queue_remove(scn, ds2->ds_object); 2267 scan_ds_queue_insert(scn, ds1->ds_object, mintxg2); 2268 } 2269 2270 /* 2271 * Handle the on-disk scan queue. 2272 * The on-disk state is an out-of-date version of the in-memory state, 2273 * so the in-memory and on-disk values for ds1_queued and ds2_queued may 2274 * be different. Therefore we need to apply the swap logic to the 2275 * on-disk state independently of the in-memory state. 2276 */ 2277 ds1_queued = zap_lookup_int_key(dp->dp_meta_objset, 2278 scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0; 2279 ds2_queued = zap_lookup_int_key(dp->dp_meta_objset, 2280 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0; 2281 2282 /* Sanity checking. */ 2283 if (ds1_queued) { 2284 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2285 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2286 } 2287 if (ds2_queued) { 2288 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2289 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2290 } 2291 2292 if (ds1_queued && ds2_queued) { 2293 /* 2294 * If both are queued, we don't need to do anything. 2295 * Alternatively, we could check for EEXIST from 2296 * zap_add_int_key() and back out to the original state, but 2297 * that would be more work than checking for this case upfront. 2298 */ 2299 } else if (ds1_queued) { 2300 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2301 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 2302 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2303 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx)); 2304 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2305 "replacing with %llu", 2306 (u_longlong_t)ds1->ds_object, 2307 (u_longlong_t)ds2->ds_object); 2308 } else if (ds2_queued) { 2309 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2310 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 2311 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2312 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx)); 2313 zfs_dbgmsg("clone_swap ds %llu; in queue; " 2314 "replacing with %llu", 2315 (u_longlong_t)ds2->ds_object, 2316 (u_longlong_t)ds1->ds_object); 2317 } 2318 2319 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2320 } 2321 2322 /* ARGSUSED */ 2323 static int 2324 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2325 { 2326 uint64_t originobj = *(uint64_t *)arg; 2327 dsl_dataset_t *ds; 2328 int err; 2329 dsl_scan_t *scn = dp->dp_scan; 2330 2331 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) 2332 return (0); 2333 2334 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2335 if (err) 2336 return (err); 2337 2338 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { 2339 dsl_dataset_t *prev; 2340 err = dsl_dataset_hold_obj(dp, 2341 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2342 2343 dsl_dataset_rele(ds, FTAG); 2344 if (err) 2345 return (err); 2346 ds = prev; 2347 } 2348 scan_ds_queue_insert(scn, ds->ds_object, 2349 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2350 dsl_dataset_rele(ds, FTAG); 2351 return (0); 2352 } 2353 2354 static void 2355 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 2356 { 2357 dsl_pool_t *dp = scn->scn_dp; 2358 dsl_dataset_t *ds; 2359 2360 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2361 2362 if 
(scn->scn_phys.scn_cur_min_txg >= 2363 scn->scn_phys.scn_max_txg) { 2364 /* 2365 * This can happen if this snapshot was created after the 2366 * scan started, and we already completed a previous snapshot 2367 * that was created after the scan started. This snapshot 2368 * only references blocks with: 2369 * 2370 * birth < our ds_creation_txg 2371 * cur_min_txg is no less than ds_creation_txg. 2372 * We have already visited these blocks. 2373 * or 2374 * birth > scn_max_txg 2375 * The scan requested not to visit these blocks. 2376 * 2377 * Subsequent snapshots (and clones) can reference our 2378 * blocks, or blocks with even higher birth times. 2379 * Therefore we do not need to visit them either, 2380 * so we do not add them to the work queue. 2381 * 2382 * Note that checking for cur_min_txg >= cur_max_txg 2383 * is not sufficient, because in that case we may need to 2384 * visit subsequent snapshots. This happens when min_txg > 0, 2385 * which raises cur_min_txg. In this case we will visit 2386 * this dataset but skip all of its blocks, because the 2387 * rootbp's birth time is < cur_min_txg. Then we will 2388 * add the next snapshots/clones to the work queue. 2389 */ 2390 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2391 dsl_dataset_name(ds, dsname); 2392 zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because " 2393 "cur_min_txg (%llu) >= max_txg (%llu)", 2394 (longlong_t)dsobj, dsname, 2395 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2396 (longlong_t)scn->scn_phys.scn_max_txg); 2397 kmem_free(dsname, MAXNAMELEN); 2398 2399 goto out; 2400 } 2401 2402 /* 2403 * Only the ZIL in the head (non-snapshot) is valid. Even though 2404 * snapshots can have ZIL block pointers (which may be the same 2405 * BP as in the head), they must be ignored. In addition, $ORIGIN 2406 * doesn't have a objset (i.e. its ds_bp is a hole) so we don't 2407 * need to look for a ZIL in it either. So we traverse the ZIL here, 2408 * rather than in scan_recurse(), because the regular snapshot 2409 * block-sharing rules don't apply to it. 2410 */ 2411 if (!dsl_dataset_is_snapshot(ds) && 2412 (dp->dp_origin_snap == NULL || 2413 ds->ds_dir != dp->dp_origin_snap->ds_dir)) { 2414 objset_t *os; 2415 if (dmu_objset_from_ds(ds, &os) != 0) { 2416 goto out; 2417 } 2418 dsl_scan_zil(dp, &os->os_zil_header); 2419 } 2420 2421 /* 2422 * Iterate over the bps in this ds. 2423 */ 2424 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2425 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 2426 dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx); 2427 rrw_exit(&ds->ds_bp_rwlock, FTAG); 2428 2429 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2430 dsl_dataset_name(ds, dsname); 2431 zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " 2432 "suspending=%u", 2433 (longlong_t)dsobj, dsname, 2434 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2435 (longlong_t)scn->scn_phys.scn_cur_max_txg, 2436 (int)scn->scn_suspending); 2437 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); 2438 2439 if (scn->scn_suspending) 2440 goto out; 2441 2442 /* 2443 * We've finished this pass over this dataset. 2444 */ 2445 2446 /* 2447 * If we did not completely visit this dataset, do another pass. 2448 */ 2449 if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) { 2450 zfs_dbgmsg("incomplete pass; visiting again"); 2451 scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN; 2452 scan_ds_queue_insert(scn, ds->ds_object, 2453 scn->scn_phys.scn_cur_max_txg); 2454 goto out; 2455 } 2456 2457 /* 2458 * Add descendant datasets to work queue. 
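 * The next snapshot (and, at a branch point, each clone) is enqueued
 * with a mintxg equal to this dataset's creation txg, so only blocks
 * born after this dataset are examined when that entry is processed.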
2459 */ 2460 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { 2461 scan_ds_queue_insert(scn, 2462 dsl_dataset_phys(ds)->ds_next_snap_obj, 2463 dsl_dataset_phys(ds)->ds_creation_txg); 2464 } 2465 if (dsl_dataset_phys(ds)->ds_num_children > 1) { 2466 boolean_t usenext = B_FALSE; 2467 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { 2468 uint64_t count; 2469 /* 2470 * A bug in a previous version of the code could 2471 * cause upgrade_clones_cb() to not set 2472 * ds_next_snap_obj when it should, leading to a 2473 * missing entry. Therefore we can only use the 2474 * next_clones_obj when its count is correct. 2475 */ 2476 int err = zap_count(dp->dp_meta_objset, 2477 dsl_dataset_phys(ds)->ds_next_clones_obj, &count); 2478 if (err == 0 && 2479 count == dsl_dataset_phys(ds)->ds_num_children - 1) 2480 usenext = B_TRUE; 2481 } 2482 2483 if (usenext) { 2484 zap_cursor_t zc; 2485 zap_attribute_t za; 2486 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2487 dsl_dataset_phys(ds)->ds_next_clones_obj); 2488 zap_cursor_retrieve(&zc, &za) == 0; 2489 (void) zap_cursor_advance(&zc)) { 2490 scan_ds_queue_insert(scn, 2491 zfs_strtonum(za.za_name, NULL), 2492 dsl_dataset_phys(ds)->ds_creation_txg); 2493 } 2494 zap_cursor_fini(&zc); 2495 } else { 2496 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2497 enqueue_clones_cb, &ds->ds_object, 2498 DS_FIND_CHILDREN)); 2499 } 2500 } 2501 2502 out: 2503 dsl_dataset_rele(ds, FTAG); 2504 } 2505 2506 /* ARGSUSED */ 2507 static int 2508 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2509 { 2510 dsl_dataset_t *ds; 2511 int err; 2512 dsl_scan_t *scn = dp->dp_scan; 2513 2514 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2515 if (err) 2516 return (err); 2517 2518 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { 2519 dsl_dataset_t *prev; 2520 err = dsl_dataset_hold_obj(dp, 2521 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2522 if (err) { 2523 dsl_dataset_rele(ds, FTAG); 2524 return (err); 2525 } 2526 2527 /* 2528 * If this is a clone, we don't need to worry about it for now. 2529 */ 2530 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { 2531 dsl_dataset_rele(ds, FTAG); 2532 dsl_dataset_rele(prev, FTAG); 2533 return (0); 2534 } 2535 dsl_dataset_rele(ds, FTAG); 2536 ds = prev; 2537 } 2538 2539 scan_ds_queue_insert(scn, ds->ds_object, 2540 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2541 dsl_dataset_rele(ds, FTAG); 2542 return (0); 2543 } 2544 2545 /* ARGSUSED */ 2546 void 2547 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, 2548 ddt_entry_t *dde, dmu_tx_t *tx) 2549 { 2550 const ddt_key_t *ddk = &dde->dde_key; 2551 ddt_phys_t *ddp = dde->dde_phys; 2552 blkptr_t bp; 2553 zbookmark_phys_t zb = { 0 }; 2554 int p; 2555 2556 if (!dsl_scan_is_running(scn)) 2557 return; 2558 2559 /* 2560 * This function is special because it is the only thing 2561 * that can add scan_io_t's to the vdev scan queues from 2562 * outside dsl_scan_sync(). For the most part this is ok 2563 * as long as it is called from within syncing context. 2564 * However, dsl_scan_sync() expects that no new sio's will 2565 * be added between when all the work for a scan is done 2566 * and the next txg when the scan is actually marked as 2567 * completed. This check ensures we do not issue new sio's 2568 * during this period. 
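 * (scn_done_txg records the txg in which the scan will be marked
 * complete; between the time it is set and that txg, no new scan work
 * may be queued.)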
2569 */ 2570 if (scn->scn_done_txg != 0) 2571 return; 2572 2573 for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2574 if (ddp->ddp_phys_birth == 0 || 2575 ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) 2576 continue; 2577 ddt_bp_create(checksum, ddk, ddp, &bp); 2578 2579 scn->scn_visited_this_txg++; 2580 scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb); 2581 } 2582 } 2583 2584 /* 2585 * Scrub/dedup interaction. 2586 * 2587 * If there are N references to a deduped block, we don't want to scrub it 2588 * N times -- ideally, we should scrub it exactly once. 2589 * 2590 * We leverage the fact that the dde's replication class (enum ddt_class) 2591 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest 2592 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order. 2593 * 2594 * To prevent excess scrubbing, the scrub begins by walking the DDT 2595 * to find all blocks with refcnt > 1, and scrubs each of these once. 2596 * Since there are two replication classes which contain blocks with 2597 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first. 2598 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1. 2599 * 2600 * There would be nothing more to say if a block's refcnt couldn't change 2601 * during a scrub, but of course it can so we must account for changes 2602 * in a block's replication class. 2603 * 2604 * Here's an example of what can occur: 2605 * 2606 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1 2607 * when visited during the top-down scrub phase, it will be scrubbed twice. 2608 * This negates our scrub optimization, but is otherwise harmless. 2609 * 2610 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1 2611 * on each visit during the top-down scrub phase, it will never be scrubbed. 2612 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's 2613 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to 2614 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1 2615 * while a scrub is in progress, it scrubs the block right then. 
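 * In short: blocks whose refcnt never changes are scrubbed exactly once;
 * blocks whose refcnt drops from > 1 to 1 may be scrubbed twice (harmless);
 * and blocks whose refcnt rises from 1 to > 1 are caught by the
 * ddt_sync_entry() notification described above.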
2616 */ 2617 static void 2618 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) 2619 { 2620 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; 2621 ddt_entry_t dde; 2622 int error; 2623 uint64_t n = 0; 2624 2625 bzero(&dde, sizeof (ddt_entry_t)); 2626 2627 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { 2628 ddt_t *ddt; 2629 2630 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) 2631 break; 2632 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", 2633 (longlong_t)ddb->ddb_class, 2634 (longlong_t)ddb->ddb_type, 2635 (longlong_t)ddb->ddb_checksum, 2636 (longlong_t)ddb->ddb_cursor); 2637 2638 /* There should be no pending changes to the dedup table */ 2639 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; 2640 ASSERT(avl_first(&ddt->ddt_tree) == NULL); 2641 2642 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); 2643 n++; 2644 2645 if (dsl_scan_check_suspend(scn, NULL)) 2646 break; 2647 } 2648 2649 zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; " 2650 "suspending=%u", (longlong_t)n, 2651 (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); 2652 2653 ASSERT(error == 0 || error == ENOENT); 2654 ASSERT(error != ENOENT || 2655 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); 2656 } 2657 2658 static uint64_t 2659 dsl_scan_ds_maxtxg(dsl_dataset_t *ds) 2660 { 2661 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; 2662 if (ds->ds_is_snapshot) 2663 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); 2664 return (smt); 2665 } 2666 2667 static void 2668 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) 2669 { 2670 scan_ds_t *sds; 2671 dsl_pool_t *dp = scn->scn_dp; 2672 2673 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= 2674 scn->scn_phys.scn_ddt_class_max) { 2675 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2676 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2677 dsl_scan_ddt(scn, tx); 2678 if (scn->scn_suspending) 2679 return; 2680 } 2681 2682 if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { 2683 /* First do the MOS & ORIGIN */ 2684 2685 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 2686 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 2687 dsl_scan_visit_rootbp(scn, NULL, 2688 &dp->dp_meta_rootbp, tx); 2689 spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp); 2690 if (scn->scn_suspending) 2691 return; 2692 2693 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { 2694 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2695 enqueue_cb, NULL, DS_FIND_CHILDREN)); 2696 } else { 2697 dsl_scan_visitds(scn, 2698 dp->dp_origin_snap->ds_object, tx); 2699 } 2700 ASSERT(!scn->scn_suspending); 2701 } else if (scn->scn_phys.scn_bookmark.zb_objset != 2702 ZB_DESTROYED_OBJSET) { 2703 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; 2704 /* 2705 * If we were suspended, continue from here. Note if the 2706 * ds we were suspended on was deleted, the zb_objset may 2707 * be -1, so we will skip this and find a new objset 2708 * below. 2709 */ 2710 dsl_scan_visitds(scn, dsobj, tx); 2711 if (scn->scn_suspending) 2712 return; 2713 } 2714 2715 /* 2716 * In case we suspended right at the end of the ds, zero the 2717 * bookmark so we don't think that we're still trying to resume. 2718 */ 2719 bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t)); 2720 2721 /* 2722 * Keep pulling things out of the dataset avl queue. Updates to the 2723 * persistent zap-object-as-queue happen only at checkpoints. 
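 * (The in-memory queue is authoritative between checkpoints;
 * scan_ds_queue_sync() rewrites the on-disk ZAP from it when a
 * checkpoint is taken.)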
2724 */ 2725 while ((sds = avl_first(&scn->scn_queue)) != NULL) { 2726 dsl_dataset_t *ds; 2727 uint64_t dsobj = sds->sds_dsobj; 2728 uint64_t txg = sds->sds_txg; 2729 2730 /* dequeue and free the ds from the queue */ 2731 scan_ds_queue_remove(scn, dsobj); 2732 sds = NULL; 2733 2734 /* set up min / max txg */ 2735 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2736 if (txg != 0) { 2737 scn->scn_phys.scn_cur_min_txg = 2738 MAX(scn->scn_phys.scn_min_txg, txg); 2739 } else { 2740 scn->scn_phys.scn_cur_min_txg = 2741 MAX(scn->scn_phys.scn_min_txg, 2742 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2743 } 2744 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); 2745 dsl_dataset_rele(ds, FTAG); 2746 2747 dsl_scan_visitds(scn, dsobj, tx); 2748 if (scn->scn_suspending) 2749 return; 2750 } 2751 2752 /* No more objsets to fetch, we're done */ 2753 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; 2754 ASSERT0(scn->scn_suspending); 2755 } 2756 2757 static uint64_t 2758 dsl_scan_count_leaves(vdev_t *vd) 2759 { 2760 uint64_t i, leaves = 0; 2761 2762 /* we only count leaves that belong to the main pool and are readable */ 2763 if (vd->vdev_islog || vd->vdev_isspare || 2764 vd->vdev_isl2cache || !vdev_readable(vd)) 2765 return (0); 2766 2767 if (vd->vdev_ops->vdev_op_leaf) 2768 return (1); 2769 2770 for (i = 0; i < vd->vdev_children; i++) { 2771 leaves += dsl_scan_count_leaves(vd->vdev_child[i]); 2772 } 2773 2774 return (leaves); 2775 } 2776 2777 static void 2778 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) 2779 { 2780 int i; 2781 uint64_t cur_size = 0; 2782 2783 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 2784 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); 2785 } 2786 2787 q->q_total_zio_size_this_txg += cur_size; 2788 q->q_zios_this_txg++; 2789 } 2790 2791 static void 2792 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, 2793 uint64_t end) 2794 { 2795 q->q_total_seg_size_this_txg += end - start; 2796 q->q_segs_this_txg++; 2797 } 2798 2799 static boolean_t 2800 scan_io_queue_check_suspend(dsl_scan_t *scn) 2801 { 2802 /* See comment in dsl_scan_check_suspend() */ 2803 uint64_t curr_time_ns = gethrtime(); 2804 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 2805 uint64_t sync_time_ns = curr_time_ns - 2806 scn->scn_dp->dp_spa->spa_sync_starttime; 2807 int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max; 2808 int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 2809 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 2810 2811 return ((NSEC2MSEC(scan_time_ns) > mintime && 2812 (dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent || 2813 txg_sync_waiting(scn->scn_dp) || 2814 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 2815 spa_shutting_down(scn->scn_dp->dp_spa)); 2816 } 2817 2818 /* 2819 * Given a list of scan_io_t's in io_list, this issues the I/Os out to 2820 * disk. This consumes the io_list and frees the scan_io_t's. This is 2821 * called when emptying queues, either when we're up against the memory 2822 * limit or when we have finished scanning. Returns B_TRUE if we stopped 2823 * processing the list before we finished. Any sios that were not issued 2824 * will remain in the io_list. 
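 * The caller (scan_io_queues_run_one()) re-inserts any leftover sios
 * into the queue before returning, so a suspension never loses work.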
2825 */ 2826 static boolean_t 2827 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) 2828 { 2829 dsl_scan_t *scn = queue->q_scn; 2830 scan_io_t *sio; 2831 int64_t bytes_issued = 0; 2832 boolean_t suspended = B_FALSE; 2833 2834 while ((sio = list_head(io_list)) != NULL) { 2835 blkptr_t bp; 2836 2837 if (scan_io_queue_check_suspend(scn)) { 2838 suspended = B_TRUE; 2839 break; 2840 } 2841 2842 sio2bp(sio, &bp); 2843 bytes_issued += SIO_GET_ASIZE(sio); 2844 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, 2845 &sio->sio_zb, queue); 2846 (void) list_remove_head(io_list); 2847 scan_io_queues_update_zio_stats(queue, &bp); 2848 sio_free(sio); 2849 } 2850 2851 atomic_add_64(&scn->scn_bytes_pending, -bytes_issued); 2852 2853 return (suspended); 2854 } 2855 2856 /* 2857 * This function removes sios from an IO queue which reside within a given 2858 * range_seg_t and inserts them (in offset order) into a list. Note that 2859 * we only ever return a maximum of 32 sios at once. If there are more sios 2860 * to process within this segment that did not make it onto the list we 2861 * return B_TRUE and otherwise B_FALSE. 2862 */ 2863 static boolean_t 2864 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) 2865 { 2866 scan_io_t *srch_sio, *sio, *next_sio; 2867 avl_index_t idx; 2868 uint_t num_sios = 0; 2869 int64_t bytes_issued = 0; 2870 2871 ASSERT(rs != NULL); 2872 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2873 2874 srch_sio = sio_alloc(1); 2875 srch_sio->sio_nr_dvas = 1; 2876 SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr)); 2877 2878 /* 2879 * The exact start of the extent might not contain any matching zios, 2880 * so if that's the case, examine the next one in the tree. 2881 */ 2882 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 2883 sio_free(srch_sio); 2884 2885 if (sio == NULL) 2886 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); 2887 2888 while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 2889 queue->q_exts_by_addr) && num_sios <= 32) { 2890 ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs, 2891 queue->q_exts_by_addr)); 2892 ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs, 2893 queue->q_exts_by_addr)); 2894 2895 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); 2896 avl_remove(&queue->q_sios_by_addr, sio); 2897 queue->q_sio_memused -= SIO_GET_MUSED(sio); 2898 2899 bytes_issued += SIO_GET_ASIZE(sio); 2900 num_sios++; 2901 list_insert_tail(list, sio); 2902 sio = next_sio; 2903 } 2904 2905 /* 2906 * We limit the number of sios we process at once to 32 to avoid 2907 * biting off more than we can chew. If we didn't take everything 2908 * in the segment we update it to reflect the work we were able to 2909 * complete. Otherwise, we remove it from the range tree entirely. 2910 */ 2911 if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 2912 queue->q_exts_by_addr)) { 2913 range_tree_adjust_fill(queue->q_exts_by_addr, rs, 2914 -bytes_issued); 2915 range_tree_resize_segment(queue->q_exts_by_addr, rs, 2916 SIO_GET_OFFSET(sio), rs_get_end(rs, 2917 queue->q_exts_by_addr) - SIO_GET_OFFSET(sio)); 2918 2919 return (B_TRUE); 2920 } else { 2921 uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr); 2922 uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr); 2923 range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart); 2924 return (B_FALSE); 2925 } 2926 } 2927 2928 /* 2929 * This is called from the queue emptying thread and selects the next 2930 * extent from which we are to issue I/Os. 
The behavior of this function 2931 * depends on the state of the scan, the current memory consumption and 2932 * whether or not we are performing a scan shutdown. 2933 * 1) We select extents in an elevator algorithm (LBA-order) if the scan 2934 * needs to perform a checkpoint 2935 * 2) We select the largest available extent if we are up against the 2936 * memory limit. 2937 * 3) Otherwise we don't select any extents. 2938 */ 2939 static range_seg_t * 2940 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) 2941 { 2942 dsl_scan_t *scn = queue->q_scn; 2943 range_tree_t *rt = queue->q_exts_by_addr; 2944 2945 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 2946 ASSERT(scn->scn_is_sorted); 2947 2948 /* handle tunable overrides */ 2949 if (scn->scn_checkpointing || scn->scn_clearing) { 2950 if (zfs_scan_issue_strategy == 1) { 2951 return (range_tree_first(rt)); 2952 } else if (zfs_scan_issue_strategy == 2) { 2953 /* 2954 * We need to get the original entry in the by_addr 2955 * tree so we can modify it. 2956 */ 2957 range_seg_t *size_rs = 2958 zfs_btree_first(&queue->q_exts_by_size, NULL); 2959 if (size_rs == NULL) 2960 return (NULL); 2961 uint64_t start = rs_get_start(size_rs, rt); 2962 uint64_t size = rs_get_end(size_rs, rt) - start; 2963 range_seg_t *addr_rs = range_tree_find(rt, start, 2964 size); 2965 ASSERT3P(addr_rs, !=, NULL); 2966 ASSERT3U(rs_get_start(size_rs, rt), ==, 2967 rs_get_start(addr_rs, rt)); 2968 ASSERT3U(rs_get_end(size_rs, rt), ==, 2969 rs_get_end(addr_rs, rt)); 2970 return (addr_rs); 2971 } 2972 } 2973 2974 /* 2975 * During normal clearing, we want to issue our largest segments 2976 * first, keeping IO as sequential as possible, and leaving the 2977 * smaller extents for later with the hope that they might eventually 2978 * grow to larger sequential segments. However, when the scan is 2979 * checkpointing, no new extents will be added to the sorting queue, 2980 * so the way we are sorted now is as good as it will ever get. 2981 * In this case, we instead switch to issuing extents in LBA order. 2982 */ 2983 if (scn->scn_checkpointing) { 2984 return (range_tree_first(rt)); 2985 } else if (scn->scn_clearing) { 2986 /* 2987 * We need to get the original entry in the by_addr 2988 * tree so we can modify it. 
2989 */ 2990 range_seg_t *size_rs = zfs_btree_first(&queue->q_exts_by_size, 2991 NULL); 2992 if (size_rs == NULL) 2993 return (NULL); 2994 uint64_t start = rs_get_start(size_rs, rt); 2995 uint64_t size = rs_get_end(size_rs, rt) - start; 2996 range_seg_t *addr_rs = range_tree_find(rt, start, size); 2997 ASSERT3P(addr_rs, !=, NULL); 2998 ASSERT3U(rs_get_start(size_rs, rt), ==, rs_get_start(addr_rs, 2999 rt)); 3000 ASSERT3U(rs_get_end(size_rs, rt), ==, rs_get_end(addr_rs, rt)); 3001 return (addr_rs); 3002 } else { 3003 return (NULL); 3004 } 3005 } 3006 3007 static void 3008 scan_io_queues_run_one(void *arg) 3009 { 3010 dsl_scan_io_queue_t *queue = arg; 3011 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 3012 boolean_t suspended = B_FALSE; 3013 range_seg_t *rs = NULL; 3014 scan_io_t *sio = NULL; 3015 list_t sio_list; 3016 uint64_t bytes_per_leaf = zfs_scan_vdev_limit; 3017 uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd); 3018 3019 ASSERT(queue->q_scn->scn_is_sorted); 3020 3021 list_create(&sio_list, sizeof (scan_io_t), 3022 offsetof(scan_io_t, sio_nodes.sio_list_node)); 3023 mutex_enter(q_lock); 3024 3025 /* calculate maximum in-flight bytes for this txg (min 1MB) */ 3026 queue->q_maxinflight_bytes = 3027 MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); 3028 3029 /* reset per-queue scan statistics for this txg */ 3030 queue->q_total_seg_size_this_txg = 0; 3031 queue->q_segs_this_txg = 0; 3032 queue->q_total_zio_size_this_txg = 0; 3033 queue->q_zios_this_txg = 0; 3034 3035 /* loop until we run out of time or sios */ 3036 while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) { 3037 uint64_t seg_start = 0, seg_end = 0; 3038 boolean_t more_left = B_TRUE; 3039 3040 ASSERT(list_is_empty(&sio_list)); 3041 3042 /* loop while we still have sios left to process in this rs */ 3043 while (more_left) { 3044 scan_io_t *first_sio, *last_sio; 3045 3046 /* 3047 * We have selected which extent needs to be 3048 * processed next. Gather up the corresponding sios. 3049 */ 3050 more_left = scan_io_queue_gather(queue, rs, &sio_list); 3051 ASSERT(!list_is_empty(&sio_list)); 3052 first_sio = list_head(&sio_list); 3053 last_sio = list_tail(&sio_list); 3054 3055 seg_end = SIO_GET_END_OFFSET(last_sio); 3056 if (seg_start == 0) 3057 seg_start = SIO_GET_OFFSET(first_sio); 3058 3059 /* 3060 * Issuing sios can take a long time so drop the 3061 * queue lock. The sio queue won't be updated by 3062 * other threads since we're in syncing context so 3063 * we can be sure that our trees will remain exactly 3064 * as we left them. 3065 */ 3066 mutex_exit(q_lock); 3067 suspended = scan_io_queue_issue(queue, &sio_list); 3068 mutex_enter(q_lock); 3069 3070 if (suspended) 3071 break; 3072 } 3073 3074 /* update statistics for debugging purposes */ 3075 scan_io_queues_update_seg_stats(queue, seg_start, seg_end); 3076 3077 if (suspended) 3078 break; 3079 } 3080 3081 /* 3082 * If we were suspended in the middle of processing, 3083 * requeue any unfinished sios and exit. 3084 */ 3085 while ((sio = list_head(&sio_list)) != NULL) { 3086 list_remove(&sio_list, sio); 3087 scan_io_queue_insert_impl(queue, sio); 3088 } 3089 3090 mutex_exit(q_lock); 3091 list_destroy(&sio_list); 3092 } 3093 3094 /* 3095 * Performs an emptying run on all scan queues in the pool. This just 3096 * punches out one thread per top-level vdev, each of which processes 3097 * only that vdev's scan queue. We can parallelize the I/O here because 3098 * we know that each queue's I/Os only affect its own top-level vdev. 
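 * Each queue's in-flight limit for the run is nr_leaves *
 * zfs_scan_vdev_limit with a 1MB floor; e.g. a hypothetical 10-disk
 * top-level vdev with a 4MB per-leaf limit allows up to 40MB of scan
 * I/O outstanding against that vdev at once.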
3099 * 3100 * This function waits for the queue runs to complete, and must be 3101 * called from dsl_scan_sync (or in general, syncing context). 3102 */ 3103 static void 3104 scan_io_queues_run(dsl_scan_t *scn) 3105 { 3106 spa_t *spa = scn->scn_dp->dp_spa; 3107 3108 ASSERT(scn->scn_is_sorted); 3109 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3110 3111 if (scn->scn_bytes_pending == 0) 3112 return; 3113 3114 if (scn->scn_taskq == NULL) { 3115 int nthreads = spa->spa_root_vdev->vdev_children; 3116 3117 /* 3118 * We need to make this taskq *always* execute as many 3119 * threads in parallel as we have top-level vdevs and no 3120 * less, otherwise strange serialization of the calls to 3121 * scan_io_queues_run_one can occur during spa_sync runs 3122 * and that significantly impacts performance. 3123 */ 3124 scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads, 3125 minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE); 3126 } 3127 3128 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3129 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3130 3131 mutex_enter(&vd->vdev_scan_io_queue_lock); 3132 if (vd->vdev_scan_io_queue != NULL) { 3133 VERIFY(taskq_dispatch(scn->scn_taskq, 3134 scan_io_queues_run_one, vd->vdev_scan_io_queue, 3135 TQ_SLEEP) != TASKQID_INVALID); 3136 } 3137 mutex_exit(&vd->vdev_scan_io_queue_lock); 3138 } 3139 3140 /* 3141 * Wait for the queues to finish issuing their IOs for this run 3142 * before we return. There may still be IOs in flight at this 3143 * point. 3144 */ 3145 taskq_wait(scn->scn_taskq); 3146 } 3147 3148 static boolean_t 3149 dsl_scan_async_block_should_pause(dsl_scan_t *scn) 3150 { 3151 uint64_t elapsed_nanosecs; 3152 3153 if (zfs_recover) 3154 return (B_FALSE); 3155 3156 if (zfs_async_block_max_blocks != 0 && 3157 scn->scn_visited_this_txg >= zfs_async_block_max_blocks) { 3158 return (B_TRUE); 3159 } 3160 3161 if (zfs_max_async_dedup_frees != 0 && 3162 scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) { 3163 return (B_TRUE); 3164 } 3165 3166 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; 3167 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || 3168 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && 3169 txg_sync_waiting(scn->scn_dp)) || 3170 spa_shutting_down(scn->scn_dp->dp_spa)); 3171 } 3172 3173 static int 3174 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3175 { 3176 dsl_scan_t *scn = arg; 3177 3178 if (!scn->scn_is_bptree || 3179 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { 3180 if (dsl_scan_async_block_should_pause(scn)) 3181 return (SET_ERROR(ERESTART)); 3182 } 3183 3184 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, 3185 dmu_tx_get_txg(tx), bp, 0)); 3186 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 3187 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), 3188 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 3189 scn->scn_visited_this_txg++; 3190 if (BP_GET_DEDUP(bp)) 3191 scn->scn_dedup_frees_this_txg++; 3192 return (0); 3193 } 3194 3195 static void 3196 dsl_scan_update_stats(dsl_scan_t *scn) 3197 { 3198 spa_t *spa = scn->scn_dp->dp_spa; 3199 uint64_t i; 3200 uint64_t seg_size_total = 0, zio_size_total = 0; 3201 uint64_t seg_count_total = 0, zio_count_total = 0; 3202 3203 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3204 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3205 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; 3206 3207 if (queue == NULL) 3208 continue; 3209 3210 seg_size_total += 
queue->q_total_seg_size_this_txg; 3211 zio_size_total += queue->q_total_zio_size_this_txg; 3212 seg_count_total += queue->q_segs_this_txg; 3213 zio_count_total += queue->q_zios_this_txg; 3214 } 3215 3216 if (seg_count_total == 0 || zio_count_total == 0) { 3217 scn->scn_avg_seg_size_this_txg = 0; 3218 scn->scn_avg_zio_size_this_txg = 0; 3219 scn->scn_segs_this_txg = 0; 3220 scn->scn_zios_this_txg = 0; 3221 return; 3222 } 3223 3224 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; 3225 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; 3226 scn->scn_segs_this_txg = seg_count_total; 3227 scn->scn_zios_this_txg = zio_count_total; 3228 } 3229 3230 static int 3231 bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3232 dmu_tx_t *tx) 3233 { 3234 ASSERT(!bp_freed); 3235 return (dsl_scan_free_block_cb(arg, bp, tx)); 3236 } 3237 3238 static int 3239 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3240 dmu_tx_t *tx) 3241 { 3242 ASSERT(!bp_freed); 3243 dsl_scan_t *scn = arg; 3244 const dva_t *dva = &bp->blk_dva[0]; 3245 3246 if (dsl_scan_async_block_should_pause(scn)) 3247 return (SET_ERROR(ERESTART)); 3248 3249 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, 3250 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), 3251 DVA_GET_ASIZE(dva), tx); 3252 scn->scn_visited_this_txg++; 3253 return (0); 3254 } 3255 3256 boolean_t 3257 dsl_scan_active(dsl_scan_t *scn) 3258 { 3259 spa_t *spa = scn->scn_dp->dp_spa; 3260 uint64_t used = 0, comp, uncomp; 3261 boolean_t clones_left; 3262 3263 if (spa->spa_load_state != SPA_LOAD_NONE) 3264 return (B_FALSE); 3265 if (spa_shutting_down(spa)) 3266 return (B_FALSE); 3267 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || 3268 (scn->scn_async_destroying && !scn->scn_async_stalled)) 3269 return (B_TRUE); 3270 3271 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { 3272 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, 3273 &used, &comp, &uncomp); 3274 } 3275 clones_left = spa_livelist_delete_check(spa); 3276 return ((used != 0) || (clones_left)); 3277 } 3278 3279 static boolean_t 3280 dsl_scan_check_deferred(vdev_t *vd) 3281 { 3282 boolean_t need_resilver = B_FALSE; 3283 3284 for (int c = 0; c < vd->vdev_children; c++) { 3285 need_resilver |= 3286 dsl_scan_check_deferred(vd->vdev_child[c]); 3287 } 3288 3289 if (!vdev_is_concrete(vd) || vd->vdev_aux || 3290 !vd->vdev_ops->vdev_op_leaf) 3291 return (need_resilver); 3292 3293 if (!vd->vdev_resilver_deferred) 3294 need_resilver = B_TRUE; 3295 3296 return (need_resilver); 3297 } 3298 3299 static boolean_t 3300 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, 3301 uint64_t phys_birth) 3302 { 3303 vdev_t *vd; 3304 3305 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 3306 3307 if (vd->vdev_ops == &vdev_indirect_ops) { 3308 /* 3309 * The indirect vdev can point to multiple 3310 * vdevs. For simplicity, always create 3311 * the resilver zio_t. zio_vdev_io_start() 3312 * will bypass the child resilver i/o's if 3313 * they are on vdevs that don't have DTL's. 3314 */ 3315 return (B_TRUE); 3316 } 3317 3318 if (DVA_GET_GANG(dva)) { 3319 /* 3320 * Gang members may be spread across multiple 3321 * vdevs, so the best estimate we have is the 3322 * scrub range, which has already been checked. 3323 * XXX -- it would be better to change our 3324 * allocation policy to ensure that all 3325 * gang members reside on the same vdev. 
3326 */ 3327 return (B_TRUE); 3328 } 3329 3330 /* 3331 * Check if the top-level vdev must resilver this offset. 3332 * When the offset does not intersect with a dirty leaf DTL 3333 * then it may be possible to skip the resilver IO. The psize 3334 * is provided instead of asize to simplify the check for RAIDZ. 3335 */ 3336 if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth)) 3337 return (B_FALSE); 3338 3339 /* 3340 * Check that this top-level vdev has a device under it which 3341 * is resilvering and is not deferred. 3342 */ 3343 if (!dsl_scan_check_deferred(vd)) 3344 return (B_FALSE); 3345 3346 return (B_TRUE); 3347 } 3348 3349 static int 3350 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) 3351 { 3352 dsl_scan_t *scn = dp->dp_scan; 3353 spa_t *spa = dp->dp_spa; 3354 int err = 0; 3355 3356 if (spa_suspend_async_destroy(spa)) 3357 return (0); 3358 3359 if (zfs_free_bpobj_enabled && 3360 spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3361 scn->scn_is_bptree = B_FALSE; 3362 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; 3363 scn->scn_zio_root = zio_root(spa, NULL, 3364 NULL, ZIO_FLAG_MUSTSUCCEED); 3365 err = bpobj_iterate(&dp->dp_free_bpobj, 3366 bpobj_dsl_scan_free_block_cb, scn, tx); 3367 VERIFY0(zio_wait(scn->scn_zio_root)); 3368 scn->scn_zio_root = NULL; 3369 3370 if (err != 0 && err != ERESTART) 3371 zfs_panic_recover("error %u from bpobj_iterate()", err); 3372 } 3373 3374 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3375 ASSERT(scn->scn_async_destroying); 3376 scn->scn_is_bptree = B_TRUE; 3377 scn->scn_zio_root = zio_root(spa, NULL, 3378 NULL, ZIO_FLAG_MUSTSUCCEED); 3379 err = bptree_iterate(dp->dp_meta_objset, 3380 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); 3381 VERIFY0(zio_wait(scn->scn_zio_root)); 3382 scn->scn_zio_root = NULL; 3383 3384 if (err == EIO || err == ECKSUM) { 3385 err = 0; 3386 } else if (err != 0 && err != ERESTART) { 3387 zfs_panic_recover("error %u from " 3388 "traverse_dataset_destroyed()", err); 3389 } 3390 3391 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { 3392 /* finished; deactivate async destroy feature */ 3393 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); 3394 ASSERT(!spa_feature_is_active(spa, 3395 SPA_FEATURE_ASYNC_DESTROY)); 3396 VERIFY0(zap_remove(dp->dp_meta_objset, 3397 DMU_POOL_DIRECTORY_OBJECT, 3398 DMU_POOL_BPTREE_OBJ, tx)); 3399 VERIFY0(bptree_free(dp->dp_meta_objset, 3400 dp->dp_bptree_obj, tx)); 3401 dp->dp_bptree_obj = 0; 3402 scn->scn_async_destroying = B_FALSE; 3403 scn->scn_async_stalled = B_FALSE; 3404 } else { 3405 /* 3406 * If we didn't make progress, mark the async 3407 * destroy as stalled, so that we will not initiate 3408 * a spa_sync() on its behalf. Note that we only 3409 * check this if we are not finished, because if the 3410 * bptree had no blocks for us to visit, we can 3411 * finish without "making progress". 3412 */ 3413 scn->scn_async_stalled = 3414 (scn->scn_visited_this_txg == 0); 3415 } 3416 } 3417 if (scn->scn_visited_this_txg) { 3418 zfs_dbgmsg("freed %llu blocks in %llums from " 3419 "free_bpobj/bptree txg %llu; err=%u", 3420 (longlong_t)scn->scn_visited_this_txg, 3421 (longlong_t) 3422 NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), 3423 (longlong_t)tx->tx_txg, err); 3424 scn->scn_visited_this_txg = 0; 3425 scn->scn_dedup_frees_this_txg = 0; 3426 3427 /* 3428 * Write out changes to the DDT that may be required as a 3429 * result of the blocks freed. This ensures that the DDT 3430 * is clean when a scrub/resilver runs. 
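 * (When a scrub later walks the DDT, dsl_scan_ddt() asserts that the
 * in-core ddt_tree has no pending entries; syncing the DDT here
 * preserves that invariant.)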
3431 */ 3432 ddt_sync(spa, tx->tx_txg); 3433 } 3434 if (err != 0) 3435 return (err); 3436 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3437 zfs_free_leak_on_eio && 3438 (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || 3439 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || 3440 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { 3441 /* 3442 * We have finished background destroying, but there is still 3443 * some space left in the dp_free_dir. Transfer this leaked 3444 * space to the dp_leak_dir. 3445 */ 3446 if (dp->dp_leak_dir == NULL) { 3447 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 3448 (void) dsl_dir_create_sync(dp, dp->dp_root_dir, 3449 LEAK_DIR_NAME, tx); 3450 VERIFY0(dsl_pool_open_special_dir(dp, 3451 LEAK_DIR_NAME, &dp->dp_leak_dir)); 3452 rrw_exit(&dp->dp_config_rwlock, FTAG); 3453 } 3454 dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD, 3455 dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3456 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3457 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3458 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD, 3459 -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3460 -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3461 -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3462 } 3463 3464 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3465 !spa_livelist_delete_check(spa)) { 3466 /* finished; verify that space accounting went to zero */ 3467 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes); 3468 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes); 3469 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes); 3470 } 3471 3472 spa_notify_waiters(spa); 3473 3474 EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj), 3475 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3476 DMU_POOL_OBSOLETE_BPOBJ)); 3477 if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) { 3478 ASSERT(spa_feature_is_active(dp->dp_spa, 3479 SPA_FEATURE_OBSOLETE_COUNTS)); 3480 3481 scn->scn_is_bptree = B_FALSE; 3482 scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms; 3483 err = bpobj_iterate(&dp->dp_obsolete_bpobj, 3484 dsl_scan_obsolete_block_cb, scn, tx); 3485 if (err != 0 && err != ERESTART) 3486 zfs_panic_recover("error %u from bpobj_iterate()", err); 3487 3488 if (bpobj_is_empty(&dp->dp_obsolete_bpobj)) 3489 dsl_pool_destroy_obsolete_bpobj(dp, tx); 3490 } 3491 return (0); 3492 } 3493 3494 /* 3495 * This is the primary entry point for scans that is called from syncing 3496 * context. Scans must happen entirely during syncing context so that we 3497 * can guarantee that blocks we are currently scanning will not change out 3498 * from under us. While a scan is active, this function controls how quickly 3499 * transaction groups proceed, instead of the normal handling provided by 3500 * txg_sync_thread(). 3501 */ 3502 void 3503 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) 3504 { 3505 int err = 0; 3506 dsl_scan_t *scn = dp->dp_scan; 3507 spa_t *spa = dp->dp_spa; 3508 state_sync_type_t sync_type = SYNC_OPTIONAL; 3509 3510 if (spa->spa_resilver_deferred && 3511 !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 3512 spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx); 3513 3514 /* 3515 * Check for scn_restart_txg before checking spa_load_state, so 3516 * that we can restart an old-style scan while the pool is being 3517 * imported (see dsl_scan_init). 
We also restart scans if there 3518 * is a deferred resilver and the user has manually disabled 3519 * deferred resilvers via the tunable. 3520 */ 3521 if (dsl_scan_restarting(scn, tx) || 3522 (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) { 3523 pool_scan_func_t func = POOL_SCAN_SCRUB; 3524 dsl_scan_done(scn, B_FALSE, tx); 3525 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) 3526 func = POOL_SCAN_RESILVER; 3527 zfs_dbgmsg("restarting scan func=%u txg=%llu", 3528 func, (longlong_t)tx->tx_txg); 3529 dsl_scan_setup_sync(&func, tx); 3530 } 3531 3532 /* 3533 * Only process scans in sync pass 1. 3534 */ 3535 if (spa_sync_pass(spa) > 1) 3536 return; 3537 3538 /* 3539 * If the spa is shutting down, then stop scanning. This will 3540 * ensure that the scan does not dirty any new data during the 3541 * shutdown phase. 3542 */ 3543 if (spa_shutting_down(spa)) 3544 return; 3545 3546 /* 3547 * If the scan is inactive due to a stalled async destroy, try again. 3548 */ 3549 if (!scn->scn_async_stalled && !dsl_scan_active(scn)) 3550 return; 3551 3552 /* reset scan statistics */ 3553 scn->scn_visited_this_txg = 0; 3554 scn->scn_dedup_frees_this_txg = 0; 3555 scn->scn_holes_this_txg = 0; 3556 scn->scn_lt_min_this_txg = 0; 3557 scn->scn_gt_max_this_txg = 0; 3558 scn->scn_ddt_contained_this_txg = 0; 3559 scn->scn_objsets_visited_this_txg = 0; 3560 scn->scn_avg_seg_size_this_txg = 0; 3561 scn->scn_segs_this_txg = 0; 3562 scn->scn_avg_zio_size_this_txg = 0; 3563 scn->scn_zios_this_txg = 0; 3564 scn->scn_suspending = B_FALSE; 3565 scn->scn_sync_start_time = gethrtime(); 3566 spa->spa_scrub_active = B_TRUE; 3567 3568 /* 3569 * First process the async destroys. If we suspend, don't do 3570 * any scrubbing or resilvering. This ensures that there are no 3571 * async destroys while we are scanning, so the scan code doesn't 3572 * have to worry about traversing it. It is also faster to free the 3573 * blocks than to scrub them. 3574 */ 3575 err = dsl_process_async_destroys(dp, tx); 3576 if (err != 0) 3577 return; 3578 3579 if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) 3580 return; 3581 3582 /* 3583 * Wait a few txgs after importing to begin scanning so that 3584 * we can get the pool imported quickly. 3585 */ 3586 if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) 3587 return; 3588 3589 /* 3590 * zfs_scan_suspend_progress can be set to disable scan progress. 3591 * We don't want to spin the txg_sync thread, so we add a delay 3592 * here to simulate the time spent doing a scan. This is mostly 3593 * useful for testing and debugging. 3594 */ 3595 if (zfs_scan_suspend_progress) { 3596 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; 3597 int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 3598 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 3599 3600 while (zfs_scan_suspend_progress && 3601 !txg_sync_waiting(scn->scn_dp) && 3602 !spa_shutting_down(scn->scn_dp->dp_spa) && 3603 NSEC2MSEC(scan_time_ns) < mintime) { 3604 delay(hz); 3605 scan_time_ns = gethrtime() - scn->scn_sync_start_time; 3606 } 3607 return; 3608 } 3609 3610 /* 3611 * It is possible to switch from unsorted to sorted at any time, 3612 * but afterwards the scan will remain sorted unless reloaded from 3613 * a checkpoint after a reboot. 
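 * (Sorted scanning is the default here; the zfs_scan_legacy tunable, checked just below, keeps the scan in the unsorted legacy mode instead.)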
3614 */ 3615 if (!zfs_scan_legacy) { 3616 scn->scn_is_sorted = B_TRUE; 3617 if (scn->scn_last_checkpoint == 0) 3618 scn->scn_last_checkpoint = ddi_get_lbolt(); 3619 } 3620 3621 /* 3622 * For sorted scans, determine what kind of work we will be doing 3623 * this txg based on our memory limitations and whether or not we 3624 * need to perform a checkpoint. 3625 */ 3626 if (scn->scn_is_sorted) { 3627 /* 3628 * If we are over our checkpoint interval, set scn_clearing 3629 * so that we can begin checkpointing immediately. The 3630 * checkpoint allows us to save a consistent bookmark 3631 * representing how much data we have scrubbed so far. 3632 * Otherwise, use the memory limit to determine if we should 3633 * scan for metadata or start issuing scrub IOs. We accumulate 3634 * metadata until we hit our hard memory limit at which point 3635 * we issue scrub IOs until we are at our soft memory limit. 3636 */ 3637 if (scn->scn_checkpointing || 3638 ddi_get_lbolt() - scn->scn_last_checkpoint > 3639 SEC_TO_TICK(zfs_scan_checkpoint_intval)) { 3640 if (!scn->scn_checkpointing) 3641 zfs_dbgmsg("begin scan checkpoint"); 3642 3643 scn->scn_checkpointing = B_TRUE; 3644 scn->scn_clearing = B_TRUE; 3645 } else { 3646 boolean_t should_clear = dsl_scan_should_clear(scn); 3647 if (should_clear && !scn->scn_clearing) { 3648 zfs_dbgmsg("begin scan clearing"); 3649 scn->scn_clearing = B_TRUE; 3650 } else if (!should_clear && scn->scn_clearing) { 3651 zfs_dbgmsg("finish scan clearing"); 3652 scn->scn_clearing = B_FALSE; 3653 } 3654 } 3655 } else { 3656 ASSERT0(scn->scn_checkpointing); 3657 ASSERT0(scn->scn_clearing); 3658 } 3659 3660 if (!scn->scn_clearing && scn->scn_done_txg == 0) { 3661 /* Need to scan metadata for more blocks to scrub */ 3662 dsl_scan_phys_t *scnp = &scn->scn_phys; 3663 taskqid_t prefetch_tqid; 3664 uint64_t bytes_per_leaf = zfs_scan_vdev_limit; 3665 uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev); 3666 3667 /* 3668 * Recalculate the max number of in-flight bytes for pool-wide 3669 * scanning operations (minimum 1MB). Limits for the issuing 3670 * phase are done per top-level vdev and are handled separately.
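 * As a purely illustrative example (the actual per-leaf figure comes from the zfs_scan_vdev_limit tunable assigned to bytes_per_leaf above): with 8 leaf vdevs and a 4 MiB per-leaf limit, the pool-wide cap would be 8 * 4 MiB = 32 MiB, while the MAX() below keeps the cap from ever dropping under the 1 MiB floor.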
3671 */ 3672 scn->scn_maxinflight_bytes = 3673 MAX(nr_leaves * bytes_per_leaf, 1ULL << 20); 3674 3675 if (scnp->scn_ddt_bookmark.ddb_class <= 3676 scnp->scn_ddt_class_max) { 3677 ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); 3678 zfs_dbgmsg("doing scan sync txg %llu; " 3679 "ddt bm=%llu/%llu/%llu/%llx", 3680 (longlong_t)tx->tx_txg, 3681 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 3682 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 3683 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 3684 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 3685 } else { 3686 zfs_dbgmsg("doing scan sync txg %llu; " 3687 "bm=%llu/%llu/%llu/%llu", 3688 (longlong_t)tx->tx_txg, 3689 (longlong_t)scnp->scn_bookmark.zb_objset, 3690 (longlong_t)scnp->scn_bookmark.zb_object, 3691 (longlong_t)scnp->scn_bookmark.zb_level, 3692 (longlong_t)scnp->scn_bookmark.zb_blkid); 3693 } 3694 3695 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3696 NULL, ZIO_FLAG_CANFAIL); 3697 3698 scn->scn_prefetch_stop = B_FALSE; 3699 prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, 3700 dsl_scan_prefetch_thread, scn, TQ_SLEEP); 3701 ASSERT(prefetch_tqid != TASKQID_INVALID); 3702 3703 dsl_pool_config_enter(dp, FTAG); 3704 dsl_scan_visit(scn, tx); 3705 dsl_pool_config_exit(dp, FTAG); 3706 3707 mutex_enter(&dp->dp_spa->spa_scrub_lock); 3708 scn->scn_prefetch_stop = B_TRUE; 3709 cv_broadcast(&spa->spa_scrub_io_cv); 3710 mutex_exit(&dp->dp_spa->spa_scrub_lock); 3711 3712 taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); 3713 (void) zio_wait(scn->scn_zio_root); 3714 scn->scn_zio_root = NULL; 3715 3716 zfs_dbgmsg("scan visited %llu blocks in %llums " 3717 "(%llu os's, %llu holes, %llu < mintxg, " 3718 "%llu in ddt, %llu > maxtxg)", 3719 (longlong_t)scn->scn_visited_this_txg, 3720 (longlong_t)NSEC2MSEC(gethrtime() - 3721 scn->scn_sync_start_time), 3722 (longlong_t)scn->scn_objsets_visited_this_txg, 3723 (longlong_t)scn->scn_holes_this_txg, 3724 (longlong_t)scn->scn_lt_min_this_txg, 3725 (longlong_t)scn->scn_ddt_contained_this_txg, 3726 (longlong_t)scn->scn_gt_max_this_txg); 3727 3728 if (!scn->scn_suspending) { 3729 ASSERT0(avl_numnodes(&scn->scn_queue)); 3730 scn->scn_done_txg = tx->tx_txg + 1; 3731 if (scn->scn_is_sorted) { 3732 scn->scn_checkpointing = B_TRUE; 3733 scn->scn_clearing = B_TRUE; 3734 } 3735 zfs_dbgmsg("scan complete txg %llu", 3736 (longlong_t)tx->tx_txg); 3737 } 3738 } else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) { 3739 ASSERT(scn->scn_clearing); 3740 3741 /* need to issue scrubbing IOs from per-vdev queues */ 3742 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 3743 NULL, ZIO_FLAG_CANFAIL); 3744 scan_io_queues_run(scn); 3745 (void) zio_wait(scn->scn_zio_root); 3746 scn->scn_zio_root = NULL; 3747 3748 /* calculate and dprintf the current memory usage */ 3749 (void) dsl_scan_should_clear(scn); 3750 dsl_scan_update_stats(scn); 3751 3752 zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums " 3753 "(avg_block_size = %llu, avg_seg_size = %llu)", 3754 (longlong_t)scn->scn_zios_this_txg, 3755 (longlong_t)scn->scn_segs_this_txg, 3756 (longlong_t)NSEC2MSEC(gethrtime() - 3757 scn->scn_sync_start_time), 3758 (longlong_t)scn->scn_avg_zio_size_this_txg, 3759 (longlong_t)scn->scn_avg_seg_size_this_txg); 3760 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) { 3761 /* Finished with everything. 
Mark the scrub as complete */ 3762 zfs_dbgmsg("scan issuing complete txg %llu", 3763 (longlong_t)tx->tx_txg); 3764 ASSERT3U(scn->scn_done_txg, !=, 0); 3765 ASSERT0(spa->spa_scrub_inflight); 3766 ASSERT0(scn->scn_bytes_pending); 3767 dsl_scan_done(scn, B_TRUE, tx); 3768 sync_type = SYNC_MANDATORY; 3769 } 3770 3771 dsl_scan_sync_state(scn, tx, sync_type); 3772 } 3773 3774 static void 3775 count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp) 3776 { 3777 int i; 3778 3779 /* 3780 * Don't count embedded bp's, since we already did the work of 3781 * scanning these when we scanned the containing block. 3782 */ 3783 if (BP_IS_EMBEDDED(bp)) 3784 return; 3785 3786 /* 3787 * Update the spa's stats on how many bytes we have issued. 3788 * Sequential scrubs create a zio for each DVA of the bp. Each 3789 * of these will include all DVAs for repair purposes, but the 3790 * zio code will only try the first one unless there is an issue. 3791 * Therefore, we should only count the first DVA for these IOs. 3792 */ 3793 if (scn->scn_is_sorted) { 3794 atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued, 3795 DVA_GET_ASIZE(&bp->blk_dva[0])); 3796 } else { 3797 spa_t *spa = scn->scn_dp->dp_spa; 3798 3799 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 3800 atomic_add_64(&spa->spa_scan_pass_issued, 3801 DVA_GET_ASIZE(&bp->blk_dva[i])); 3802 } 3803 } 3804 3805 /* 3806 * If we resume after a reboot, zab will be NULL; don't record 3807 * incomplete stats in that case. 3808 */ 3809 if (zab == NULL) 3810 return; 3811 3812 mutex_enter(&zab->zab_lock); 3813 3814 for (i = 0; i < 4; i++) { 3815 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; 3816 int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL; 3817 3818 if (t & DMU_OT_NEWTYPE) 3819 t = DMU_OT_OTHER; 3820 zfs_blkstat_t *zb = &zab->zab_type[l][t]; 3821 int equal; 3822 3823 zb->zb_count++; 3824 zb->zb_asize += BP_GET_ASIZE(bp); 3825 zb->zb_lsize += BP_GET_LSIZE(bp); 3826 zb->zb_psize += BP_GET_PSIZE(bp); 3827 zb->zb_gangs += BP_COUNT_GANG(bp); 3828 3829 switch (BP_GET_NDVAS(bp)) { 3830 case 2: 3831 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 3832 DVA_GET_VDEV(&bp->blk_dva[1])) 3833 zb->zb_ditto_2_of_2_samevdev++; 3834 break; 3835 case 3: 3836 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 3837 DVA_GET_VDEV(&bp->blk_dva[1])) + 3838 (DVA_GET_VDEV(&bp->blk_dva[0]) == 3839 DVA_GET_VDEV(&bp->blk_dva[2])) + 3840 (DVA_GET_VDEV(&bp->blk_dva[1]) == 3841 DVA_GET_VDEV(&bp->blk_dva[2])); 3842 if (equal == 1) 3843 zb->zb_ditto_2_of_3_samevdev++; 3844 else if (equal == 3) 3845 zb->zb_ditto_3_of_3_samevdev++; 3846 break; 3847 } 3848 } 3849 3850 mutex_exit(&zab->zab_lock); 3851 } 3852 3853 static void 3854 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) 3855 { 3856 avl_index_t idx; 3857 int64_t asize = SIO_GET_ASIZE(sio); 3858 dsl_scan_t *scn = queue->q_scn; 3859 3860 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3861 3862 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { 3863 /* block is already scheduled for reading */ 3864 atomic_add_64(&scn->scn_bytes_pending, -asize); 3865 sio_free(sio); 3866 return; 3867 } 3868 avl_insert(&queue->q_sios_by_addr, sio, idx); 3869 queue->q_sio_memused += SIO_GET_MUSED(sio); 3870 range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), asize); 3871 } 3872 3873 /* 3874 * Given all the info we got from our metadata scanning process, we 3875 * construct a scan_io_t and insert it into the scan sorting queue. The 3876 * I/O must already be suitable for us to process. 
This is controlled 3877 * by dsl_scan_enqueue(). 3878 */ 3879 static void 3880 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, 3881 int zio_flags, const zbookmark_phys_t *zb) 3882 { 3883 dsl_scan_t *scn = queue->q_scn; 3884 scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp)); 3885 3886 ASSERT0(BP_IS_GANG(bp)); 3887 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3888 3889 bp2sio(bp, sio, dva_i); 3890 sio->sio_flags = zio_flags; 3891 sio->sio_zb = *zb; 3892 3893 /* 3894 * Increment the bytes pending counter now so that we can't 3895 * get an integer underflow in case the worker processes the 3896 * zio before we get to incrementing this counter. 3897 */ 3898 atomic_add_64(&scn->scn_bytes_pending, SIO_GET_ASIZE(sio)); 3899 3900 scan_io_queue_insert_impl(queue, sio); 3901 } 3902 3903 /* 3904 * Given a set of I/O parameters as discovered by the metadata traversal 3905 * process, attempts to place the I/O into the sorted queues (if allowed), 3906 * or immediately executes the I/O. 3907 */ 3908 static void 3909 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 3910 const zbookmark_phys_t *zb) 3911 { 3912 spa_t *spa = dp->dp_spa; 3913 3914 ASSERT(!BP_IS_EMBEDDED(bp)); 3915 3916 /* 3917 * Gang blocks are hard to issue sequentially, so we just issue them 3918 * here immediately instead of queuing them. 3919 */ 3920 if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { 3921 scan_exec_io(dp, bp, zio_flags, zb, NULL); 3922 return; 3923 } 3924 3925 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 3926 dva_t dva; 3927 vdev_t *vdev; 3928 3929 dva = bp->blk_dva[i]; 3930 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); 3931 ASSERT(vdev != NULL); 3932 3933 mutex_enter(&vdev->vdev_scan_io_queue_lock); 3934 if (vdev->vdev_scan_io_queue == NULL) 3935 vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); 3936 ASSERT(dp->dp_scan != NULL); 3937 scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, 3938 i, zio_flags, zb); 3939 mutex_exit(&vdev->vdev_scan_io_queue_lock); 3940 } 3941 } 3942 3943 static int 3944 dsl_scan_scrub_cb(dsl_pool_t *dp, 3945 const blkptr_t *bp, const zbookmark_phys_t *zb) 3946 { 3947 dsl_scan_t *scn = dp->dp_scan; 3948 spa_t *spa = dp->dp_spa; 3949 uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); 3950 size_t psize = BP_GET_PSIZE(bp); 3951 boolean_t needs_io = B_FALSE; 3952 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; 3953 3954 3955 if (phys_birth <= scn->scn_phys.scn_min_txg || 3956 phys_birth >= scn->scn_phys.scn_max_txg) { 3957 count_block(scn, dp->dp_blkstats, bp); 3958 return (0); 3959 } 3960 3961 /* Embedded BP's have phys_birth==0, so we reject them above. */ 3962 ASSERT(!BP_IS_EMBEDDED(bp)); 3963 3964 ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); 3965 if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { 3966 zio_flags |= ZIO_FLAG_SCRUB; 3967 needs_io = B_TRUE; 3968 } else { 3969 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); 3970 zio_flags |= ZIO_FLAG_RESILVER; 3971 needs_io = B_FALSE; 3972 } 3973 3974 /* If it's an intent log block, failure is expected. */ 3975 if (zb->zb_level == ZB_ZIL_LEVEL) 3976 zio_flags |= ZIO_FLAG_SPECULATIVE; 3977 3978 for (int d = 0; d < BP_GET_NDVAS(bp); d++) { 3979 const dva_t *dva = &bp->blk_dva[d]; 3980 3981 /* 3982 * Keep track of how much data we've examined so that 3983 * zpool(8) status can make useful progress reports. 
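 * scn_phys.scn_examined accumulates across the whole scan and is persisted with the rest of the scan state, while spa_scan_pass_exam only covers the current pass and is reset when a new pass begins.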
3984 */ 3985 scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva); 3986 spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva); 3987 3988 /* if it's a resilver, this may not be in the target range */ 3989 if (!needs_io) 3990 needs_io = dsl_scan_need_resilver(spa, dva, psize, 3991 phys_birth); 3992 } 3993 3994 if (needs_io && !zfs_no_scrub_io) { 3995 dsl_scan_enqueue(dp, bp, zio_flags, zb); 3996 } else { 3997 count_block(scn, dp->dp_blkstats, bp); 3998 } 3999 4000 /* do not relocate this block */ 4001 return (0); 4002 } 4003 4004 static void 4005 dsl_scan_scrub_done(zio_t *zio) 4006 { 4007 spa_t *spa = zio->io_spa; 4008 blkptr_t *bp = zio->io_bp; 4009 dsl_scan_io_queue_t *queue = zio->io_private; 4010 4011 abd_free(zio->io_abd); 4012 4013 if (queue == NULL) { 4014 mutex_enter(&spa->spa_scrub_lock); 4015 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 4016 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 4017 cv_broadcast(&spa->spa_scrub_io_cv); 4018 mutex_exit(&spa->spa_scrub_lock); 4019 } else { 4020 mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); 4021 ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); 4022 queue->q_inflight_bytes -= BP_GET_PSIZE(bp); 4023 cv_broadcast(&queue->q_zio_cv); 4024 mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); 4025 } 4026 4027 if (zio->io_error && (zio->io_error != ECKSUM || 4028 !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { 4029 atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors); 4030 } 4031 } 4032 4033 /* 4034 * Given a scanning zio's information, executes the zio. The zio need 4035 * not necessarily be sortable; this function simply executes the 4036 * zio, no matter what it is. The optional queue argument allows the 4037 * caller to specify that they want per top level vdev IO rate limiting 4038 * instead of the legacy global limiting. 4039 */ 4040 static void 4041 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 4042 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) 4043 { 4044 spa_t *spa = dp->dp_spa; 4045 dsl_scan_t *scn = dp->dp_scan; 4046 size_t size = BP_GET_PSIZE(bp); 4047 abd_t *data = abd_alloc_for_io(size, B_FALSE); 4048 4049 ASSERT3U(scn->scn_maxinflight_bytes, >, 0); 4050 4051 if (queue == NULL) { 4052 mutex_enter(&spa->spa_scrub_lock); 4053 while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) 4054 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 4055 spa->spa_scrub_inflight += BP_GET_PSIZE(bp); 4056 mutex_exit(&spa->spa_scrub_lock); 4057 } else { 4058 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 4059 4060 mutex_enter(q_lock); 4061 while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) 4062 cv_wait(&queue->q_zio_cv, q_lock); 4063 queue->q_inflight_bytes += BP_GET_PSIZE(bp); 4064 mutex_exit(q_lock); 4065 } 4066 4067 count_block(scn, dp->dp_blkstats, bp); 4068 zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size, 4069 dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); 4070 } 4071 4072 /* 4073 * This is the primary extent sorting algorithm. We balance two parameters: 4074 * 1) how many bytes of I/O are in an extent 4075 * 2) how well the extent is filled with I/O (as a fraction of its total size) 4076 * Since we allow extents to have gaps between their constituent I/Os, it's 4077 * possible to have a fairly large extent that contains the same amount of 4078 * I/O bytes as a much smaller extent, which just packs the I/O more tightly.
4079 * The algorithm sorts based on a score calculated from the extent's size, 4080 * the relative fill volume (in %) and a "fill weight" parameter that controls 4081 * the split between whether we prefer larger extents or more well populated 4082 * extents: 4083 * 4084 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) 4085 * 4086 * Example: 4087 * 1) assume extsz = 64 MiB 4088 * 2) assume fill = 32 MiB (extent is half full) 4089 * 3) assume fill_weight = 3 4090 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 4091 * SCORE = 32M + (50 * 3 * 32M) / 100 4092 * SCORE = 32M + (4800M / 100) 4093 * SCORE = 32M + 48M 4094 * ^ ^ 4095 * | +--- final total relative fill-based score 4096 * +--------- final total fill-based score 4097 * SCORE = 80M 4098 * 4099 * As can be seen, at fill_ratio=3, the algorithm is slightly biased towards 4100 * extents that are more completely filled (in a 3:2 ratio) vs just larger. 4101 * Note that as an optimization, we replace multiplication and division by 4102 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). 4103 */ 4104 static int 4105 ext_size_compare(const void *x, const void *y) 4106 { 4107 const range_seg_gap_t *rsa = x, *rsb = y; 4108 4109 uint64_t sa = rsa->rs_end - rsa->rs_start; 4110 uint64_t sb = rsb->rs_end - rsb->rs_start; 4111 uint64_t score_a, score_b; 4112 4113 score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) * 4114 fill_weight * rsa->rs_fill) >> 7); 4115 score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) * 4116 fill_weight * rsb->rs_fill) >> 7); 4117 4118 if (score_a > score_b) 4119 return (-1); 4120 if (score_a == score_b) { 4121 if (rsa->rs_start < rsb->rs_start) 4122 return (-1); 4123 if (rsa->rs_start == rsb->rs_start) 4124 return (0); 4125 return (1); 4126 } 4127 return (1); 4128 } 4129 4130 /* 4131 * Comparator for the q_sios_by_addr tree. Sorting is simply performed 4132 * based on LBA-order (from lowest to highest). 4133 */ 4134 static int 4135 sio_addr_compare(const void *x, const void *y) 4136 { 4137 const scan_io_t *a = x, *b = y; 4138 4139 return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b))); 4140 } 4141 4142 /* IO queues are created on demand when they are needed. */ 4143 static dsl_scan_io_queue_t * 4144 scan_io_queue_create(vdev_t *vd) 4145 { 4146 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; 4147 dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); 4148 4149 q->q_scn = scn; 4150 q->q_vd = vd; 4151 q->q_sio_memused = 0; 4152 cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); 4153 q->q_exts_by_addr = range_tree_create_impl(&rt_btree_ops, RANGE_SEG_GAP, 4154 &q->q_exts_by_size, 0, 0, ext_size_compare, zfs_scan_max_ext_gap); 4155 avl_create(&q->q_sios_by_addr, sio_addr_compare, 4156 sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); 4157 4158 return (q); 4159 } 4160 4161 /* 4162 * Destroys a scan queue and all segments and scan_io_t's contained in it. 4163 * No further execution of I/O occurs, anything pending in the queue is 4164 * simply freed without being executed. 
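 * The caller is expected to hold the owning vdev's vdev_scan_io_queue_lock for the duration of the call.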
4165 */ 4166 void 4167 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) 4168 { 4169 dsl_scan_t *scn = queue->q_scn; 4170 scan_io_t *sio; 4171 void *cookie = NULL; 4172 int64_t bytes_dequeued = 0; 4173 4174 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 4175 4176 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != 4177 NULL) { 4178 ASSERT(range_tree_contains(queue->q_exts_by_addr, 4179 SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); 4180 bytes_dequeued += SIO_GET_ASIZE(sio); 4181 queue->q_sio_memused -= SIO_GET_MUSED(sio); 4182 sio_free(sio); 4183 } 4184 4185 ASSERT0(queue->q_sio_memused); 4186 atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued); 4187 range_tree_vacate(queue->q_exts_by_addr, NULL, queue); 4188 range_tree_destroy(queue->q_exts_by_addr); 4189 avl_destroy(&queue->q_sios_by_addr); 4190 cv_destroy(&queue->q_zio_cv); 4191 4192 kmem_free(queue, sizeof (*queue)); 4193 } 4194 4195 /* 4196 * Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is 4197 * called on behalf of vdev_top_transfer when creating or destroying 4198 * a mirror vdev due to zpool attach/detach. 4199 */ 4200 void 4201 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) 4202 { 4203 mutex_enter(&svd->vdev_scan_io_queue_lock); 4204 mutex_enter(&tvd->vdev_scan_io_queue_lock); 4205 4206 VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); 4207 tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; 4208 svd->vdev_scan_io_queue = NULL; 4209 if (tvd->vdev_scan_io_queue != NULL) 4210 tvd->vdev_scan_io_queue->q_vd = tvd; 4211 4212 mutex_exit(&tvd->vdev_scan_io_queue_lock); 4213 mutex_exit(&svd->vdev_scan_io_queue_lock); 4214 } 4215 4216 static void 4217 scan_io_queues_destroy(dsl_scan_t *scn) 4218 { 4219 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 4220 4221 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 4222 vdev_t *tvd = rvd->vdev_child[i]; 4223 4224 mutex_enter(&tvd->vdev_scan_io_queue_lock); 4225 if (tvd->vdev_scan_io_queue != NULL) 4226 dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); 4227 tvd->vdev_scan_io_queue = NULL; 4228 mutex_exit(&tvd->vdev_scan_io_queue_lock); 4229 } 4230 } 4231 4232 static void 4233 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) 4234 { 4235 dsl_pool_t *dp = spa->spa_dsl_pool; 4236 dsl_scan_t *scn = dp->dp_scan; 4237 vdev_t *vdev; 4238 kmutex_t *q_lock; 4239 dsl_scan_io_queue_t *queue; 4240 scan_io_t *srch_sio, *sio; 4241 avl_index_t idx; 4242 uint64_t start, size; 4243 4244 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); 4245 ASSERT(vdev != NULL); 4246 q_lock = &vdev->vdev_scan_io_queue_lock; 4247 queue = vdev->vdev_scan_io_queue; 4248 4249 mutex_enter(q_lock); 4250 if (queue == NULL) { 4251 mutex_exit(q_lock); 4252 return; 4253 } 4254 4255 srch_sio = sio_alloc(BP_GET_NDVAS(bp)); 4256 bp2sio(bp, srch_sio, dva_i); 4257 start = SIO_GET_OFFSET(srch_sio); 4258 size = SIO_GET_ASIZE(srch_sio); 4259 4260 /* 4261 * We can find the zio in two states: 4262 * 1) Cold, just sitting in the queue of zio's to be issued at 4263 * some point in the future. In this case, all we do is 4264 * remove the zio from the q_sios_by_addr tree, decrement 4265 * its data volume from the containing range_seg_t and 4266 * resort the q_exts_by_size tree to reflect that the 4267 * range_seg_t has lost some of its 'fill'. We don't shorten 4268 * the range_seg_t - this is usually rare enough not to be 4269 * worth the extra hassle of trying to keep track of precise 4270 * extent boundaries.
4271 * 2) Hot, where the zio is currently in-flight in 4272 * dsl_scan_issue_ios. In this case, we can't simply 4273 * reach in and stop the in-flight zio's, so we instead 4274 * block the caller. Eventually, dsl_scan_issue_ios will 4275 * be done with issuing the zio's it gathered and will 4276 * signal us. 4277 */ 4278 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 4279 sio_free(srch_sio); 4280 4281 if (sio != NULL) { 4282 int64_t asize = SIO_GET_ASIZE(sio); 4283 blkptr_t tmpbp; 4284 4285 /* Got it while it was cold in the queue */ 4286 ASSERT3U(start, ==, SIO_GET_OFFSET(sio)); 4287 ASSERT3U(size, ==, asize); 4288 avl_remove(&queue->q_sios_by_addr, sio); 4289 queue->q_sio_memused -= SIO_GET_MUSED(sio); 4290 4291 ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); 4292 range_tree_remove_fill(queue->q_exts_by_addr, start, size); 4293 4294 /* 4295 * We only update scn_bytes_pending in the cold path, 4296 * otherwise it will already have been accounted for as 4297 * part of the zio's execution. 4298 */ 4299 atomic_add_64(&scn->scn_bytes_pending, -asize); 4300 4301 /* count the block as though we issued it */ 4302 sio2bp(sio, &tmpbp); 4303 count_block(scn, dp->dp_blkstats, &tmpbp); 4304 4305 sio_free(sio); 4306 } 4307 mutex_exit(q_lock); 4308 } 4309 4310 /* 4311 * Callback invoked when a zio_free() zio is executing. This needs to be 4312 * intercepted to prevent the zio from deallocating a particular portion 4313 * of disk space and it then getting reallocated and written to, while we 4314 * still have it queued up for processing. 4315 */ 4316 void 4317 dsl_scan_freed(spa_t *spa, const blkptr_t *bp) 4318 { 4319 dsl_pool_t *dp = spa->spa_dsl_pool; 4320 dsl_scan_t *scn = dp->dp_scan; 4321 4322 ASSERT(!BP_IS_EMBEDDED(bp)); 4323 ASSERT(scn != NULL); 4324 if (!dsl_scan_is_running(scn)) 4325 return; 4326 4327 for (int i = 0; i < BP_GET_NDVAS(bp); i++) 4328 dsl_scan_freed_dva(spa, bp, i); 4329 } 4330 4331 /* 4332 * Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has 4333 * not started, start it. Otherwise, only restart if max txg in DTL range is 4334 * greater than the max txg in the current scan. If the DTL max is less than 4335 * the scan max, then the vdev has not missed any new data since the resilver 4336 * started, so a restart is not needed. 
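 * For example (illustrative txg numbers): if the in-progress resilver has scn_max_txg = 200 and this vdev's DTL only reaches txg 150, the missing data already falls inside the current scan range and no restart is needed; a DTL reaching txg 250 would require one.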
4337 */ 4338 void 4339 dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd) 4340 { 4341 uint64_t min, max; 4342 4343 if (!vdev_resilver_needed(vd, &min, &max)) 4344 return; 4345 4346 if (!dsl_scan_resilvering(dp)) { 4347 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 4348 return; 4349 } 4350 4351 if (max <= dp->dp_scan->scn_phys.scn_max_txg) 4352 return; 4353 4354 /* restart is needed, check if it can be deferred */ 4355 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 4356 vdev_defer_resilver(vd); 4357 else 4358 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 4359 } 4360 4361 /* BEGIN CSTYLED */ 4362 ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW, 4363 "Max bytes in flight per leaf vdev for scrubs and resilvers"); 4364 4365 ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW, 4366 "Min millisecs to scrub per txg"); 4367 4368 ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW, 4369 "Min millisecs to obsolete per txg"); 4370 4371 ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW, 4372 "Min millisecs to free per txg"); 4373 4374 ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW, 4375 "Min millisecs to resilver per txg"); 4376 4377 ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW, 4378 "Set to prevent scans from progressing"); 4379 4380 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW, 4381 "Set to disable scrub I/O"); 4382 4383 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW, 4384 "Set to disable scrub prefetching"); 4385 4386 ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, ULONG, ZMOD_RW, 4387 "Max number of blocks freed in one txg"); 4388 4389 ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, ULONG, ZMOD_RW, 4390 "Max number of dedup blocks freed in one txg"); 4391 4392 ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, 4393 "Enable processing of the free_bpobj"); 4394 4395 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW, 4396 "Fraction of RAM for scan hard limit"); 4397 4398 ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW, 4399 "IO issuing strategy during scrubbing. " 4400 "0 = default, 1 = LBA, 2 = size"); 4401 4402 ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW, 4403 "Scrub using legacy non-sequential method"); 4404 4405 ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW, 4406 "Scan progress on-disk checkpointing interval"); 4407 4408 ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW, 4409 "Max gap in bytes between sequential scrub / resilver I/Os"); 4410 4411 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW, 4412 "Fraction of hard limit used as soft limit"); 4413 4414 ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW, 4415 "Tunable to attempt to reduce lock contention"); 4416 4417 ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW, 4418 "Tunable to adjust bias towards more filled segments during scans"); 4419 4420 ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW, 4421 "Process all resilvers immediately"); 4422 /* END CSTYLED */ 4423
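/*
 * Usage note (illustrative, Linux-specific): when this file is built into the
 * zfs kernel module, the parameters declared above are normally exposed for
 * runtime inspection and adjustment under /sys/module/zfs/parameters/, e.g.
 *
 *	# cat /sys/module/zfs/parameters/zfs_scan_legacy
 *	# echo 1 > /sys/module/zfs/parameters/zfs_scan_legacy
 *
 * Exact availability and persistence of these settings depend on the platform
 * and on how the module was loaded.
 */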