1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2021 by Delphix. All rights reserved. 24 * Copyright 2016 Gary Mills 25 * Copyright (c) 2017, 2019, Datto Inc. All rights reserved. 26 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved. 27 * Copyright 2019 Joyent, Inc. 28 */ 29 30 #include <sys/dsl_scan.h> 31 #include <sys/dsl_pool.h> 32 #include <sys/dsl_dataset.h> 33 #include <sys/dsl_prop.h> 34 #include <sys/dsl_dir.h> 35 #include <sys/dsl_synctask.h> 36 #include <sys/dnode.h> 37 #include <sys/dmu_tx.h> 38 #include <sys/dmu_objset.h> 39 #include <sys/arc.h> 40 #include <sys/arc_impl.h> 41 #include <sys/zap.h> 42 #include <sys/zio.h> 43 #include <sys/zfs_context.h> 44 #include <sys/fs/zfs.h> 45 #include <sys/zfs_znode.h> 46 #include <sys/spa_impl.h> 47 #include <sys/vdev_impl.h> 48 #include <sys/zil_impl.h> 49 #include <sys/zio_checksum.h> 50 #include <sys/brt.h> 51 #include <sys/ddt.h> 52 #include <sys/sa.h> 53 #include <sys/sa_impl.h> 54 #include <sys/zfeature.h> 55 #include <sys/abd.h> 56 #include <sys/range_tree.h> 57 #include <sys/dbuf.h> 58 #ifdef _KERNEL 59 #include <sys/zfs_vfsops.h> 60 #endif 61 62 /* 63 * Grand theory statement on scan queue sorting 64 * 65 * Scanning is implemented by recursively traversing all indirection levels 66 * in an object and reading all blocks referenced from said objects. This 67 * results in us approximately traversing the object from lowest logical 68 * offset to the highest. For best performance, we would want the logical 69 * blocks to be physically contiguous. However, this is frequently not the 70 * case with pools given the allocation patterns of copy-on-write filesystems. 71 * So instead, we put the I/Os into a reordering queue and issue them in a 72 * way that will most benefit physical disks (LBA-order). 73 * 74 * Queue management: 75 * 76 * Ideally, we would want to scan all metadata and queue up all block I/O 77 * prior to starting to issue it, because that allows us to do an optimal 78 * sorting job. This can however consume large amounts of memory. Therefore 79 * we continuously monitor the size of the queues and constrain them to 5% 80 * (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this 81 * limit, we clear out a few of the largest extents at the head of the queues 82 * to make room for more scanning. Hopefully, these extents will be fairly 83 * large and contiguous, allowing us to approach sequential I/O throughput 84 * even without a fully sorted tree. 85 * 86 * Metadata scanning takes place in dsl_scan_visit(), which is called from 87 * dsl_scan_sync() every spa_sync(). 
If we have either fully scanned all 88 * metadata on the pool, or we need to make room in memory because our 89 * queues are too large, dsl_scan_visit() is postponed and 90 * scan_io_queues_run() is called from dsl_scan_sync() instead. This implies 91 * that metadata scanning and queued I/O issuing are mutually exclusive. This 92 * allows us to provide maximum sequential I/O throughput for the majority of 93 * I/O's issued since sequential I/O performance is significantly negatively 94 * impacted if it is interleaved with random I/O. 95 * 96 * Implementation Notes 97 * 98 * One side effect of the queued scanning algorithm is that the scanning code 99 * needs to be notified whenever a block is freed. This is needed to allow 100 * the scanning code to remove these I/Os from the issuing queue. Additionally, 101 * we do not attempt to queue gang blocks to be issued sequentially since this 102 * is very hard to do and would have an extremely limited performance benefit. 103 * Instead, we simply issue gang I/Os as soon as we find them using the legacy 104 * algorithm. 105 * 106 * Backwards compatibility 107 * 108 * This new algorithm is backwards compatible with the legacy on-disk data 109 * structures (and therefore does not require a new feature flag). 110 * Periodically during scanning (see zfs_scan_checkpoint_intval), the scan 111 * will stop scanning metadata (in logical order) and wait for all outstanding 112 * sorted I/O to complete. Once this is done, we write out a checkpoint 113 * bookmark, indicating that we have scanned everything logically before it. 114 * If the pool is imported on a machine without the new sorting algorithm, 115 * the scan simply resumes from the last checkpoint using the legacy algorithm. 116 */ 117 118 typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *, 119 const zbookmark_phys_t *); 120 121 static scan_cb_t dsl_scan_scrub_cb; 122 123 static int scan_ds_queue_compare(const void *a, const void *b); 124 static int scan_prefetch_queue_compare(const void *a, const void *b); 125 static void scan_ds_queue_clear(dsl_scan_t *scn); 126 static void scan_ds_prefetch_queue_clear(dsl_scan_t *scn); 127 static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, 128 uint64_t *txg); 129 static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg); 130 static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj); 131 static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx); 132 static uint64_t dsl_scan_count_data_disks(spa_t *spa); 133 static void read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb); 134 135 extern uint_t zfs_vdev_async_write_active_min_dirty_percent; 136 static int zfs_scan_blkstats = 0; 137 138 /* 139 * 'zpool status' uses bytes processed per pass to report throughput and 140 * estimate time remaining. We define a pass to start when the scanning 141 * phase completes for a sequential resilver. Optionally, this value 142 * may be used to reset the pass statistics every N txgs to provide an 143 * estimated completion time based on currently observed performance. 144 */ 145 static uint_t zfs_scan_report_txgs = 0; 146 147 /* 148 * By default zfs will check to ensure it is not over the hard memory 149 * limit before each txg. If finer-grained control of this is needed 150 * this value can be set to 1 to enable checking before scanning each 151 * block. 152 */ 153 static int zfs_scan_strict_mem_lim = B_FALSE; 154 155 /* 156 * Maximum number of parallelly executed bytes per leaf vdev. 
We attempt 157 * to strike a balance here between keeping the vdev queues full of I/Os 158 * at all times and not overflowing the queues to cause long latency, 159 * which would cause long txg sync times. No matter what, we will not 160 * overload the drives with I/O, since that is protected by 161 * zfs_vdev_scrub_max_active. 162 */ 163 static uint64_t zfs_scan_vdev_limit = 16 << 20; 164 165 static uint_t zfs_scan_issue_strategy = 0; 166 167 /* don't queue & sort zios, go direct */ 168 static int zfs_scan_legacy = B_FALSE; 169 static uint64_t zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ 170 171 /* 172 * fill_weight is non-tunable at runtime, so we copy it at module init from 173 * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would 174 * break queue sorting. 175 */ 176 static uint_t zfs_scan_fill_weight = 3; 177 static uint64_t fill_weight; 178 179 /* See dsl_scan_should_clear() for details on the memory limit tunables */ 180 static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ 181 static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ 182 183 184 /* fraction of physmem */ 185 static uint_t zfs_scan_mem_lim_fact = 20; 186 187 /* fraction of mem lim above */ 188 static uint_t zfs_scan_mem_lim_soft_fact = 20; 189 190 /* minimum milliseconds to scrub per txg */ 191 static uint_t zfs_scrub_min_time_ms = 1000; 192 193 /* minimum milliseconds to obsolete per txg */ 194 static uint_t zfs_obsolete_min_time_ms = 500; 195 196 /* minimum milliseconds to free per txg */ 197 static uint_t zfs_free_min_time_ms = 1000; 198 199 /* minimum milliseconds to resilver per txg */ 200 static uint_t zfs_resilver_min_time_ms = 3000; 201 202 static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */ 203 int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */ 204 static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ 205 static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ 206 static const ddt_class_t zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; 207 /* max number of blocks to free in a single TXG */ 208 static uint64_t zfs_async_block_max_blocks = UINT64_MAX; 209 /* max number of dedup blocks to free in a single TXG */ 210 static uint64_t zfs_max_async_dedup_frees = 100000; 211 212 /* set to disable resilver deferring */ 213 static int zfs_resilver_disable_defer = B_FALSE; 214 215 /* 216 * We wait a few txgs after importing a pool to begin scanning so that 217 * the import / mounting code isn't held up by scrub / resilver IO. 218 * Unfortunately, it is a bit difficult to determine exactly how long 219 * this will take since userspace will trigger fs mounts asynchronously 220 * and the kernel will create zvol minors asynchronously. As a result, 221 * the value provided here is a bit arbitrary, but represents a 222 * reasonable estimate of how many txgs it will take to finish fully 223 * importing a pool 224 */ 225 #define SCAN_IMPORT_WAIT_TXGS 5 226 227 #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \ 228 ((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \ 229 (scn)->scn_phys.scn_func == POOL_SCAN_RESILVER) 230 231 /* 232 * Enable/disable the processing of the free_bpobj object. 233 */ 234 static int zfs_free_bpobj_enabled = 1; 235 236 /* Error blocks to be scrubbed in one txg. 
*/ 237 static uint_t zfs_scrub_error_blocks_per_txg = 1 << 12; 238 239 /* the order has to match pool_scan_type */ 240 static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { 241 NULL, 242 dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */ 243 dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ 244 }; 245 246 /* In core node for the scn->scn_queue. Represents a dataset to be scanned */ 247 typedef struct { 248 uint64_t sds_dsobj; 249 uint64_t sds_txg; 250 avl_node_t sds_node; 251 } scan_ds_t; 252 253 /* 254 * This controls what conditions are placed on dsl_scan_sync_state(): 255 * SYNC_OPTIONAL) write out scn_phys iff scn_queues_pending == 0 256 * SYNC_MANDATORY) write out scn_phys always. scn_queues_pending must be 0. 257 * SYNC_CACHED) if scn_queues_pending == 0, write out scn_phys. Otherwise 258 * write out the scn_phys_cached version. 259 * See dsl_scan_sync_state for details. 260 */ 261 typedef enum { 262 SYNC_OPTIONAL, 263 SYNC_MANDATORY, 264 SYNC_CACHED 265 } state_sync_type_t; 266 267 /* 268 * This struct represents the minimum information needed to reconstruct a 269 * zio for sequential scanning. This is useful because many of these will 270 * accumulate in the sequential IO queues before being issued, so saving 271 * memory matters here. 272 */ 273 typedef struct scan_io { 274 /* fields from blkptr_t */ 275 uint64_t sio_blk_prop; 276 uint64_t sio_phys_birth; 277 uint64_t sio_birth; 278 zio_cksum_t sio_cksum; 279 uint32_t sio_nr_dvas; 280 281 /* fields from zio_t */ 282 uint32_t sio_flags; 283 zbookmark_phys_t sio_zb; 284 285 /* members for queue sorting */ 286 union { 287 avl_node_t sio_addr_node; /* link into issuing queue */ 288 list_node_t sio_list_node; /* link for issuing to disk */ 289 } sio_nodes; 290 291 /* 292 * There may be up to SPA_DVAS_PER_BP DVAs here from the bp, 293 * depending on how many were in the original bp. Only the 294 * first DVA is really used for sorting and issuing purposes. 295 * The other DVAs (if provided) simply exist so that the zio 296 * layer can find additional copies to repair from in the 297 * event of an error. This array must go at the end of the 298 * struct to allow this for the variable number of elements. 
299 */ 300 dva_t sio_dva[]; 301 } scan_io_t; 302 303 #define SIO_SET_OFFSET(sio, x) DVA_SET_OFFSET(&(sio)->sio_dva[0], x) 304 #define SIO_SET_ASIZE(sio, x) DVA_SET_ASIZE(&(sio)->sio_dva[0], x) 305 #define SIO_GET_OFFSET(sio) DVA_GET_OFFSET(&(sio)->sio_dva[0]) 306 #define SIO_GET_ASIZE(sio) DVA_GET_ASIZE(&(sio)->sio_dva[0]) 307 #define SIO_GET_END_OFFSET(sio) \ 308 (SIO_GET_OFFSET(sio) + SIO_GET_ASIZE(sio)) 309 #define SIO_GET_MUSED(sio) \ 310 (sizeof (scan_io_t) + ((sio)->sio_nr_dvas * sizeof (dva_t))) 311 312 struct dsl_scan_io_queue { 313 dsl_scan_t *q_scn; /* associated dsl_scan_t */ 314 vdev_t *q_vd; /* top-level vdev that this queue represents */ 315 zio_t *q_zio; /* scn_zio_root child for waiting on IO */ 316 317 /* trees used for sorting I/Os and extents of I/Os */ 318 range_tree_t *q_exts_by_addr; 319 zfs_btree_t q_exts_by_size; 320 avl_tree_t q_sios_by_addr; 321 uint64_t q_sio_memused; 322 uint64_t q_last_ext_addr; 323 324 /* members for zio rate limiting */ 325 uint64_t q_maxinflight_bytes; 326 uint64_t q_inflight_bytes; 327 kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */ 328 329 /* per txg statistics */ 330 uint64_t q_total_seg_size_this_txg; 331 uint64_t q_segs_this_txg; 332 uint64_t q_total_zio_size_this_txg; 333 uint64_t q_zios_this_txg; 334 }; 335 336 /* private data for dsl_scan_prefetch_cb() */ 337 typedef struct scan_prefetch_ctx { 338 zfs_refcount_t spc_refcnt; /* refcount for memory management */ 339 dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */ 340 boolean_t spc_root; /* is this prefetch for an objset? */ 341 uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */ 342 uint16_t spc_datablkszsec; /* dn_idatablkszsec of current dnode */ 343 } scan_prefetch_ctx_t; 344 345 /* private data for dsl_scan_prefetch() */ 346 typedef struct scan_prefetch_issue_ctx { 347 avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */ 348 scan_prefetch_ctx_t *spic_spc; /* spc for the callback */ 349 blkptr_t spic_bp; /* bp to prefetch */ 350 zbookmark_phys_t spic_zb; /* bookmark to prefetch */ 351 } scan_prefetch_issue_ctx_t; 352 353 static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 354 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue); 355 static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, 356 scan_io_t *sio); 357 358 static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd); 359 static void scan_io_queues_destroy(dsl_scan_t *scn); 360 361 static kmem_cache_t *sio_cache[SPA_DVAS_PER_BP]; 362 363 /* sio->sio_nr_dvas must be set so we know which cache to free from */ 364 static void 365 sio_free(scan_io_t *sio) 366 { 367 ASSERT3U(sio->sio_nr_dvas, >, 0); 368 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 369 370 kmem_cache_free(sio_cache[sio->sio_nr_dvas - 1], sio); 371 } 372 373 /* It is up to the caller to set sio->sio_nr_dvas for freeing */ 374 static scan_io_t * 375 sio_alloc(unsigned short nr_dvas) 376 { 377 ASSERT3U(nr_dvas, >, 0); 378 ASSERT3U(nr_dvas, <=, SPA_DVAS_PER_BP); 379 380 return (kmem_cache_alloc(sio_cache[nr_dvas - 1], KM_SLEEP)); 381 } 382 383 void 384 scan_init(void) 385 { 386 /* 387 * This is used in ext_size_compare() to weight segments 388 * based on how sparse they are. This cannot be changed 389 * mid-scan and the tree comparison functions don't currently 390 * have a mechanism for passing additional context to the 391 * compare functions. 
Thus we store this value globally and 392 * we only allow it to be set at module initialization time 393 */ 394 fill_weight = zfs_scan_fill_weight; 395 396 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 397 char name[36]; 398 399 (void) snprintf(name, sizeof (name), "sio_cache_%d", i); 400 sio_cache[i] = kmem_cache_create(name, 401 (sizeof (scan_io_t) + ((i + 1) * sizeof (dva_t))), 402 0, NULL, NULL, NULL, NULL, NULL, 0); 403 } 404 } 405 406 void 407 scan_fini(void) 408 { 409 for (int i = 0; i < SPA_DVAS_PER_BP; i++) { 410 kmem_cache_destroy(sio_cache[i]); 411 } 412 } 413 414 static inline boolean_t 415 dsl_scan_is_running(const dsl_scan_t *scn) 416 { 417 return (scn->scn_phys.scn_state == DSS_SCANNING); 418 } 419 420 boolean_t 421 dsl_scan_resilvering(dsl_pool_t *dp) 422 { 423 return (dsl_scan_is_running(dp->dp_scan) && 424 dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER); 425 } 426 427 static inline void 428 sio2bp(const scan_io_t *sio, blkptr_t *bp) 429 { 430 memset(bp, 0, sizeof (*bp)); 431 bp->blk_prop = sio->sio_blk_prop; 432 BP_SET_PHYSICAL_BIRTH(bp, sio->sio_phys_birth); 433 BP_SET_LOGICAL_BIRTH(bp, sio->sio_birth); 434 bp->blk_fill = 1; /* we always only work with data pointers */ 435 bp->blk_cksum = sio->sio_cksum; 436 437 ASSERT3U(sio->sio_nr_dvas, >, 0); 438 ASSERT3U(sio->sio_nr_dvas, <=, SPA_DVAS_PER_BP); 439 440 memcpy(bp->blk_dva, sio->sio_dva, sio->sio_nr_dvas * sizeof (dva_t)); 441 } 442 443 static inline void 444 bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i) 445 { 446 sio->sio_blk_prop = bp->blk_prop; 447 sio->sio_phys_birth = BP_GET_PHYSICAL_BIRTH(bp); 448 sio->sio_birth = BP_GET_LOGICAL_BIRTH(bp); 449 sio->sio_cksum = bp->blk_cksum; 450 sio->sio_nr_dvas = BP_GET_NDVAS(bp); 451 452 /* 453 * Copy the DVAs to the sio. We need all copies of the block so 454 * that the self healing code can use the alternate copies if the 455 * first is corrupted. We want the DVA at index dva_i to be first 456 * in the sio since this is the primary one that we want to issue. 457 */ 458 for (int i = 0, j = dva_i; i < sio->sio_nr_dvas; i++, j++) { 459 sio->sio_dva[i] = bp->blk_dva[j % sio->sio_nr_dvas]; 460 } 461 } 462 463 int 464 dsl_scan_init(dsl_pool_t *dp, uint64_t txg) 465 { 466 int err; 467 dsl_scan_t *scn; 468 spa_t *spa = dp->dp_spa; 469 uint64_t f; 470 471 scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP); 472 scn->scn_dp = dp; 473 474 /* 475 * It's possible that we're resuming a scan after a reboot so 476 * make sure that the scan_async_destroying flag is initialized 477 * appropriately. 478 */ 479 ASSERT(!scn->scn_async_destroying); 480 scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa, 481 SPA_FEATURE_ASYNC_DESTROY); 482 483 /* 484 * Calculate the max number of in-flight bytes for pool-wide 485 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max). 486 * Limits for the issuing phase are done per top-level vdev and 487 * are handled separately. 
488 */ 489 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20, 490 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa))); 491 492 avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t), 493 offsetof(scan_ds_t, sds_node)); 494 mutex_init(&scn->scn_queue_lock, NULL, MUTEX_DEFAULT, NULL); 495 avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare, 496 sizeof (scan_prefetch_issue_ctx_t), 497 offsetof(scan_prefetch_issue_ctx_t, spic_avl_node)); 498 499 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 500 "scrub_func", sizeof (uint64_t), 1, &f); 501 if (err == 0) { 502 /* 503 * There was an old-style scrub in progress. Restart a 504 * new-style scrub from the beginning. 505 */ 506 scn->scn_restart_txg = txg; 507 zfs_dbgmsg("old-style scrub was in progress for %s; " 508 "restarting new-style scrub in txg %llu", 509 spa->spa_name, 510 (longlong_t)scn->scn_restart_txg); 511 512 /* 513 * Load the queue obj from the old location so that it 514 * can be freed by dsl_scan_done(). 515 */ 516 (void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 517 "scrub_queue", sizeof (uint64_t), 1, 518 &scn->scn_phys.scn_queue_obj); 519 } else { 520 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 521 DMU_POOL_ERRORSCRUB, sizeof (uint64_t), 522 ERRORSCRUB_PHYS_NUMINTS, &scn->errorscrub_phys); 523 524 if (err != 0 && err != ENOENT) 525 return (err); 526 527 err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 528 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 529 &scn->scn_phys); 530 531 /* 532 * Detect if the pool contains the signature of #2094. If it 533 * does properly update the scn->scn_phys structure and notify 534 * the administrator by setting an errata for the pool. 535 */ 536 if (err == EOVERFLOW) { 537 uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1]; 538 VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24); 539 VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==, 540 (23 * sizeof (uint64_t))); 541 542 err = zap_lookup(dp->dp_meta_objset, 543 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN, 544 sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp); 545 if (err == 0) { 546 uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS]; 547 548 if (overflow & ~DSL_SCAN_FLAGS_MASK || 549 scn->scn_async_destroying) { 550 spa->spa_errata = 551 ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY; 552 return (EOVERFLOW); 553 } 554 555 memcpy(&scn->scn_phys, zaptmp, 556 SCAN_PHYS_NUMINTS * sizeof (uint64_t)); 557 scn->scn_phys.scn_flags = overflow; 558 559 /* Required scrub already in progress. */ 560 if (scn->scn_phys.scn_state == DSS_FINISHED || 561 scn->scn_phys.scn_state == DSS_CANCELED) 562 spa->spa_errata = 563 ZPOOL_ERRATA_ZOL_2094_SCRUB; 564 } 565 } 566 567 if (err == ENOENT) 568 return (0); 569 else if (err) 570 return (err); 571 572 /* 573 * We might be restarting after a reboot, so jump the issued 574 * counter to how far we've scanned. We know we're consistent 575 * up to here. 576 */ 577 scn->scn_issued_before_pass = scn->scn_phys.scn_examined - 578 scn->scn_phys.scn_skipped; 579 580 if (dsl_scan_is_running(scn) && 581 spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) { 582 /* 583 * A new-type scrub was in progress on an old 584 * pool, and the pool was accessed by old 585 * software. Restart from the beginning, since 586 * the old software may have changed the pool in 587 * the meantime. 
588 */ 589 scn->scn_restart_txg = txg; 590 zfs_dbgmsg("new-style scrub for %s was modified " 591 "by old software; restarting in txg %llu", 592 spa->spa_name, 593 (longlong_t)scn->scn_restart_txg); 594 } else if (dsl_scan_resilvering(dp)) { 595 /* 596 * If a resilver is in progress and there are already 597 * errors, restart it instead of finishing this scan and 598 * then restarting it. If there haven't been any errors 599 * then remember that the incore DTL is valid. 600 */ 601 if (scn->scn_phys.scn_errors > 0) { 602 scn->scn_restart_txg = txg; 603 zfs_dbgmsg("resilver can't excise DTL_MISSING " 604 "when finished; restarting on %s in txg " 605 "%llu", 606 spa->spa_name, 607 (u_longlong_t)scn->scn_restart_txg); 608 } else { 609 /* it's safe to excise DTL when finished */ 610 spa->spa_scrub_started = B_TRUE; 611 } 612 } 613 } 614 615 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys)); 616 617 /* reload the queue into the in-core state */ 618 if (scn->scn_phys.scn_queue_obj != 0) { 619 zap_cursor_t zc; 620 zap_attribute_t za; 621 622 for (zap_cursor_init(&zc, dp->dp_meta_objset, 623 scn->scn_phys.scn_queue_obj); 624 zap_cursor_retrieve(&zc, &za) == 0; 625 (void) zap_cursor_advance(&zc)) { 626 scan_ds_queue_insert(scn, 627 zfs_strtonum(za.za_name, NULL), 628 za.za_first_integer); 629 } 630 zap_cursor_fini(&zc); 631 } 632 633 spa_scan_stat_init(spa); 634 vdev_scan_stat_init(spa->spa_root_vdev); 635 636 return (0); 637 } 638 639 void 640 dsl_scan_fini(dsl_pool_t *dp) 641 { 642 if (dp->dp_scan != NULL) { 643 dsl_scan_t *scn = dp->dp_scan; 644 645 if (scn->scn_taskq != NULL) 646 taskq_destroy(scn->scn_taskq); 647 648 scan_ds_queue_clear(scn); 649 avl_destroy(&scn->scn_queue); 650 mutex_destroy(&scn->scn_queue_lock); 651 scan_ds_prefetch_queue_clear(scn); 652 avl_destroy(&scn->scn_prefetch_queue); 653 654 kmem_free(dp->dp_scan, sizeof (dsl_scan_t)); 655 dp->dp_scan = NULL; 656 } 657 } 658 659 static boolean_t 660 dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx) 661 { 662 return (scn->scn_restart_txg != 0 && 663 scn->scn_restart_txg <= tx->tx_txg); 664 } 665 666 boolean_t 667 dsl_scan_resilver_scheduled(dsl_pool_t *dp) 668 { 669 return ((dp->dp_scan && dp->dp_scan->scn_restart_txg != 0) || 670 (spa_async_tasks(dp->dp_spa) & SPA_ASYNC_RESILVER)); 671 } 672 673 boolean_t 674 dsl_scan_scrubbing(const dsl_pool_t *dp) 675 { 676 dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys; 677 678 return (scn_phys->scn_state == DSS_SCANNING && 679 scn_phys->scn_func == POOL_SCAN_SCRUB); 680 } 681 682 boolean_t 683 dsl_errorscrubbing(const dsl_pool_t *dp) 684 { 685 dsl_errorscrub_phys_t *errorscrub_phys = &dp->dp_scan->errorscrub_phys; 686 687 return (errorscrub_phys->dep_state == DSS_ERRORSCRUBBING && 688 errorscrub_phys->dep_func == POOL_SCAN_ERRORSCRUB); 689 } 690 691 boolean_t 692 dsl_errorscrub_is_paused(const dsl_scan_t *scn) 693 { 694 return (dsl_errorscrubbing(scn->scn_dp) && 695 scn->errorscrub_phys.dep_paused_flags); 696 } 697 698 boolean_t 699 dsl_scan_is_paused_scrub(const dsl_scan_t *scn) 700 { 701 return (dsl_scan_scrubbing(scn->scn_dp) && 702 scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED); 703 } 704 705 static void 706 dsl_errorscrub_sync_state(dsl_scan_t *scn, dmu_tx_t *tx) 707 { 708 scn->errorscrub_phys.dep_cursor = 709 zap_cursor_serialize(&scn->errorscrub_cursor); 710 711 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 712 DMU_POOL_DIRECTORY_OBJECT, 713 DMU_POOL_ERRORSCRUB, sizeof (uint64_t), ERRORSCRUB_PHYS_NUMINTS, 714 &scn->errorscrub_phys, tx)); 715 } 716 717 static void 
718 dsl_errorscrub_setup_sync(void *arg, dmu_tx_t *tx) 719 { 720 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 721 pool_scan_func_t *funcp = arg; 722 dsl_pool_t *dp = scn->scn_dp; 723 spa_t *spa = dp->dp_spa; 724 725 ASSERT(!dsl_scan_is_running(scn)); 726 ASSERT(!dsl_errorscrubbing(scn->scn_dp)); 727 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS); 728 729 memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys)); 730 scn->errorscrub_phys.dep_func = *funcp; 731 scn->errorscrub_phys.dep_state = DSS_ERRORSCRUBBING; 732 scn->errorscrub_phys.dep_start_time = gethrestime_sec(); 733 scn->errorscrub_phys.dep_to_examine = spa_get_last_errlog_size(spa); 734 scn->errorscrub_phys.dep_examined = 0; 735 scn->errorscrub_phys.dep_errors = 0; 736 scn->errorscrub_phys.dep_cursor = 0; 737 zap_cursor_init_serialized(&scn->errorscrub_cursor, 738 spa->spa_meta_objset, spa->spa_errlog_last, 739 scn->errorscrub_phys.dep_cursor); 740 741 vdev_config_dirty(spa->spa_root_vdev); 742 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_START); 743 744 dsl_errorscrub_sync_state(scn, tx); 745 746 spa_history_log_internal(spa, "error scrub setup", tx, 747 "func=%u mintxg=%u maxtxg=%llu", 748 *funcp, 0, (u_longlong_t)tx->tx_txg); 749 } 750 751 static int 752 dsl_errorscrub_setup_check(void *arg, dmu_tx_t *tx) 753 { 754 (void) arg; 755 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 756 757 if (dsl_scan_is_running(scn) || (dsl_errorscrubbing(scn->scn_dp))) { 758 return (SET_ERROR(EBUSY)); 759 } 760 761 if (spa_get_last_errlog_size(scn->scn_dp->dp_spa) == 0) { 762 return (ECANCELED); 763 } 764 return (0); 765 } 766 767 /* 768 * Writes out a persistent dsl_scan_phys_t record to the pool directory. 769 * Because we can be running in the block sorting algorithm, we do not always 770 * want to write out the record, only when it is "safe" to do so. This safety 771 * condition is achieved by making sure that the sorting queues are empty 772 * (scn_queues_pending == 0). When this condition is not true, the sync'd state 773 * is inconsistent with how much actual scanning progress has been made. The 774 * kind of sync to be performed is specified by the sync_type argument. If the 775 * sync is optional, we only sync if the queues are empty. If the sync is 776 * mandatory, we do a hard ASSERT to make sure that the queues are empty. The 777 * third possible state is a "cached" sync. This is done in response to: 778 * 1) The dataset that was in the last sync'd dsl_scan_phys_t having been 779 * destroyed, so we wouldn't be able to restart scanning from it. 780 * 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been 781 * superseded by a newer snapshot. 782 * 3) The dataset that was in the last sync'd dsl_scan_phys_t having been 783 * swapped with its clone. 784 * In all cases, a cached sync simply rewrites the last record we've written, 785 * just slightly modified. For the modifications that are performed to the 786 * last written dsl_scan_phys_t, see dsl_scan_ds_destroyed, 787 * dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped. 
788 */ 789 static void 790 dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type) 791 { 792 int i; 793 spa_t *spa = scn->scn_dp->dp_spa; 794 795 ASSERT(sync_type != SYNC_MANDATORY || scn->scn_queues_pending == 0); 796 if (scn->scn_queues_pending == 0) { 797 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 798 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 799 dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue; 800 801 if (q == NULL) 802 continue; 803 804 mutex_enter(&vd->vdev_scan_io_queue_lock); 805 ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL); 806 ASSERT3P(zfs_btree_first(&q->q_exts_by_size, NULL), ==, 807 NULL); 808 ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL); 809 mutex_exit(&vd->vdev_scan_io_queue_lock); 810 } 811 812 if (scn->scn_phys.scn_queue_obj != 0) 813 scan_ds_queue_sync(scn, tx); 814 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 815 DMU_POOL_DIRECTORY_OBJECT, 816 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 817 &scn->scn_phys, tx)); 818 memcpy(&scn->scn_phys_cached, &scn->scn_phys, 819 sizeof (scn->scn_phys)); 820 821 if (scn->scn_checkpointing) 822 zfs_dbgmsg("finish scan checkpoint for %s", 823 spa->spa_name); 824 825 scn->scn_checkpointing = B_FALSE; 826 scn->scn_last_checkpoint = ddi_get_lbolt(); 827 } else if (sync_type == SYNC_CACHED) { 828 VERIFY0(zap_update(scn->scn_dp->dp_meta_objset, 829 DMU_POOL_DIRECTORY_OBJECT, 830 DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS, 831 &scn->scn_phys_cached, tx)); 832 } 833 } 834 835 int 836 dsl_scan_setup_check(void *arg, dmu_tx_t *tx) 837 { 838 (void) arg; 839 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 840 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 841 842 if (dsl_scan_is_running(scn) || vdev_rebuild_active(rvd) || 843 dsl_errorscrubbing(scn->scn_dp)) 844 return (SET_ERROR(EBUSY)); 845 846 return (0); 847 } 848 849 void 850 dsl_scan_setup_sync(void *arg, dmu_tx_t *tx) 851 { 852 (void) arg; 853 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 854 pool_scan_func_t *funcp = arg; 855 dmu_object_type_t ot = 0; 856 dsl_pool_t *dp = scn->scn_dp; 857 spa_t *spa = dp->dp_spa; 858 859 ASSERT(!dsl_scan_is_running(scn)); 860 ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS); 861 memset(&scn->scn_phys, 0, sizeof (scn->scn_phys)); 862 863 /* 864 * If we are starting a fresh scrub, we erase the error scrub 865 * information from disk. 
866 */ 867 memset(&scn->errorscrub_phys, 0, sizeof (scn->errorscrub_phys)); 868 dsl_errorscrub_sync_state(scn, tx); 869 870 scn->scn_phys.scn_func = *funcp; 871 scn->scn_phys.scn_state = DSS_SCANNING; 872 scn->scn_phys.scn_min_txg = 0; 873 scn->scn_phys.scn_max_txg = tx->tx_txg; 874 scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */ 875 scn->scn_phys.scn_start_time = gethrestime_sec(); 876 scn->scn_phys.scn_errors = 0; 877 scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc; 878 scn->scn_issued_before_pass = 0; 879 scn->scn_restart_txg = 0; 880 scn->scn_done_txg = 0; 881 scn->scn_last_checkpoint = 0; 882 scn->scn_checkpointing = B_FALSE; 883 spa_scan_stat_init(spa); 884 vdev_scan_stat_init(spa->spa_root_vdev); 885 886 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 887 scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max; 888 889 /* rewrite all disk labels */ 890 vdev_config_dirty(spa->spa_root_vdev); 891 892 if (vdev_resilver_needed(spa->spa_root_vdev, 893 &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) { 894 nvlist_t *aux = fnvlist_alloc(); 895 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 896 "healing"); 897 spa_event_notify(spa, NULL, aux, 898 ESC_ZFS_RESILVER_START); 899 nvlist_free(aux); 900 } else { 901 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START); 902 } 903 904 spa->spa_scrub_started = B_TRUE; 905 /* 906 * If this is an incremental scrub, limit the DDT scrub phase 907 * to just the auto-ditto class (for correctness); the rest 908 * of the scrub should go faster using top-down pruning. 909 */ 910 if (scn->scn_phys.scn_min_txg > TXG_INITIAL) 911 scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO; 912 913 /* 914 * When starting a resilver clear any existing rebuild state. 915 * This is required to prevent stale rebuild status from 916 * being reported when a rebuild is run, then a resilver and 917 * finally a scrub. In which case only the scrub status 918 * should be reported by 'zpool status'. 919 */ 920 if (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) { 921 vdev_t *rvd = spa->spa_root_vdev; 922 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 923 vdev_t *vd = rvd->vdev_child[i]; 924 vdev_rebuild_clear_sync( 925 (void *)(uintptr_t)vd->vdev_id, tx); 926 } 927 } 928 } 929 930 /* back to the generic stuff */ 931 932 if (zfs_scan_blkstats) { 933 if (dp->dp_blkstats == NULL) { 934 dp->dp_blkstats = 935 vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP); 936 } 937 memset(&dp->dp_blkstats->zab_type, 0, 938 sizeof (dp->dp_blkstats->zab_type)); 939 } else { 940 if (dp->dp_blkstats) { 941 vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t)); 942 dp->dp_blkstats = NULL; 943 } 944 } 945 946 if (spa_version(spa) < SPA_VERSION_DSL_SCRUB) 947 ot = DMU_OT_ZAP_OTHER; 948 949 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, 950 ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx); 951 952 memcpy(&scn->scn_phys_cached, &scn->scn_phys, sizeof (scn->scn_phys)); 953 954 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 955 956 spa_history_log_internal(spa, "scan setup", tx, 957 "func=%u mintxg=%llu maxtxg=%llu", 958 *funcp, (u_longlong_t)scn->scn_phys.scn_min_txg, 959 (u_longlong_t)scn->scn_phys.scn_max_txg); 960 } 961 962 /* 963 * Called by ZFS_IOC_POOL_SCRUB and ZFS_IOC_POOL_SCAN ioctl to start a scrub, 964 * error scrub or resilver. Can also be called to resume a paused scrub or 965 * error scrub. 
966 */ 967 int 968 dsl_scan(dsl_pool_t *dp, pool_scan_func_t func) 969 { 970 spa_t *spa = dp->dp_spa; 971 dsl_scan_t *scn = dp->dp_scan; 972 973 /* 974 * Purge all vdev caches and probe all devices. We do this here 975 * rather than in sync context because this requires a writer lock 976 * on the spa_config lock, which we can't do from sync context. The 977 * spa_scrub_reopen flag indicates that vdev_open() should not 978 * attempt to start another scrub. 979 */ 980 spa_vdev_state_enter(spa, SCL_NONE); 981 spa->spa_scrub_reopen = B_TRUE; 982 vdev_reopen(spa->spa_root_vdev); 983 spa->spa_scrub_reopen = B_FALSE; 984 (void) spa_vdev_state_exit(spa, NULL, 0); 985 986 if (func == POOL_SCAN_RESILVER) { 987 dsl_scan_restart_resilver(spa->spa_dsl_pool, 0); 988 return (0); 989 } 990 991 if (func == POOL_SCAN_ERRORSCRUB) { 992 if (dsl_errorscrub_is_paused(dp->dp_scan)) { 993 /* 994 * got error scrub start cmd, resume paused error scrub. 995 */ 996 int err = dsl_scrub_set_pause_resume(scn->scn_dp, 997 POOL_SCRUB_NORMAL); 998 if (err == 0) { 999 spa_event_notify(spa, NULL, NULL, 1000 ESC_ZFS_ERRORSCRUB_RESUME); 1001 return (ECANCELED); 1002 } 1003 return (SET_ERROR(err)); 1004 } 1005 1006 return (dsl_sync_task(spa_name(dp->dp_spa), 1007 dsl_errorscrub_setup_check, dsl_errorscrub_setup_sync, 1008 &func, 0, ZFS_SPACE_CHECK_RESERVED)); 1009 } 1010 1011 if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) { 1012 /* got scrub start cmd, resume paused scrub */ 1013 int err = dsl_scrub_set_pause_resume(scn->scn_dp, 1014 POOL_SCRUB_NORMAL); 1015 if (err == 0) { 1016 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME); 1017 return (SET_ERROR(ECANCELED)); 1018 } 1019 return (SET_ERROR(err)); 1020 } 1021 1022 return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check, 1023 dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_EXTRA_RESERVED)); 1024 } 1025 1026 static void 1027 dsl_errorscrub_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) 1028 { 1029 dsl_pool_t *dp = scn->scn_dp; 1030 spa_t *spa = dp->dp_spa; 1031 1032 if (complete) { 1033 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_FINISH); 1034 spa_history_log_internal(spa, "error scrub done", tx, 1035 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 1036 } else { 1037 spa_history_log_internal(spa, "error scrub canceled", tx, 1038 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 1039 } 1040 1041 scn->errorscrub_phys.dep_state = complete ? DSS_FINISHED : DSS_CANCELED; 1042 spa->spa_scrub_active = B_FALSE; 1043 spa_errlog_rotate(spa); 1044 scn->errorscrub_phys.dep_end_time = gethrestime_sec(); 1045 zap_cursor_fini(&scn->errorscrub_cursor); 1046 1047 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB) 1048 spa->spa_errata = 0; 1049 1050 ASSERT(!dsl_errorscrubbing(scn->scn_dp)); 1051 } 1052 1053 static void 1054 dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx) 1055 { 1056 static const char *old_names[] = { 1057 "scrub_bookmark", 1058 "scrub_ddt_bookmark", 1059 "scrub_ddt_class_max", 1060 "scrub_queue", 1061 "scrub_min_txg", 1062 "scrub_max_txg", 1063 "scrub_func", 1064 "scrub_errors", 1065 NULL 1066 }; 1067 1068 dsl_pool_t *dp = scn->scn_dp; 1069 spa_t *spa = dp->dp_spa; 1070 int i; 1071 1072 /* Remove any remnants of an old-style scrub. 
*/ 1073 for (i = 0; old_names[i]; i++) { 1074 (void) zap_remove(dp->dp_meta_objset, 1075 DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx); 1076 } 1077 1078 if (scn->scn_phys.scn_queue_obj != 0) { 1079 VERIFY0(dmu_object_free(dp->dp_meta_objset, 1080 scn->scn_phys.scn_queue_obj, tx)); 1081 scn->scn_phys.scn_queue_obj = 0; 1082 } 1083 scan_ds_queue_clear(scn); 1084 scan_ds_prefetch_queue_clear(scn); 1085 1086 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 1087 1088 /* 1089 * If we were "restarted" from a stopped state, don't bother 1090 * with anything else. 1091 */ 1092 if (!dsl_scan_is_running(scn)) { 1093 ASSERT(!scn->scn_is_sorted); 1094 return; 1095 } 1096 1097 if (scn->scn_is_sorted) { 1098 scan_io_queues_destroy(scn); 1099 scn->scn_is_sorted = B_FALSE; 1100 1101 if (scn->scn_taskq != NULL) { 1102 taskq_destroy(scn->scn_taskq); 1103 scn->scn_taskq = NULL; 1104 } 1105 } 1106 1107 scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED; 1108 1109 spa_notify_waiters(spa); 1110 1111 if (dsl_scan_restarting(scn, tx)) 1112 spa_history_log_internal(spa, "scan aborted, restarting", tx, 1113 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 1114 else if (!complete) 1115 spa_history_log_internal(spa, "scan cancelled", tx, 1116 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 1117 else 1118 spa_history_log_internal(spa, "scan done", tx, 1119 "errors=%llu", (u_longlong_t)spa_approx_errlog_size(spa)); 1120 1121 if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) { 1122 spa->spa_scrub_active = B_FALSE; 1123 1124 /* 1125 * If the scrub/resilver completed, update all DTLs to 1126 * reflect this. Whether it succeeded or not, vacate 1127 * all temporary scrub DTLs. 1128 * 1129 * As the scrub does not currently support traversing 1130 * data that have been freed but are part of a checkpoint, 1131 * we don't mark the scrub as done in the DTLs as faults 1132 * may still exist in those vdevs. 1133 */ 1134 if (complete && 1135 !spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 1136 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 1137 scn->scn_phys.scn_max_txg, B_TRUE, B_FALSE); 1138 1139 if (scn->scn_phys.scn_min_txg) { 1140 nvlist_t *aux = fnvlist_alloc(); 1141 fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, 1142 "healing"); 1143 spa_event_notify(spa, NULL, aux, 1144 ESC_ZFS_RESILVER_FINISH); 1145 nvlist_free(aux); 1146 } else { 1147 spa_event_notify(spa, NULL, NULL, 1148 ESC_ZFS_SCRUB_FINISH); 1149 } 1150 } else { 1151 vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg, 1152 0, B_TRUE, B_FALSE); 1153 } 1154 spa_errlog_rotate(spa); 1155 1156 /* 1157 * Don't clear flag until after vdev_dtl_reassess to ensure that 1158 * DTL_MISSING will get updated when possible. 1159 */ 1160 spa->spa_scrub_started = B_FALSE; 1161 1162 /* 1163 * We may have finished replacing a device. 1164 * Let the async thread assess this and handle the detach. 1165 */ 1166 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 1167 1168 /* 1169 * Clear any resilver_deferred flags in the config. 1170 * If there are drives that need resilvering, kick 1171 * off an asynchronous request to start resilver. 1172 * vdev_clear_resilver_deferred() may update the config 1173 * before the resilver can restart. In the event of 1174 * a crash during this period, the spa loading code 1175 * will find the drives that need to be resilvered 1176 * and start the resilver then. 
1177 */ 1178 if (spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER) && 1179 vdev_clear_resilver_deferred(spa->spa_root_vdev, tx)) { 1180 spa_history_log_internal(spa, 1181 "starting deferred resilver", tx, "errors=%llu", 1182 (u_longlong_t)spa_approx_errlog_size(spa)); 1183 spa_async_request(spa, SPA_ASYNC_RESILVER); 1184 } 1185 1186 /* Clear recent error events (i.e. duplicate events tracking) */ 1187 if (complete) 1188 zfs_ereport_clear(spa, NULL); 1189 } 1190 1191 scn->scn_phys.scn_end_time = gethrestime_sec(); 1192 1193 if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB) 1194 spa->spa_errata = 0; 1195 1196 ASSERT(!dsl_scan_is_running(scn)); 1197 } 1198 1199 static int 1200 dsl_errorscrub_pause_resume_check(void *arg, dmu_tx_t *tx) 1201 { 1202 pool_scrub_cmd_t *cmd = arg; 1203 dsl_pool_t *dp = dmu_tx_pool(tx); 1204 dsl_scan_t *scn = dp->dp_scan; 1205 1206 if (*cmd == POOL_SCRUB_PAUSE) { 1207 /* 1208 * can't pause a error scrub when there is no in-progress 1209 * error scrub. 1210 */ 1211 if (!dsl_errorscrubbing(dp)) 1212 return (SET_ERROR(ENOENT)); 1213 1214 /* can't pause a paused error scrub */ 1215 if (dsl_errorscrub_is_paused(scn)) 1216 return (SET_ERROR(EBUSY)); 1217 } else if (*cmd != POOL_SCRUB_NORMAL) { 1218 return (SET_ERROR(ENOTSUP)); 1219 } 1220 1221 return (0); 1222 } 1223 1224 static void 1225 dsl_errorscrub_pause_resume_sync(void *arg, dmu_tx_t *tx) 1226 { 1227 pool_scrub_cmd_t *cmd = arg; 1228 dsl_pool_t *dp = dmu_tx_pool(tx); 1229 spa_t *spa = dp->dp_spa; 1230 dsl_scan_t *scn = dp->dp_scan; 1231 1232 if (*cmd == POOL_SCRUB_PAUSE) { 1233 spa->spa_scan_pass_errorscrub_pause = gethrestime_sec(); 1234 scn->errorscrub_phys.dep_paused_flags = B_TRUE; 1235 dsl_errorscrub_sync_state(scn, tx); 1236 spa_event_notify(spa, NULL, NULL, ESC_ZFS_ERRORSCRUB_PAUSED); 1237 } else { 1238 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL); 1239 if (dsl_errorscrub_is_paused(scn)) { 1240 /* 1241 * We need to keep track of how much time we spend 1242 * paused per pass so that we can adjust the error scrub 1243 * rate shown in the output of 'zpool status'. 
1244 */ 1245 spa->spa_scan_pass_errorscrub_spent_paused += 1246 gethrestime_sec() - 1247 spa->spa_scan_pass_errorscrub_pause; 1248 1249 spa->spa_scan_pass_errorscrub_pause = 0; 1250 scn->errorscrub_phys.dep_paused_flags = B_FALSE; 1251 1252 zap_cursor_init_serialized( 1253 &scn->errorscrub_cursor, 1254 spa->spa_meta_objset, spa->spa_errlog_last, 1255 scn->errorscrub_phys.dep_cursor); 1256 1257 dsl_errorscrub_sync_state(scn, tx); 1258 } 1259 } 1260 } 1261 1262 static int 1263 dsl_errorscrub_cancel_check(void *arg, dmu_tx_t *tx) 1264 { 1265 (void) arg; 1266 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1267 /* can't cancel a error scrub when there is no one in-progress */ 1268 if (!dsl_errorscrubbing(scn->scn_dp)) 1269 return (SET_ERROR(ENOENT)); 1270 return (0); 1271 } 1272 1273 static void 1274 dsl_errorscrub_cancel_sync(void *arg, dmu_tx_t *tx) 1275 { 1276 (void) arg; 1277 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1278 1279 dsl_errorscrub_done(scn, B_FALSE, tx); 1280 dsl_errorscrub_sync_state(scn, tx); 1281 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, 1282 ESC_ZFS_ERRORSCRUB_ABORT); 1283 } 1284 1285 static int 1286 dsl_scan_cancel_check(void *arg, dmu_tx_t *tx) 1287 { 1288 (void) arg; 1289 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1290 1291 if (!dsl_scan_is_running(scn)) 1292 return (SET_ERROR(ENOENT)); 1293 return (0); 1294 } 1295 1296 static void 1297 dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx) 1298 { 1299 (void) arg; 1300 dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan; 1301 1302 dsl_scan_done(scn, B_FALSE, tx); 1303 dsl_scan_sync_state(scn, tx, SYNC_MANDATORY); 1304 spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT); 1305 } 1306 1307 int 1308 dsl_scan_cancel(dsl_pool_t *dp) 1309 { 1310 if (dsl_errorscrubbing(dp)) { 1311 return (dsl_sync_task(spa_name(dp->dp_spa), 1312 dsl_errorscrub_cancel_check, dsl_errorscrub_cancel_sync, 1313 NULL, 3, ZFS_SPACE_CHECK_RESERVED)); 1314 } 1315 return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check, 1316 dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED)); 1317 } 1318 1319 static int 1320 dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx) 1321 { 1322 pool_scrub_cmd_t *cmd = arg; 1323 dsl_pool_t *dp = dmu_tx_pool(tx); 1324 dsl_scan_t *scn = dp->dp_scan; 1325 1326 if (*cmd == POOL_SCRUB_PAUSE) { 1327 /* can't pause a scrub when there is no in-progress scrub */ 1328 if (!dsl_scan_scrubbing(dp)) 1329 return (SET_ERROR(ENOENT)); 1330 1331 /* can't pause a paused scrub */ 1332 if (dsl_scan_is_paused_scrub(scn)) 1333 return (SET_ERROR(EBUSY)); 1334 } else if (*cmd != POOL_SCRUB_NORMAL) { 1335 return (SET_ERROR(ENOTSUP)); 1336 } 1337 1338 return (0); 1339 } 1340 1341 static void 1342 dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx) 1343 { 1344 pool_scrub_cmd_t *cmd = arg; 1345 dsl_pool_t *dp = dmu_tx_pool(tx); 1346 spa_t *spa = dp->dp_spa; 1347 dsl_scan_t *scn = dp->dp_scan; 1348 1349 if (*cmd == POOL_SCRUB_PAUSE) { 1350 /* can't pause a scrub when there is no in-progress scrub */ 1351 spa->spa_scan_pass_scrub_pause = gethrestime_sec(); 1352 scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED; 1353 scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED; 1354 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1355 spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED); 1356 spa_notify_waiters(spa); 1357 } else { 1358 ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL); 1359 if (dsl_scan_is_paused_scrub(scn)) { 1360 /* 1361 * We need to keep track of how much time we spend 1362 * paused per pass so that we can adjust the scrub rate 1363 * shown in 
the output of 'zpool status' 1364 */ 1365 spa->spa_scan_pass_scrub_spent_paused += 1366 gethrestime_sec() - spa->spa_scan_pass_scrub_pause; 1367 spa->spa_scan_pass_scrub_pause = 0; 1368 scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED; 1369 scn->scn_phys_cached.scn_flags &= ~DSF_SCRUB_PAUSED; 1370 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 1371 } 1372 } 1373 } 1374 1375 /* 1376 * Set scrub pause/resume state if it makes sense to do so 1377 */ 1378 int 1379 dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd) 1380 { 1381 if (dsl_errorscrubbing(dp)) { 1382 return (dsl_sync_task(spa_name(dp->dp_spa), 1383 dsl_errorscrub_pause_resume_check, 1384 dsl_errorscrub_pause_resume_sync, &cmd, 3, 1385 ZFS_SPACE_CHECK_RESERVED)); 1386 } 1387 return (dsl_sync_task(spa_name(dp->dp_spa), 1388 dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3, 1389 ZFS_SPACE_CHECK_RESERVED)); 1390 } 1391 1392 1393 /* start a new scan, or restart an existing one. */ 1394 void 1395 dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg) 1396 { 1397 if (txg == 0) { 1398 dmu_tx_t *tx; 1399 tx = dmu_tx_create_dd(dp->dp_mos_dir); 1400 VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT)); 1401 1402 txg = dmu_tx_get_txg(tx); 1403 dp->dp_scan->scn_restart_txg = txg; 1404 dmu_tx_commit(tx); 1405 } else { 1406 dp->dp_scan->scn_restart_txg = txg; 1407 } 1408 zfs_dbgmsg("restarting resilver for %s at txg=%llu", 1409 dp->dp_spa->spa_name, (longlong_t)txg); 1410 } 1411 1412 void 1413 dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp) 1414 { 1415 zio_free(dp->dp_spa, txg, bp); 1416 } 1417 1418 void 1419 dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp) 1420 { 1421 ASSERT(dsl_pool_sync_context(dp)); 1422 zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags)); 1423 } 1424 1425 static int 1426 scan_ds_queue_compare(const void *a, const void *b) 1427 { 1428 const scan_ds_t *sds_a = a, *sds_b = b; 1429 1430 if (sds_a->sds_dsobj < sds_b->sds_dsobj) 1431 return (-1); 1432 if (sds_a->sds_dsobj == sds_b->sds_dsobj) 1433 return (0); 1434 return (1); 1435 } 1436 1437 static void 1438 scan_ds_queue_clear(dsl_scan_t *scn) 1439 { 1440 void *cookie = NULL; 1441 scan_ds_t *sds; 1442 while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) { 1443 kmem_free(sds, sizeof (*sds)); 1444 } 1445 } 1446 1447 static boolean_t 1448 scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg) 1449 { 1450 scan_ds_t srch, *sds; 1451 1452 srch.sds_dsobj = dsobj; 1453 sds = avl_find(&scn->scn_queue, &srch, NULL); 1454 if (sds != NULL && txg != NULL) 1455 *txg = sds->sds_txg; 1456 return (sds != NULL); 1457 } 1458 1459 static void 1460 scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg) 1461 { 1462 scan_ds_t *sds; 1463 avl_index_t where; 1464 1465 sds = kmem_zalloc(sizeof (*sds), KM_SLEEP); 1466 sds->sds_dsobj = dsobj; 1467 sds->sds_txg = txg; 1468 1469 VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL); 1470 avl_insert(&scn->scn_queue, sds, where); 1471 } 1472 1473 static void 1474 scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj) 1475 { 1476 scan_ds_t srch, *sds; 1477 1478 srch.sds_dsobj = dsobj; 1479 1480 sds = avl_find(&scn->scn_queue, &srch, NULL); 1481 VERIFY(sds != NULL); 1482 avl_remove(&scn->scn_queue, sds); 1483 kmem_free(sds, sizeof (*sds)); 1484 } 1485 1486 static void 1487 scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx) 1488 { 1489 dsl_pool_t *dp = scn->scn_dp; 1490 spa_t *spa = dp->dp_spa; 1491 dmu_object_type_t ot = (spa_version(spa) >= 
SPA_VERSION_DSL_SCRUB) ? 1492 DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER; 1493 1494 ASSERT0(scn->scn_queues_pending); 1495 ASSERT(scn->scn_phys.scn_queue_obj != 0); 1496 1497 VERIFY0(dmu_object_free(dp->dp_meta_objset, 1498 scn->scn_phys.scn_queue_obj, tx)); 1499 scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot, 1500 DMU_OT_NONE, 0, tx); 1501 for (scan_ds_t *sds = avl_first(&scn->scn_queue); 1502 sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) { 1503 VERIFY0(zap_add_int_key(dp->dp_meta_objset, 1504 scn->scn_phys.scn_queue_obj, sds->sds_dsobj, 1505 sds->sds_txg, tx)); 1506 } 1507 } 1508 1509 /* 1510 * Computes the memory limit state that we're currently in. A sorted scan 1511 * needs quite a bit of memory to hold the sorting queue, so we need to 1512 * reasonably constrain the size so it doesn't impact overall system 1513 * performance. We compute two limits: 1514 * 1) Hard memory limit: if the amount of memory used by the sorting 1515 * queues on a pool gets above this value, we stop the metadata 1516 * scanning portion and start issuing the queued up and sorted 1517 * I/Os to reduce memory usage. 1518 * This limit is calculated as a fraction of physmem (by default 5%). 1519 * We constrain the lower bound of the hard limit to an absolute 1520 * minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain 1521 * the upper bound to 5% of the total pool size - no chance we'll 1522 * ever need that much memory, but just to keep the value in check. 1523 * 2) Soft memory limit: once we hit the hard memory limit, we start 1524 * issuing I/O to reduce queue memory usage, but we don't want to 1525 * completely empty out the queues, since we might be able to find I/Os 1526 * that will fill in the gaps of our non-sequential IOs at some point 1527 * in the future. So we stop the issuing of I/Os once the amount of 1528 * memory used drops below the soft limit (at which point we stop issuing 1529 * I/O and start scanning metadata again). 1530 * 1531 * This limit is calculated by subtracting a fraction of the hard 1532 * limit from the hard limit. By default this fraction is 5%, so 1533 * the soft limit is 95% of the hard limit. We cap the size of the 1534 * difference between the hard and soft limits at an absolute 1535 * maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is 1536 * sufficient to not cause too frequent switching between the 1537 * metadata scan and I/O issue (even at 2k recordsize, 128 MiB's 1538 * worth of queues is about 1.2 GiB of on-pool data, so scanning 1539 * that should take at least a decent fraction of a second). 
1540 */ 1541 static boolean_t 1542 dsl_scan_should_clear(dsl_scan_t *scn) 1543 { 1544 spa_t *spa = scn->scn_dp->dp_spa; 1545 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 1546 uint64_t alloc, mlim_hard, mlim_soft, mused; 1547 1548 alloc = metaslab_class_get_alloc(spa_normal_class(spa)); 1549 alloc += metaslab_class_get_alloc(spa_special_class(spa)); 1550 alloc += metaslab_class_get_alloc(spa_dedup_class(spa)); 1551 1552 mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE, 1553 zfs_scan_mem_lim_min); 1554 mlim_hard = MIN(mlim_hard, alloc / 20); 1555 mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact, 1556 zfs_scan_mem_lim_soft_max); 1557 mused = 0; 1558 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 1559 vdev_t *tvd = rvd->vdev_child[i]; 1560 dsl_scan_io_queue_t *queue; 1561 1562 mutex_enter(&tvd->vdev_scan_io_queue_lock); 1563 queue = tvd->vdev_scan_io_queue; 1564 if (queue != NULL) { 1565 /* 1566 * # of extents in exts_by_addr = # in exts_by_size. 1567 * B-tree efficiency is ~75%, but can be as low as 50%. 1568 */ 1569 mused += zfs_btree_numnodes(&queue->q_exts_by_size) * 1570 ((sizeof (range_seg_gap_t) + sizeof (uint64_t)) * 1571 3 / 2) + queue->q_sio_memused; 1572 } 1573 mutex_exit(&tvd->vdev_scan_io_queue_lock); 1574 } 1575 1576 dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused); 1577 1578 if (mused == 0) 1579 ASSERT0(scn->scn_queues_pending); 1580 1581 /* 1582 * If we are above our hard limit, we need to clear out memory. 1583 * If we are below our soft limit, we need to accumulate sequential IOs. 1584 * Otherwise, we should keep doing whatever we are currently doing. 1585 */ 1586 if (mused >= mlim_hard) 1587 return (B_TRUE); 1588 else if (mused < mlim_soft) 1589 return (B_FALSE); 1590 else 1591 return (scn->scn_clearing); 1592 } 1593 1594 static boolean_t 1595 dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) 1596 { 1597 /* we never skip user/group accounting objects */ 1598 if (zb && (int64_t)zb->zb_object < 0) 1599 return (B_FALSE); 1600 1601 if (scn->scn_suspending) 1602 return (B_TRUE); /* we're already suspending */ 1603 1604 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) 1605 return (B_FALSE); /* we're resuming */ 1606 1607 /* We only know how to resume from level-0 and objset blocks. */ 1608 if (zb && (zb->zb_level != 0 && zb->zb_level != ZB_ROOT_LEVEL)) 1609 return (B_FALSE); 1610 1611 /* 1612 * We suspend if: 1613 * - we have scanned for at least the minimum time (default 1 sec 1614 * for scrub, 3 sec for resilver), and either we have sufficient 1615 * dirty data that we are starting to write more quickly 1616 * (default 30%), someone is explicitly waiting for this txg 1617 * to complete, or we have used up all of the time in the txg 1618 * timeout (default 5 sec). 1619 * or 1620 * - the spa is shutting down because this pool is being exported 1621 * or the machine is rebooting. 1622 * or 1623 * - the scan queue has reached its memory use limit 1624 */ 1625 uint64_t curr_time_ns = gethrtime(); 1626 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 1627 uint64_t sync_time_ns = curr_time_ns - 1628 scn->scn_dp->dp_spa->spa_sync_starttime; 1629 uint64_t dirty_min_bytes = zfs_dirty_data_max * 1630 zfs_vdev_async_write_active_min_dirty_percent / 100; 1631 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 
1632 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 1633 1634 if ((NSEC2MSEC(scan_time_ns) > mintime && 1635 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes || 1636 txg_sync_waiting(scn->scn_dp) || 1637 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 1638 spa_shutting_down(scn->scn_dp->dp_spa) || 1639 (zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) { 1640 if (zb && zb->zb_level == ZB_ROOT_LEVEL) { 1641 dprintf("suspending at first available bookmark " 1642 "%llx/%llx/%llx/%llx\n", 1643 (longlong_t)zb->zb_objset, 1644 (longlong_t)zb->zb_object, 1645 (longlong_t)zb->zb_level, 1646 (longlong_t)zb->zb_blkid); 1647 SET_BOOKMARK(&scn->scn_phys.scn_bookmark, 1648 zb->zb_objset, 0, 0, 0); 1649 } else if (zb != NULL) { 1650 dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n", 1651 (longlong_t)zb->zb_objset, 1652 (longlong_t)zb->zb_object, 1653 (longlong_t)zb->zb_level, 1654 (longlong_t)zb->zb_blkid); 1655 scn->scn_phys.scn_bookmark = *zb; 1656 } else { 1657 #ifdef ZFS_DEBUG 1658 dsl_scan_phys_t *scnp = &scn->scn_phys; 1659 dprintf("suspending at at DDT bookmark " 1660 "%llx/%llx/%llx/%llx\n", 1661 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 1662 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 1663 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 1664 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 1665 #endif 1666 } 1667 scn->scn_suspending = B_TRUE; 1668 return (B_TRUE); 1669 } 1670 return (B_FALSE); 1671 } 1672 1673 static boolean_t 1674 dsl_error_scrub_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) 1675 { 1676 /* 1677 * We suspend if: 1678 * - we have scrubbed for at least the minimum time (default 1 sec 1679 * for error scrub), someone is explicitly waiting for this txg 1680 * to complete, or we have used up all of the time in the txg 1681 * timeout (default 5 sec). 1682 * or 1683 * - the spa is shutting down because this pool is being exported 1684 * or the machine is rebooting. 1685 */ 1686 uint64_t curr_time_ns = gethrtime(); 1687 uint64_t error_scrub_time_ns = curr_time_ns - scn->scn_sync_start_time; 1688 uint64_t sync_time_ns = curr_time_ns - 1689 scn->scn_dp->dp_spa->spa_sync_starttime; 1690 int mintime = zfs_scrub_min_time_ms; 1691 1692 if ((NSEC2MSEC(error_scrub_time_ns) > mintime && 1693 (txg_sync_waiting(scn->scn_dp) || 1694 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 1695 spa_shutting_down(scn->scn_dp->dp_spa)) { 1696 if (zb) { 1697 dprintf("error scrub suspending at bookmark " 1698 "%llx/%llx/%llx/%llx\n", 1699 (longlong_t)zb->zb_objset, 1700 (longlong_t)zb->zb_object, 1701 (longlong_t)zb->zb_level, 1702 (longlong_t)zb->zb_blkid); 1703 } 1704 return (B_TRUE); 1705 } 1706 return (B_FALSE); 1707 } 1708 1709 typedef struct zil_scan_arg { 1710 dsl_pool_t *zsa_dp; 1711 zil_header_t *zsa_zh; 1712 } zil_scan_arg_t; 1713 1714 static int 1715 dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg, 1716 uint64_t claim_txg) 1717 { 1718 (void) zilog; 1719 zil_scan_arg_t *zsa = arg; 1720 dsl_pool_t *dp = zsa->zsa_dp; 1721 dsl_scan_t *scn = dp->dp_scan; 1722 zil_header_t *zh = zsa->zsa_zh; 1723 zbookmark_phys_t zb; 1724 1725 ASSERT(!BP_IS_REDACTED(bp)); 1726 if (BP_IS_HOLE(bp) || 1727 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) 1728 return (0); 1729 1730 /* 1731 * One block ("stubby") can be allocated a long time ago; we 1732 * want to visit that one because it has been allocated 1733 * (on-disk) even if it hasn't been claimed (even though for 1734 * scrub there's nothing to do to it). 
1735 */ 1736 if (claim_txg == 0 && 1737 BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(dp->dp_spa)) 1738 return (0); 1739 1740 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1741 ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]); 1742 1743 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1744 return (0); 1745 } 1746 1747 static int 1748 dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg, 1749 uint64_t claim_txg) 1750 { 1751 (void) zilog; 1752 if (lrc->lrc_txtype == TX_WRITE) { 1753 zil_scan_arg_t *zsa = arg; 1754 dsl_pool_t *dp = zsa->zsa_dp; 1755 dsl_scan_t *scn = dp->dp_scan; 1756 zil_header_t *zh = zsa->zsa_zh; 1757 const lr_write_t *lr = (const lr_write_t *)lrc; 1758 const blkptr_t *bp = &lr->lr_blkptr; 1759 zbookmark_phys_t zb; 1760 1761 ASSERT(!BP_IS_REDACTED(bp)); 1762 if (BP_IS_HOLE(bp) || 1763 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) 1764 return (0); 1765 1766 /* 1767 * birth can be < claim_txg if this record's txg is 1768 * already txg sync'ed (but this log block contains 1769 * other records that are not synced) 1770 */ 1771 if (claim_txg == 0 || BP_GET_LOGICAL_BIRTH(bp) < claim_txg) 1772 return (0); 1773 1774 ASSERT3U(BP_GET_LSIZE(bp), !=, 0); 1775 SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET], 1776 lr->lr_foid, ZB_ZIL_LEVEL, 1777 lr->lr_offset / BP_GET_LSIZE(bp)); 1778 1779 VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb)); 1780 } 1781 return (0); 1782 } 1783 1784 static void 1785 dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh) 1786 { 1787 uint64_t claim_txg = zh->zh_claim_txg; 1788 zil_scan_arg_t zsa = { dp, zh }; 1789 zilog_t *zilog; 1790 1791 ASSERT(spa_writeable(dp->dp_spa)); 1792 1793 /* 1794 * We only want to visit blocks that have been claimed but not yet 1795 * replayed (or, in read-only mode, blocks that *would* be claimed). 1796 */ 1797 if (claim_txg == 0) 1798 return; 1799 1800 zilog = zil_alloc(dp->dp_meta_objset, zh); 1801 1802 (void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa, 1803 claim_txg, B_FALSE); 1804 1805 zil_free(zilog); 1806 } 1807 1808 /* 1809 * We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea 1810 * here is to sort the AVL tree by the order each block will be needed. 
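 *
 * A rough illustration (hypothetical values, assuming 128 block pointers
 * per indirect block): zbookmark_compare() orders entries in pre-order
 * traversal order, so within one objset we would issue
 *
 *   <object 5,  level 1, blkid 0>   (indirect covering L0 blkids 0-127)
 *   <object 5,  level 0, blkid 0>
 *   <object 5,  level 0, blkid 8>
 *   <object 73, level 0, blkid 0>
 *
 * i.e. the prefetch reads are handed out in the same order the main
 * traversal will consume the blocks.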
1811 */ 1812 static int 1813 scan_prefetch_queue_compare(const void *a, const void *b) 1814 { 1815 const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b; 1816 const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc; 1817 const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc; 1818 1819 return (zbookmark_compare(spc_a->spc_datablkszsec, 1820 spc_a->spc_indblkshift, spc_b->spc_datablkszsec, 1821 spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb)); 1822 } 1823 1824 static void 1825 scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, const void *tag) 1826 { 1827 if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) { 1828 zfs_refcount_destroy(&spc->spc_refcnt); 1829 kmem_free(spc, sizeof (scan_prefetch_ctx_t)); 1830 } 1831 } 1832 1833 static scan_prefetch_ctx_t * 1834 scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, const void *tag) 1835 { 1836 scan_prefetch_ctx_t *spc; 1837 1838 spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP); 1839 zfs_refcount_create(&spc->spc_refcnt); 1840 zfs_refcount_add(&spc->spc_refcnt, tag); 1841 spc->spc_scn = scn; 1842 if (dnp != NULL) { 1843 spc->spc_datablkszsec = dnp->dn_datablkszsec; 1844 spc->spc_indblkshift = dnp->dn_indblkshift; 1845 spc->spc_root = B_FALSE; 1846 } else { 1847 spc->spc_datablkszsec = 0; 1848 spc->spc_indblkshift = 0; 1849 spc->spc_root = B_TRUE; 1850 } 1851 1852 return (spc); 1853 } 1854 1855 static void 1856 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, const void *tag) 1857 { 1858 zfs_refcount_add(&spc->spc_refcnt, tag); 1859 } 1860 1861 static void 1862 scan_ds_prefetch_queue_clear(dsl_scan_t *scn) 1863 { 1864 spa_t *spa = scn->scn_dp->dp_spa; 1865 void *cookie = NULL; 1866 scan_prefetch_issue_ctx_t *spic = NULL; 1867 1868 mutex_enter(&spa->spa_scrub_lock); 1869 while ((spic = avl_destroy_nodes(&scn->scn_prefetch_queue, 1870 &cookie)) != NULL) { 1871 scan_prefetch_ctx_rele(spic->spic_spc, scn); 1872 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1873 } 1874 mutex_exit(&spa->spa_scrub_lock); 1875 } 1876 1877 static boolean_t 1878 dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc, 1879 const zbookmark_phys_t *zb) 1880 { 1881 zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark; 1882 dnode_phys_t tmp_dnp; 1883 dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp; 1884 1885 if (zb->zb_objset != last_zb->zb_objset) 1886 return (B_TRUE); 1887 if ((int64_t)zb->zb_object < 0) 1888 return (B_FALSE); 1889 1890 tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec; 1891 tmp_dnp.dn_indblkshift = spc->spc_indblkshift; 1892 1893 if (zbookmark_subtree_completed(dnp, zb, last_zb)) 1894 return (B_TRUE); 1895 1896 return (B_FALSE); 1897 } 1898 1899 static void 1900 dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb) 1901 { 1902 avl_index_t idx; 1903 dsl_scan_t *scn = spc->spc_scn; 1904 spa_t *spa = scn->scn_dp->dp_spa; 1905 scan_prefetch_issue_ctx_t *spic; 1906 1907 if (zfs_no_scrub_prefetch || BP_IS_REDACTED(bp)) 1908 return; 1909 1910 if (BP_IS_HOLE(bp) || 1911 BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg || 1912 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE && 1913 BP_GET_TYPE(bp) != DMU_OT_OBJSET)) 1914 return; 1915 1916 if (dsl_scan_check_prefetch_resume(spc, zb)) 1917 return; 1918 1919 scan_prefetch_ctx_add_ref(spc, scn); 1920 spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP); 1921 spic->spic_spc = spc; 1922 spic->spic_bp = *bp; 1923 spic->spic_zb = *zb; 1924 1925 /* 1926 * Add the IO to the queue of blocks to prefetch. 
This allows us to 1927 * prioritize blocks that we will need first for the main traversal 1928 * thread. 1929 */ 1930 mutex_enter(&spa->spa_scrub_lock); 1931 if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) { 1932 /* this block is already queued for prefetch */ 1933 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 1934 scan_prefetch_ctx_rele(spc, scn); 1935 mutex_exit(&spa->spa_scrub_lock); 1936 return; 1937 } 1938 1939 avl_insert(&scn->scn_prefetch_queue, spic, idx); 1940 cv_broadcast(&spa->spa_scrub_io_cv); 1941 mutex_exit(&spa->spa_scrub_lock); 1942 } 1943 1944 static void 1945 dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp, 1946 uint64_t objset, uint64_t object) 1947 { 1948 int i; 1949 zbookmark_phys_t zb; 1950 scan_prefetch_ctx_t *spc; 1951 1952 if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) 1953 return; 1954 1955 SET_BOOKMARK(&zb, objset, object, 0, 0); 1956 1957 spc = scan_prefetch_ctx_create(scn, dnp, FTAG); 1958 1959 for (i = 0; i < dnp->dn_nblkptr; i++) { 1960 zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]); 1961 zb.zb_blkid = i; 1962 dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb); 1963 } 1964 1965 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1966 zb.zb_level = 0; 1967 zb.zb_blkid = DMU_SPILL_BLKID; 1968 dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb); 1969 } 1970 1971 scan_prefetch_ctx_rele(spc, FTAG); 1972 } 1973 1974 static void 1975 dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 1976 arc_buf_t *buf, void *private) 1977 { 1978 (void) zio; 1979 scan_prefetch_ctx_t *spc = private; 1980 dsl_scan_t *scn = spc->spc_scn; 1981 spa_t *spa = scn->scn_dp->dp_spa; 1982 1983 /* broadcast that the IO has completed for rate limiting purposes */ 1984 mutex_enter(&spa->spa_scrub_lock); 1985 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 1986 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 1987 cv_broadcast(&spa->spa_scrub_io_cv); 1988 mutex_exit(&spa->spa_scrub_lock); 1989 1990 /* if there was an error or we are done prefetching, just cleanup */ 1991 if (buf == NULL || scn->scn_prefetch_stop) 1992 goto out; 1993 1994 if (BP_GET_LEVEL(bp) > 0) { 1995 int i; 1996 blkptr_t *cbp; 1997 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 1998 zbookmark_phys_t czb; 1999 2000 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 2001 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 2002 zb->zb_level - 1, zb->zb_blkid * epb + i); 2003 dsl_scan_prefetch(spc, cbp, &czb); 2004 } 2005 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 2006 dnode_phys_t *cdnp; 2007 int i; 2008 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 2009 2010 for (i = 0, cdnp = buf->b_data; i < epb; 2011 i += cdnp->dn_extra_slots + 1, 2012 cdnp += cdnp->dn_extra_slots + 1) { 2013 dsl_scan_prefetch_dnode(scn, cdnp, 2014 zb->zb_objset, zb->zb_blkid * epb + i); 2015 } 2016 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 2017 objset_phys_t *osp = buf->b_data; 2018 2019 dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode, 2020 zb->zb_objset, DMU_META_DNODE_OBJECT); 2021 2022 if (OBJSET_BUF_HAS_USERUSED(buf)) { 2023 if (OBJSET_BUF_HAS_PROJECTUSED(buf)) { 2024 dsl_scan_prefetch_dnode(scn, 2025 &osp->os_projectused_dnode, zb->zb_objset, 2026 DMU_PROJECTUSED_OBJECT); 2027 } 2028 dsl_scan_prefetch_dnode(scn, 2029 &osp->os_groupused_dnode, zb->zb_objset, 2030 DMU_GROUPUSED_OBJECT); 2031 dsl_scan_prefetch_dnode(scn, 2032 &osp->os_userused_dnode, zb->zb_objset, 2033 DMU_USERUSED_OBJECT); 2034 } 2035 } 2036 2037 out: 2038 if (buf != NULL) 2039 arc_buf_destroy(buf, 
private); 2040 scan_prefetch_ctx_rele(spc, scn); 2041 } 2042 2043 static void 2044 dsl_scan_prefetch_thread(void *arg) 2045 { 2046 dsl_scan_t *scn = arg; 2047 spa_t *spa = scn->scn_dp->dp_spa; 2048 scan_prefetch_issue_ctx_t *spic; 2049 2050 /* loop until we are told to stop */ 2051 while (!scn->scn_prefetch_stop) { 2052 arc_flags_t flags = ARC_FLAG_NOWAIT | 2053 ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH; 2054 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 2055 2056 mutex_enter(&spa->spa_scrub_lock); 2057 2058 /* 2059 * Wait until we have an IO to issue and are not above our 2060 * maximum in flight limit. 2061 */ 2062 while (!scn->scn_prefetch_stop && 2063 (avl_numnodes(&scn->scn_prefetch_queue) == 0 || 2064 spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) { 2065 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 2066 } 2067 2068 /* recheck if we should stop since we waited for the cv */ 2069 if (scn->scn_prefetch_stop) { 2070 mutex_exit(&spa->spa_scrub_lock); 2071 break; 2072 } 2073 2074 /* remove the prefetch IO from the tree */ 2075 spic = avl_first(&scn->scn_prefetch_queue); 2076 spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp); 2077 avl_remove(&scn->scn_prefetch_queue, spic); 2078 2079 mutex_exit(&spa->spa_scrub_lock); 2080 2081 if (BP_IS_PROTECTED(&spic->spic_bp)) { 2082 ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE || 2083 BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET); 2084 ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0); 2085 zio_flags |= ZIO_FLAG_RAW; 2086 } 2087 2088 /* We don't need data L1 buffer since we do not prefetch L0. */ 2089 blkptr_t *bp = &spic->spic_bp; 2090 if (BP_GET_LEVEL(bp) == 1 && BP_GET_TYPE(bp) != DMU_OT_DNODE && 2091 BP_GET_TYPE(bp) != DMU_OT_OBJSET) 2092 flags |= ARC_FLAG_NO_BUF; 2093 2094 /* issue the prefetch asynchronously */ 2095 (void) arc_read(scn->scn_zio_root, spa, bp, 2096 dsl_scan_prefetch_cb, spic->spic_spc, ZIO_PRIORITY_SCRUB, 2097 zio_flags, &flags, &spic->spic_zb); 2098 2099 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 2100 } 2101 2102 ASSERT(scn->scn_prefetch_stop); 2103 2104 /* free any prefetches we didn't get to complete */ 2105 mutex_enter(&spa->spa_scrub_lock); 2106 while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) { 2107 avl_remove(&scn->scn_prefetch_queue, spic); 2108 scan_prefetch_ctx_rele(spic->spic_spc, scn); 2109 kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t)); 2110 } 2111 ASSERT0(avl_numnodes(&scn->scn_prefetch_queue)); 2112 mutex_exit(&spa->spa_scrub_lock); 2113 } 2114 2115 static boolean_t 2116 dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp, 2117 const zbookmark_phys_t *zb) 2118 { 2119 /* 2120 * We never skip over user/group accounting objects (obj<0) 2121 */ 2122 if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) && 2123 (int64_t)zb->zb_object >= 0) { 2124 /* 2125 * If we already visited this bp & everything below (in 2126 * a prior txg sync), don't bother doing it again. 2127 */ 2128 if (zbookmark_subtree_completed(dnp, zb, 2129 &scn->scn_phys.scn_bookmark)) 2130 return (B_TRUE); 2131 2132 /* 2133 * If we found the block we're trying to resume from, or 2134 * we went past it, zero it out to indicate that it's OK 2135 * to start checking for suspending again. 
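 * Once the bookmark has been zeroed, dsl_scan_check_suspend() is free
 * to record a new suspend point for a later resume.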
2136 */ 2137 if (zbookmark_subtree_tbd(dnp, zb, 2138 &scn->scn_phys.scn_bookmark)) { 2139 dprintf("resuming at %llx/%llx/%llx/%llx\n", 2140 (longlong_t)zb->zb_objset, 2141 (longlong_t)zb->zb_object, 2142 (longlong_t)zb->zb_level, 2143 (longlong_t)zb->zb_blkid); 2144 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (*zb)); 2145 } 2146 } 2147 return (B_FALSE); 2148 } 2149 2150 static void dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb, 2151 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 2152 dmu_objset_type_t ostype, dmu_tx_t *tx); 2153 inline __attribute__((always_inline)) static void dsl_scan_visitdnode( 2154 dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype, 2155 dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx); 2156 2157 /* 2158 * Return nonzero on i/o error. 2159 * Return new buf to write out in *bufp. 2160 */ 2161 inline __attribute__((always_inline)) static int 2162 dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, 2163 dnode_phys_t *dnp, const blkptr_t *bp, 2164 const zbookmark_phys_t *zb, dmu_tx_t *tx) 2165 { 2166 dsl_pool_t *dp = scn->scn_dp; 2167 spa_t *spa = dp->dp_spa; 2168 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD; 2169 int err; 2170 2171 ASSERT(!BP_IS_REDACTED(bp)); 2172 2173 /* 2174 * There is an unlikely case of encountering dnodes with contradicting 2175 * dn_bonuslen and DNODE_FLAG_SPILL_BLKPTR flag before in files created 2176 * or modified before commit 4254acb was merged. As it is not possible 2177 * to know which of the two is correct, report an error. 2178 */ 2179 if (dnp != NULL && 2180 dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) { 2181 scn->scn_phys.scn_errors++; 2182 spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp)); 2183 return (SET_ERROR(EINVAL)); 2184 } 2185 2186 if (BP_GET_LEVEL(bp) > 0) { 2187 arc_flags_t flags = ARC_FLAG_WAIT; 2188 int i; 2189 blkptr_t *cbp; 2190 int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT; 2191 arc_buf_t *buf; 2192 2193 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 2194 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 2195 if (err) { 2196 scn->scn_phys.scn_errors++; 2197 return (err); 2198 } 2199 for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) { 2200 zbookmark_phys_t czb; 2201 2202 SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object, 2203 zb->zb_level - 1, 2204 zb->zb_blkid * epb + i); 2205 dsl_scan_visitbp(cbp, &czb, dnp, 2206 ds, scn, ostype, tx); 2207 } 2208 arc_buf_destroy(buf, &buf); 2209 } else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) { 2210 arc_flags_t flags = ARC_FLAG_WAIT; 2211 dnode_phys_t *cdnp; 2212 int i; 2213 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT; 2214 arc_buf_t *buf; 2215 2216 if (BP_IS_PROTECTED(bp)) { 2217 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF); 2218 zio_flags |= ZIO_FLAG_RAW; 2219 } 2220 2221 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 2222 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 2223 if (err) { 2224 scn->scn_phys.scn_errors++; 2225 return (err); 2226 } 2227 for (i = 0, cdnp = buf->b_data; i < epb; 2228 i += cdnp->dn_extra_slots + 1, 2229 cdnp += cdnp->dn_extra_slots + 1) { 2230 dsl_scan_visitdnode(scn, ds, ostype, 2231 cdnp, zb->zb_blkid * epb + i, tx); 2232 } 2233 2234 arc_buf_destroy(buf, &buf); 2235 } else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 2236 arc_flags_t flags = ARC_FLAG_WAIT; 2237 objset_phys_t *osp; 2238 arc_buf_t *buf; 2239 2240 err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf, 2241 ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb); 2242 if (err) { 2243 scn->scn_phys.scn_errors++; 2244 return (err); 2245 } 2246 
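/*
 * The objset block carries the meta-dnode and, optionally, the
 * user/group/project accounting dnodes; visit the meta-dnode first,
 * then any accounting dnodes present.
 */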
2247 osp = buf->b_data; 2248 2249 dsl_scan_visitdnode(scn, ds, osp->os_type, 2250 &osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx); 2251 2252 if (OBJSET_BUF_HAS_USERUSED(buf)) { 2253 /* 2254 * We also always visit user/group/project accounting 2255 * objects, and never skip them, even if we are 2256 * suspending. This is necessary so that the 2257 * space deltas from this txg get integrated. 2258 */ 2259 if (OBJSET_BUF_HAS_PROJECTUSED(buf)) 2260 dsl_scan_visitdnode(scn, ds, osp->os_type, 2261 &osp->os_projectused_dnode, 2262 DMU_PROJECTUSED_OBJECT, tx); 2263 dsl_scan_visitdnode(scn, ds, osp->os_type, 2264 &osp->os_groupused_dnode, 2265 DMU_GROUPUSED_OBJECT, tx); 2266 dsl_scan_visitdnode(scn, ds, osp->os_type, 2267 &osp->os_userused_dnode, 2268 DMU_USERUSED_OBJECT, tx); 2269 } 2270 arc_buf_destroy(buf, &buf); 2271 } else if (!zfs_blkptr_verify(spa, bp, 2272 BLK_CONFIG_NEEDED, BLK_VERIFY_LOG)) { 2273 /* 2274 * Sanity check the block pointer contents, this is handled 2275 * by arc_read() for the cases above. 2276 */ 2277 scn->scn_phys.scn_errors++; 2278 spa_log_error(spa, zb, BP_GET_LOGICAL_BIRTH(bp)); 2279 return (SET_ERROR(EINVAL)); 2280 } 2281 2282 return (0); 2283 } 2284 2285 inline __attribute__((always_inline)) static void 2286 dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds, 2287 dmu_objset_type_t ostype, dnode_phys_t *dnp, 2288 uint64_t object, dmu_tx_t *tx) 2289 { 2290 int j; 2291 2292 for (j = 0; j < dnp->dn_nblkptr; j++) { 2293 zbookmark_phys_t czb; 2294 2295 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 2296 dnp->dn_nlevels - 1, j); 2297 dsl_scan_visitbp(&dnp->dn_blkptr[j], 2298 &czb, dnp, ds, scn, ostype, tx); 2299 } 2300 2301 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 2302 zbookmark_phys_t czb; 2303 SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object, 2304 0, DMU_SPILL_BLKID); 2305 dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp), 2306 &czb, dnp, ds, scn, ostype, tx); 2307 } 2308 } 2309 2310 /* 2311 * The arguments are in this order because mdb can only print the 2312 * first 5; we want them to be useful. 2313 */ 2314 static void 2315 dsl_scan_visitbp(const blkptr_t *bp, const zbookmark_phys_t *zb, 2316 dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn, 2317 dmu_objset_type_t ostype, dmu_tx_t *tx) 2318 { 2319 dsl_pool_t *dp = scn->scn_dp; 2320 2321 if (dsl_scan_check_suspend(scn, zb)) 2322 return; 2323 2324 if (dsl_scan_check_resume(scn, dnp, zb)) 2325 return; 2326 2327 scn->scn_visited_this_txg++; 2328 2329 if (BP_IS_HOLE(bp)) { 2330 scn->scn_holes_this_txg++; 2331 return; 2332 } 2333 2334 if (BP_IS_REDACTED(bp)) { 2335 ASSERT(dsl_dataset_feature_is_active(ds, 2336 SPA_FEATURE_REDACTED_DATASETS)); 2337 return; 2338 } 2339 2340 /* 2341 * Check if this block contradicts any filesystem flags. 
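 * For example, a block whose lsize exceeds SPA_OLD_MAXBLOCKSIZE must
 * only appear in a dataset with the large_blocks feature active, and a
 * checksum or compression algorithm gated by a feature flag (e.g. zstd)
 * implies that feature is active as well; the asserts below check
 * exactly that.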
2342 */ 2343 spa_feature_t f = SPA_FEATURE_LARGE_BLOCKS; 2344 if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) 2345 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2346 2347 f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp)); 2348 if (f != SPA_FEATURE_NONE) 2349 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2350 2351 f = zio_compress_to_feature(BP_GET_COMPRESS(bp)); 2352 if (f != SPA_FEATURE_NONE) 2353 ASSERT(dsl_dataset_feature_is_active(ds, f)); 2354 2355 if (BP_GET_LOGICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_min_txg) { 2356 scn->scn_lt_min_this_txg++; 2357 return; 2358 } 2359 2360 if (dsl_scan_recurse(scn, ds, ostype, dnp, bp, zb, tx) != 0) 2361 return; 2362 2363 /* 2364 * If dsl_scan_ddt() has already visited this block, it will have 2365 * already done any translations or scrubbing, so don't call the 2366 * callback again. 2367 */ 2368 if (ddt_class_contains(dp->dp_spa, 2369 scn->scn_phys.scn_ddt_class_max, bp)) { 2370 scn->scn_ddt_contained_this_txg++; 2371 return; 2372 } 2373 2374 /* 2375 * If this block is from the future (after cur_max_txg), then we 2376 * are doing this on behalf of a deleted snapshot, and we will 2377 * revisit the future block on the next pass of this dataset. 2378 * Don't scan it now unless we need to because something 2379 * under it was modified. 2380 */ 2381 if (BP_GET_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) { 2382 scn->scn_gt_max_this_txg++; 2383 return; 2384 } 2385 2386 scan_funcs[scn->scn_phys.scn_func](dp, bp, zb); 2387 } 2388 2389 static void 2390 dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp, 2391 dmu_tx_t *tx) 2392 { 2393 zbookmark_phys_t zb; 2394 scan_prefetch_ctx_t *spc; 2395 2396 SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, 2397 ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); 2398 2399 if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) { 2400 SET_BOOKMARK(&scn->scn_prefetch_bookmark, 2401 zb.zb_objset, 0, 0, 0); 2402 } else { 2403 scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark; 2404 } 2405 2406 scn->scn_objsets_visited_this_txg++; 2407 2408 spc = scan_prefetch_ctx_create(scn, NULL, FTAG); 2409 dsl_scan_prefetch(spc, bp, &zb); 2410 scan_prefetch_ctx_rele(spc, FTAG); 2411 2412 dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx); 2413 2414 dprintf_ds(ds, "finished scan%s", ""); 2415 } 2416 2417 static void 2418 ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys) 2419 { 2420 if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) { 2421 if (ds->ds_is_snapshot) { 2422 /* 2423 * Note: 2424 * - scn_cur_{min,max}_txg stays the same. 2425 * - Setting the flag is not really necessary if 2426 * scn_cur_max_txg == scn_max_txg, because there 2427 * is nothing after this snapshot that we care 2428 * about. However, we set it anyway and then 2429 * ignore it when we retraverse it in 2430 * dsl_scan_visitds(). 
2431 */ 2432 scn_phys->scn_bookmark.zb_objset = 2433 dsl_dataset_phys(ds)->ds_next_snap_obj; 2434 zfs_dbgmsg("destroying ds %llu on %s; currently " 2435 "traversing; reset zb_objset to %llu", 2436 (u_longlong_t)ds->ds_object, 2437 ds->ds_dir->dd_pool->dp_spa->spa_name, 2438 (u_longlong_t)dsl_dataset_phys(ds)-> 2439 ds_next_snap_obj); 2440 scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN; 2441 } else { 2442 SET_BOOKMARK(&scn_phys->scn_bookmark, 2443 ZB_DESTROYED_OBJSET, 0, 0, 0); 2444 zfs_dbgmsg("destroying ds %llu on %s; currently " 2445 "traversing; reset bookmark to -1,0,0,0", 2446 (u_longlong_t)ds->ds_object, 2447 ds->ds_dir->dd_pool->dp_spa->spa_name); 2448 } 2449 } 2450 } 2451 2452 /* 2453 * Invoked when a dataset is destroyed. We need to make sure that: 2454 * 2455 * 1) If it is the dataset that was currently being scanned, we write 2456 * a new dsl_scan_phys_t and marking the objset reference in it 2457 * as destroyed. 2458 * 2) Remove it from the work queue, if it was present. 2459 * 2460 * If the dataset was actually a snapshot, instead of marking the dataset 2461 * as destroyed, we instead substitute the next snapshot in line. 2462 */ 2463 void 2464 dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx) 2465 { 2466 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2467 dsl_scan_t *scn = dp->dp_scan; 2468 uint64_t mintxg; 2469 2470 if (!dsl_scan_is_running(scn)) 2471 return; 2472 2473 ds_destroyed_scn_phys(ds, &scn->scn_phys); 2474 ds_destroyed_scn_phys(ds, &scn->scn_phys_cached); 2475 2476 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2477 scan_ds_queue_remove(scn, ds->ds_object); 2478 if (ds->ds_is_snapshot) 2479 scan_ds_queue_insert(scn, 2480 dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg); 2481 } 2482 2483 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2484 ds->ds_object, &mintxg) == 0) { 2485 ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1); 2486 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2487 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2488 if (ds->ds_is_snapshot) { 2489 /* 2490 * We keep the same mintxg; it could be > 2491 * ds_creation_txg if the previous snapshot was 2492 * deleted too. 2493 */ 2494 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2495 scn->scn_phys.scn_queue_obj, 2496 dsl_dataset_phys(ds)->ds_next_snap_obj, 2497 mintxg, tx) == 0); 2498 zfs_dbgmsg("destroying ds %llu on %s; in queue; " 2499 "replacing with %llu", 2500 (u_longlong_t)ds->ds_object, 2501 dp->dp_spa->spa_name, 2502 (u_longlong_t)dsl_dataset_phys(ds)-> 2503 ds_next_snap_obj); 2504 } else { 2505 zfs_dbgmsg("destroying ds %llu on %s; in queue; " 2506 "removing", 2507 (u_longlong_t)ds->ds_object, 2508 dp->dp_spa->spa_name); 2509 } 2510 } 2511 2512 /* 2513 * dsl_scan_sync() should be called after this, and should sync 2514 * out our changed state, but just to be safe, do it here. 2515 */ 2516 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2517 } 2518 2519 static void 2520 ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark) 2521 { 2522 if (scn_bookmark->zb_objset == ds->ds_object) { 2523 scn_bookmark->zb_objset = 2524 dsl_dataset_phys(ds)->ds_prev_snap_obj; 2525 zfs_dbgmsg("snapshotting ds %llu on %s; currently traversing; " 2526 "reset zb_objset to %llu", 2527 (u_longlong_t)ds->ds_object, 2528 ds->ds_dir->dd_pool->dp_spa->spa_name, 2529 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2530 } 2531 } 2532 2533 /* 2534 * Called when a dataset is snapshotted. 
If we were currently traversing 2535 * this snapshot, we reset our bookmark to point at the newly created 2536 * snapshot. We also modify our work queue to remove the old snapshot and 2537 * replace with the new one. 2538 */ 2539 void 2540 dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx) 2541 { 2542 dsl_pool_t *dp = ds->ds_dir->dd_pool; 2543 dsl_scan_t *scn = dp->dp_scan; 2544 uint64_t mintxg; 2545 2546 if (!dsl_scan_is_running(scn)) 2547 return; 2548 2549 ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0); 2550 2551 ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark); 2552 ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark); 2553 2554 if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) { 2555 scan_ds_queue_remove(scn, ds->ds_object); 2556 scan_ds_queue_insert(scn, 2557 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg); 2558 } 2559 2560 if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj, 2561 ds->ds_object, &mintxg) == 0) { 2562 VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset, 2563 scn->scn_phys.scn_queue_obj, ds->ds_object, tx)); 2564 VERIFY(zap_add_int_key(dp->dp_meta_objset, 2565 scn->scn_phys.scn_queue_obj, 2566 dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0); 2567 zfs_dbgmsg("snapshotting ds %llu on %s; in queue; " 2568 "replacing with %llu", 2569 (u_longlong_t)ds->ds_object, 2570 dp->dp_spa->spa_name, 2571 (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj); 2572 } 2573 2574 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2575 } 2576 2577 static void 2578 ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2, 2579 zbookmark_phys_t *scn_bookmark) 2580 { 2581 if (scn_bookmark->zb_objset == ds1->ds_object) { 2582 scn_bookmark->zb_objset = ds2->ds_object; 2583 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " 2584 "reset zb_objset to %llu", 2585 (u_longlong_t)ds1->ds_object, 2586 ds1->ds_dir->dd_pool->dp_spa->spa_name, 2587 (u_longlong_t)ds2->ds_object); 2588 } else if (scn_bookmark->zb_objset == ds2->ds_object) { 2589 scn_bookmark->zb_objset = ds1->ds_object; 2590 zfs_dbgmsg("clone_swap ds %llu on %s; currently traversing; " 2591 "reset zb_objset to %llu", 2592 (u_longlong_t)ds2->ds_object, 2593 ds2->ds_dir->dd_pool->dp_spa->spa_name, 2594 (u_longlong_t)ds1->ds_object); 2595 } 2596 } 2597 2598 /* 2599 * Called when an origin dataset and its clone are swapped. If we were 2600 * currently traversing the dataset, we need to switch to traversing the 2601 * newly promoted clone. 2602 */ 2603 void 2604 dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx) 2605 { 2606 dsl_pool_t *dp = ds1->ds_dir->dd_pool; 2607 dsl_scan_t *scn = dp->dp_scan; 2608 uint64_t mintxg1, mintxg2; 2609 boolean_t ds1_queued, ds2_queued; 2610 2611 if (!dsl_scan_is_running(scn)) 2612 return; 2613 2614 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark); 2615 ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark); 2616 2617 /* 2618 * Handle the in-memory scan queue. 2619 */ 2620 ds1_queued = scan_ds_queue_contains(scn, ds1->ds_object, &mintxg1); 2621 ds2_queued = scan_ds_queue_contains(scn, ds2->ds_object, &mintxg2); 2622 2623 /* Sanity checking. 
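 * The expectation encoded below: a queued entry's mintxg matches
 * ds_prev_snap_txg of both datasets, since at clone-swap time the two
 * datasets are expected to share the same most recent snapshot.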
*/ 2624 if (ds1_queued) { 2625 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2626 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2627 } 2628 if (ds2_queued) { 2629 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2630 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2631 } 2632 2633 if (ds1_queued && ds2_queued) { 2634 /* 2635 * If both are queued, we don't need to do anything. 2636 * The swapping code below would not handle this case correctly, 2637 * since we can't insert ds2 if it is already there. That's 2638 * because scan_ds_queue_insert() prohibits a duplicate insert 2639 * and panics. 2640 */ 2641 } else if (ds1_queued) { 2642 scan_ds_queue_remove(scn, ds1->ds_object); 2643 scan_ds_queue_insert(scn, ds2->ds_object, mintxg1); 2644 } else if (ds2_queued) { 2645 scan_ds_queue_remove(scn, ds2->ds_object); 2646 scan_ds_queue_insert(scn, ds1->ds_object, mintxg2); 2647 } 2648 2649 /* 2650 * Handle the on-disk scan queue. 2651 * The on-disk state is an out-of-date version of the in-memory state, 2652 * so the in-memory and on-disk values for ds1_queued and ds2_queued may 2653 * be different. Therefore we need to apply the swap logic to the 2654 * on-disk state independently of the in-memory state. 2655 */ 2656 ds1_queued = zap_lookup_int_key(dp->dp_meta_objset, 2657 scn->scn_phys.scn_queue_obj, ds1->ds_object, &mintxg1) == 0; 2658 ds2_queued = zap_lookup_int_key(dp->dp_meta_objset, 2659 scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg2) == 0; 2660 2661 /* Sanity checking. */ 2662 if (ds1_queued) { 2663 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2664 ASSERT3U(mintxg1, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2665 } 2666 if (ds2_queued) { 2667 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg); 2668 ASSERT3U(mintxg2, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg); 2669 } 2670 2671 if (ds1_queued && ds2_queued) { 2672 /* 2673 * If both are queued, we don't need to do anything. 2674 * Alternatively, we could check for EEXIST from 2675 * zap_add_int_key() and back out to the original state, but 2676 * that would be more work than checking for this case upfront. 
2677 */ 2678 } else if (ds1_queued) { 2679 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2680 scn->scn_phys.scn_queue_obj, ds1->ds_object, tx)); 2681 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2682 scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg1, tx)); 2683 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; " 2684 "replacing with %llu", 2685 (u_longlong_t)ds1->ds_object, 2686 dp->dp_spa->spa_name, 2687 (u_longlong_t)ds2->ds_object); 2688 } else if (ds2_queued) { 2689 VERIFY3S(0, ==, zap_remove_int(dp->dp_meta_objset, 2690 scn->scn_phys.scn_queue_obj, ds2->ds_object, tx)); 2691 VERIFY3S(0, ==, zap_add_int_key(dp->dp_meta_objset, 2692 scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg2, tx)); 2693 zfs_dbgmsg("clone_swap ds %llu on %s; in queue; " 2694 "replacing with %llu", 2695 (u_longlong_t)ds2->ds_object, 2696 dp->dp_spa->spa_name, 2697 (u_longlong_t)ds1->ds_object); 2698 } 2699 2700 dsl_scan_sync_state(scn, tx, SYNC_CACHED); 2701 } 2702 2703 static int 2704 enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2705 { 2706 uint64_t originobj = *(uint64_t *)arg; 2707 dsl_dataset_t *ds; 2708 int err; 2709 dsl_scan_t *scn = dp->dp_scan; 2710 2711 if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj) 2712 return (0); 2713 2714 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2715 if (err) 2716 return (err); 2717 2718 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) { 2719 dsl_dataset_t *prev; 2720 err = dsl_dataset_hold_obj(dp, 2721 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2722 2723 dsl_dataset_rele(ds, FTAG); 2724 if (err) 2725 return (err); 2726 ds = prev; 2727 } 2728 mutex_enter(&scn->scn_queue_lock); 2729 scan_ds_queue_insert(scn, ds->ds_object, 2730 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2731 mutex_exit(&scn->scn_queue_lock); 2732 dsl_dataset_rele(ds, FTAG); 2733 return (0); 2734 } 2735 2736 static void 2737 dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) 2738 { 2739 dsl_pool_t *dp = scn->scn_dp; 2740 dsl_dataset_t *ds; 2741 2742 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 2743 2744 if (scn->scn_phys.scn_cur_min_txg >= 2745 scn->scn_phys.scn_max_txg) { 2746 /* 2747 * This can happen if this snapshot was created after the 2748 * scan started, and we already completed a previous snapshot 2749 * that was created after the scan started. This snapshot 2750 * only references blocks with: 2751 * 2752 * birth < our ds_creation_txg 2753 * cur_min_txg is no less than ds_creation_txg. 2754 * We have already visited these blocks. 2755 * or 2756 * birth > scn_max_txg 2757 * The scan requested not to visit these blocks. 2758 * 2759 * Subsequent snapshots (and clones) can reference our 2760 * blocks, or blocks with even higher birth times. 2761 * Therefore we do not need to visit them either, 2762 * so we do not add them to the work queue. 2763 * 2764 * Note that checking for cur_min_txg >= cur_max_txg 2765 * is not sufficient, because in that case we may need to 2766 * visit subsequent snapshots. This happens when min_txg > 0, 2767 * which raises cur_min_txg. In this case we will visit 2768 * this dataset but skip all of its blocks, because the 2769 * rootbp's birth time is < cur_min_txg. Then we will 2770 * add the next snapshots/clones to the work queue. 
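 *
 * A made-up example of the first case: the scan starts at txg 100
 * (scn_max_txg == 100) and a snapshot is later created at txg 120.
 * Once that snapshot has been visited, the dataset following it in the
 * chain is queued with its ds_creation_txg (120), so when that entry
 * is dequeued cur_min_txg becomes MAX(scn_min_txg, 120) == 120, which
 * is >= scn_max_txg, and we take this early return.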
2771 */ 2772 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2773 dsl_dataset_name(ds, dsname); 2774 zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because " 2775 "cur_min_txg (%llu) >= max_txg (%llu)", 2776 (longlong_t)dsobj, dsname, 2777 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2778 (longlong_t)scn->scn_phys.scn_max_txg); 2779 kmem_free(dsname, MAXNAMELEN); 2780 2781 goto out; 2782 } 2783 2784 /* 2785 * Only the ZIL in the head (non-snapshot) is valid. Even though 2786 * snapshots can have ZIL block pointers (which may be the same 2787 * BP as in the head), they must be ignored. In addition, $ORIGIN 2788 * doesn't have a objset (i.e. its ds_bp is a hole) so we don't 2789 * need to look for a ZIL in it either. So we traverse the ZIL here, 2790 * rather than in scan_recurse(), because the regular snapshot 2791 * block-sharing rules don't apply to it. 2792 */ 2793 if (!dsl_dataset_is_snapshot(ds) && 2794 (dp->dp_origin_snap == NULL || 2795 ds->ds_dir != dp->dp_origin_snap->ds_dir)) { 2796 objset_t *os; 2797 if (dmu_objset_from_ds(ds, &os) != 0) { 2798 goto out; 2799 } 2800 dsl_scan_zil(dp, &os->os_zil_header); 2801 } 2802 2803 /* 2804 * Iterate over the bps in this ds. 2805 */ 2806 dmu_buf_will_dirty(ds->ds_dbuf, tx); 2807 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 2808 dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx); 2809 rrw_exit(&ds->ds_bp_rwlock, FTAG); 2810 2811 char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); 2812 dsl_dataset_name(ds, dsname); 2813 zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " 2814 "suspending=%u", 2815 (longlong_t)dsobj, dsname, 2816 (longlong_t)scn->scn_phys.scn_cur_min_txg, 2817 (longlong_t)scn->scn_phys.scn_cur_max_txg, 2818 (int)scn->scn_suspending); 2819 kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN); 2820 2821 if (scn->scn_suspending) 2822 goto out; 2823 2824 /* 2825 * We've finished this pass over this dataset. 2826 */ 2827 2828 /* 2829 * If we did not completely visit this dataset, do another pass. 2830 */ 2831 if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) { 2832 zfs_dbgmsg("incomplete pass on %s; visiting again", 2833 dp->dp_spa->spa_name); 2834 scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN; 2835 scan_ds_queue_insert(scn, ds->ds_object, 2836 scn->scn_phys.scn_cur_max_txg); 2837 goto out; 2838 } 2839 2840 /* 2841 * Add descendant datasets to work queue. 2842 */ 2843 if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) { 2844 scan_ds_queue_insert(scn, 2845 dsl_dataset_phys(ds)->ds_next_snap_obj, 2846 dsl_dataset_phys(ds)->ds_creation_txg); 2847 } 2848 if (dsl_dataset_phys(ds)->ds_num_children > 1) { 2849 boolean_t usenext = B_FALSE; 2850 if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) { 2851 uint64_t count; 2852 /* 2853 * A bug in a previous version of the code could 2854 * cause upgrade_clones_cb() to not set 2855 * ds_next_snap_obj when it should, leading to a 2856 * missing entry. Therefore we can only use the 2857 * next_clones_obj when its count is correct. 
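 * Concretely, the code below compares zap_count() of next_clones_obj
 * against ds_num_children - 1; if they disagree we fall back to
 * enumerating all children with dmu_objset_find_dp().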
2858 */ 2859 int err = zap_count(dp->dp_meta_objset, 2860 dsl_dataset_phys(ds)->ds_next_clones_obj, &count); 2861 if (err == 0 && 2862 count == dsl_dataset_phys(ds)->ds_num_children - 1) 2863 usenext = B_TRUE; 2864 } 2865 2866 if (usenext) { 2867 zap_cursor_t zc; 2868 zap_attribute_t za; 2869 for (zap_cursor_init(&zc, dp->dp_meta_objset, 2870 dsl_dataset_phys(ds)->ds_next_clones_obj); 2871 zap_cursor_retrieve(&zc, &za) == 0; 2872 (void) zap_cursor_advance(&zc)) { 2873 scan_ds_queue_insert(scn, 2874 zfs_strtonum(za.za_name, NULL), 2875 dsl_dataset_phys(ds)->ds_creation_txg); 2876 } 2877 zap_cursor_fini(&zc); 2878 } else { 2879 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 2880 enqueue_clones_cb, &ds->ds_object, 2881 DS_FIND_CHILDREN)); 2882 } 2883 } 2884 2885 out: 2886 dsl_dataset_rele(ds, FTAG); 2887 } 2888 2889 static int 2890 enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg) 2891 { 2892 (void) arg; 2893 dsl_dataset_t *ds; 2894 int err; 2895 dsl_scan_t *scn = dp->dp_scan; 2896 2897 err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds); 2898 if (err) 2899 return (err); 2900 2901 while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) { 2902 dsl_dataset_t *prev; 2903 err = dsl_dataset_hold_obj(dp, 2904 dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev); 2905 if (err) { 2906 dsl_dataset_rele(ds, FTAG); 2907 return (err); 2908 } 2909 2910 /* 2911 * If this is a clone, we don't need to worry about it for now. 2912 */ 2913 if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) { 2914 dsl_dataset_rele(ds, FTAG); 2915 dsl_dataset_rele(prev, FTAG); 2916 return (0); 2917 } 2918 dsl_dataset_rele(ds, FTAG); 2919 ds = prev; 2920 } 2921 2922 mutex_enter(&scn->scn_queue_lock); 2923 scan_ds_queue_insert(scn, ds->ds_object, 2924 dsl_dataset_phys(ds)->ds_prev_snap_txg); 2925 mutex_exit(&scn->scn_queue_lock); 2926 dsl_dataset_rele(ds, FTAG); 2927 return (0); 2928 } 2929 2930 void 2931 dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, 2932 ddt_entry_t *dde, dmu_tx_t *tx) 2933 { 2934 (void) tx; 2935 const ddt_key_t *ddk = &dde->dde_key; 2936 ddt_phys_t *ddp = dde->dde_phys; 2937 blkptr_t bp; 2938 zbookmark_phys_t zb = { 0 }; 2939 2940 if (!dsl_scan_is_running(scn)) 2941 return; 2942 2943 /* 2944 * This function is special because it is the only thing 2945 * that can add scan_io_t's to the vdev scan queues from 2946 * outside dsl_scan_sync(). For the most part this is ok 2947 * as long as it is called from within syncing context. 2948 * However, dsl_scan_sync() expects that no new sio's will 2949 * be added between when all the work for a scan is done 2950 * and the next txg when the scan is actually marked as 2951 * completed. This check ensures we do not issue new sio's 2952 * during this period. 2953 */ 2954 if (scn->scn_done_txg != 0) 2955 return; 2956 2957 for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { 2958 if (ddp->ddp_phys_birth == 0 || 2959 ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg) 2960 continue; 2961 ddt_bp_create(checksum, ddk, ddp, &bp); 2962 2963 scn->scn_visited_this_txg++; 2964 scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb); 2965 } 2966 } 2967 2968 /* 2969 * Scrub/dedup interaction. 2970 * 2971 * If there are N references to a deduped block, we don't want to scrub it 2972 * N times -- ideally, we should scrub it exactly once. 
2973 * 2974 * We leverage the fact that the dde's replication class (ddt_class_t) 2975 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest 2976 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order. 2977 * 2978 * To prevent excess scrubbing, the scrub begins by walking the DDT 2979 * to find all blocks with refcnt > 1, and scrubs each of these once. 2980 * Since there are two replication classes which contain blocks with 2981 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first. 2982 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1. 2983 * 2984 * There would be nothing more to say if a block's refcnt couldn't change 2985 * during a scrub, but of course it can so we must account for changes 2986 * in a block's replication class. 2987 * 2988 * Here's an example of what can occur: 2989 * 2990 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1 2991 * when visited during the top-down scrub phase, it will be scrubbed twice. 2992 * This negates our scrub optimization, but is otherwise harmless. 2993 * 2994 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1 2995 * on each visit during the top-down scrub phase, it will never be scrubbed. 2996 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's 2997 * reference class transitions to a higher level (i.e DDT_CLASS_UNIQUE to 2998 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1 2999 * while a scrub is in progress, it scrubs the block right then. 3000 */ 3001 static void 3002 dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx) 3003 { 3004 ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark; 3005 ddt_entry_t dde = {{{{0}}}}; 3006 int error; 3007 uint64_t n = 0; 3008 3009 while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) { 3010 ddt_t *ddt; 3011 3012 if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max) 3013 break; 3014 dprintf("visiting ddb=%llu/%llu/%llu/%llx\n", 3015 (longlong_t)ddb->ddb_class, 3016 (longlong_t)ddb->ddb_type, 3017 (longlong_t)ddb->ddb_checksum, 3018 (longlong_t)ddb->ddb_cursor); 3019 3020 /* There should be no pending changes to the dedup table */ 3021 ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum]; 3022 ASSERT(avl_first(&ddt->ddt_tree) == NULL); 3023 3024 dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx); 3025 n++; 3026 3027 if (dsl_scan_check_suspend(scn, NULL)) 3028 break; 3029 } 3030 3031 zfs_dbgmsg("scanned %llu ddt entries on %s with class_max = %u; " 3032 "suspending=%u", (longlong_t)n, scn->scn_dp->dp_spa->spa_name, 3033 (int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending); 3034 3035 ASSERT(error == 0 || error == ENOENT); 3036 ASSERT(error != ENOENT || 3037 ddb->ddb_class > scn->scn_phys.scn_ddt_class_max); 3038 } 3039 3040 static uint64_t 3041 dsl_scan_ds_maxtxg(dsl_dataset_t *ds) 3042 { 3043 uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg; 3044 if (ds->ds_is_snapshot) 3045 return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg)); 3046 return (smt); 3047 } 3048 3049 static void 3050 dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) 3051 { 3052 scan_ds_t *sds; 3053 dsl_pool_t *dp = scn->scn_dp; 3054 3055 if (scn->scn_phys.scn_ddt_bookmark.ddb_class <= 3056 scn->scn_phys.scn_ddt_class_max) { 3057 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 3058 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 3059 dsl_scan_ddt(scn, tx); 3060 if (scn->scn_suspending) 3061 return; 3062 } 3063 3064 if 
(scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) { 3065 /* First do the MOS & ORIGIN */ 3066 3067 scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg; 3068 scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg; 3069 dsl_scan_visit_rootbp(scn, NULL, 3070 &dp->dp_meta_rootbp, tx); 3071 if (scn->scn_suspending) 3072 return; 3073 3074 if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) { 3075 VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, 3076 enqueue_cb, NULL, DS_FIND_CHILDREN)); 3077 } else { 3078 dsl_scan_visitds(scn, 3079 dp->dp_origin_snap->ds_object, tx); 3080 } 3081 ASSERT(!scn->scn_suspending); 3082 } else if (scn->scn_phys.scn_bookmark.zb_objset != 3083 ZB_DESTROYED_OBJSET) { 3084 uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset; 3085 /* 3086 * If we were suspended, continue from here. Note if the 3087 * ds we were suspended on was deleted, the zb_objset may 3088 * be -1, so we will skip this and find a new objset 3089 * below. 3090 */ 3091 dsl_scan_visitds(scn, dsobj, tx); 3092 if (scn->scn_suspending) 3093 return; 3094 } 3095 3096 /* 3097 * In case we suspended right at the end of the ds, zero the 3098 * bookmark so we don't think that we're still trying to resume. 3099 */ 3100 memset(&scn->scn_phys.scn_bookmark, 0, sizeof (zbookmark_phys_t)); 3101 3102 /* 3103 * Keep pulling things out of the dataset avl queue. Updates to the 3104 * persistent zap-object-as-queue happen only at checkpoints. 3105 */ 3106 while ((sds = avl_first(&scn->scn_queue)) != NULL) { 3107 dsl_dataset_t *ds; 3108 uint64_t dsobj = sds->sds_dsobj; 3109 uint64_t txg = sds->sds_txg; 3110 3111 /* dequeue and free the ds from the queue */ 3112 scan_ds_queue_remove(scn, dsobj); 3113 sds = NULL; 3114 3115 /* set up min / max txg */ 3116 VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); 3117 if (txg != 0) { 3118 scn->scn_phys.scn_cur_min_txg = 3119 MAX(scn->scn_phys.scn_min_txg, txg); 3120 } else { 3121 scn->scn_phys.scn_cur_min_txg = 3122 MAX(scn->scn_phys.scn_min_txg, 3123 dsl_dataset_phys(ds)->ds_prev_snap_txg); 3124 } 3125 scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds); 3126 dsl_dataset_rele(ds, FTAG); 3127 3128 dsl_scan_visitds(scn, dsobj, tx); 3129 if (scn->scn_suspending) 3130 return; 3131 } 3132 3133 /* No more objsets to fetch, we're done */ 3134 scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET; 3135 ASSERT0(scn->scn_suspending); 3136 } 3137 3138 static uint64_t 3139 dsl_scan_count_data_disks(spa_t *spa) 3140 { 3141 vdev_t *rvd = spa->spa_root_vdev; 3142 uint64_t i, leaves = 0; 3143 3144 for (i = 0; i < rvd->vdev_children; i++) { 3145 vdev_t *vd = rvd->vdev_child[i]; 3146 if (vd->vdev_islog || vd->vdev_isspare || vd->vdev_isl2cache) 3147 continue; 3148 leaves += vdev_get_ndisks(vd) - vdev_get_nparity(vd); 3149 } 3150 return (leaves); 3151 } 3152 3153 static void 3154 scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp) 3155 { 3156 int i; 3157 uint64_t cur_size = 0; 3158 3159 for (i = 0; i < BP_GET_NDVAS(bp); i++) { 3160 cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]); 3161 } 3162 3163 q->q_total_zio_size_this_txg += cur_size; 3164 q->q_zios_this_txg++; 3165 } 3166 3167 static void 3168 scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start, 3169 uint64_t end) 3170 { 3171 q->q_total_seg_size_this_txg += end - start; 3172 q->q_segs_this_txg++; 3173 } 3174 3175 static boolean_t 3176 scan_io_queue_check_suspend(dsl_scan_t *scn) 3177 { 3178 /* See comment in dsl_scan_check_suspend() */ 3179 uint64_t curr_time_ns = 
gethrtime(); 3180 uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time; 3181 uint64_t sync_time_ns = curr_time_ns - 3182 scn->scn_dp->dp_spa->spa_sync_starttime; 3183 uint64_t dirty_min_bytes = zfs_dirty_data_max * 3184 zfs_vdev_async_write_active_min_dirty_percent / 100; 3185 uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? 3186 zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; 3187 3188 return ((NSEC2MSEC(scan_time_ns) > mintime && 3189 (scn->scn_dp->dp_dirty_total >= dirty_min_bytes || 3190 txg_sync_waiting(scn->scn_dp) || 3191 NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) || 3192 spa_shutting_down(scn->scn_dp->dp_spa)); 3193 } 3194 3195 /* 3196 * Given a list of scan_io_t's in io_list, this issues the I/Os out to 3197 * disk. This consumes the io_list and frees the scan_io_t's. This is 3198 * called when emptying queues, either when we're up against the memory 3199 * limit or when we have finished scanning. Returns B_TRUE if we stopped 3200 * processing the list before we finished. Any sios that were not issued 3201 * will remain in the io_list. 3202 */ 3203 static boolean_t 3204 scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list) 3205 { 3206 dsl_scan_t *scn = queue->q_scn; 3207 scan_io_t *sio; 3208 boolean_t suspended = B_FALSE; 3209 3210 while ((sio = list_head(io_list)) != NULL) { 3211 blkptr_t bp; 3212 3213 if (scan_io_queue_check_suspend(scn)) { 3214 suspended = B_TRUE; 3215 break; 3216 } 3217 3218 sio2bp(sio, &bp); 3219 scan_exec_io(scn->scn_dp, &bp, sio->sio_flags, 3220 &sio->sio_zb, queue); 3221 (void) list_remove_head(io_list); 3222 scan_io_queues_update_zio_stats(queue, &bp); 3223 sio_free(sio); 3224 } 3225 return (suspended); 3226 } 3227 3228 /* 3229 * This function removes sios from an IO queue which reside within a given 3230 * range_seg_t and inserts them (in offset order) into a list. Note that 3231 * we only ever return a maximum of 32 sios at once. If there are more sios 3232 * to process within this segment that did not make it onto the list we 3233 * return B_TRUE and otherwise B_FALSE. 3234 */ 3235 static boolean_t 3236 scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list) 3237 { 3238 scan_io_t *srch_sio, *sio, *next_sio; 3239 avl_index_t idx; 3240 uint_t num_sios = 0; 3241 int64_t bytes_issued = 0; 3242 3243 ASSERT(rs != NULL); 3244 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3245 3246 srch_sio = sio_alloc(1); 3247 srch_sio->sio_nr_dvas = 1; 3248 SIO_SET_OFFSET(srch_sio, rs_get_start(rs, queue->q_exts_by_addr)); 3249 3250 /* 3251 * The exact start of the extent might not contain any matching zios, 3252 * so if that's the case, examine the next one in the tree. 
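 * For example (hypothetical offsets), if the extent covers [1M, 2M)
 * but the first queued sio in it starts at 1M + 128K, the avl_find()
 * below misses and avl_nearest(AVL_AFTER) picks up that sio instead.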
3253 */ 3254 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 3255 sio_free(srch_sio); 3256 3257 if (sio == NULL) 3258 sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER); 3259 3260 while (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 3261 queue->q_exts_by_addr) && num_sios <= 32) { 3262 ASSERT3U(SIO_GET_OFFSET(sio), >=, rs_get_start(rs, 3263 queue->q_exts_by_addr)); 3264 ASSERT3U(SIO_GET_END_OFFSET(sio), <=, rs_get_end(rs, 3265 queue->q_exts_by_addr)); 3266 3267 next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio); 3268 avl_remove(&queue->q_sios_by_addr, sio); 3269 if (avl_is_empty(&queue->q_sios_by_addr)) 3270 atomic_add_64(&queue->q_scn->scn_queues_pending, -1); 3271 queue->q_sio_memused -= SIO_GET_MUSED(sio); 3272 3273 bytes_issued += SIO_GET_ASIZE(sio); 3274 num_sios++; 3275 list_insert_tail(list, sio); 3276 sio = next_sio; 3277 } 3278 3279 /* 3280 * We limit the number of sios we process at once to 32 to avoid 3281 * biting off more than we can chew. If we didn't take everything 3282 * in the segment we update it to reflect the work we were able to 3283 * complete. Otherwise, we remove it from the range tree entirely. 3284 */ 3285 if (sio != NULL && SIO_GET_OFFSET(sio) < rs_get_end(rs, 3286 queue->q_exts_by_addr)) { 3287 range_tree_adjust_fill(queue->q_exts_by_addr, rs, 3288 -bytes_issued); 3289 range_tree_resize_segment(queue->q_exts_by_addr, rs, 3290 SIO_GET_OFFSET(sio), rs_get_end(rs, 3291 queue->q_exts_by_addr) - SIO_GET_OFFSET(sio)); 3292 queue->q_last_ext_addr = SIO_GET_OFFSET(sio); 3293 return (B_TRUE); 3294 } else { 3295 uint64_t rstart = rs_get_start(rs, queue->q_exts_by_addr); 3296 uint64_t rend = rs_get_end(rs, queue->q_exts_by_addr); 3297 range_tree_remove(queue->q_exts_by_addr, rstart, rend - rstart); 3298 queue->q_last_ext_addr = -1; 3299 return (B_FALSE); 3300 } 3301 } 3302 3303 /* 3304 * This is called from the queue emptying thread and selects the next 3305 * extent from which we are to issue I/Os. The behavior of this function 3306 * depends on the state of the scan, the current memory consumption and 3307 * whether or not we are performing a scan shutdown. 3308 * 1) We select extents in an elevator algorithm (LBA-order) if the scan 3309 * needs to perform a checkpoint 3310 * 2) We select the largest available extent if we are up against the 3311 * memory limit. 3312 * 3) Otherwise we don't select any extents. 3313 */ 3314 static range_seg_t * 3315 scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue) 3316 { 3317 dsl_scan_t *scn = queue->q_scn; 3318 range_tree_t *rt = queue->q_exts_by_addr; 3319 3320 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 3321 ASSERT(scn->scn_is_sorted); 3322 3323 if (!scn->scn_checkpointing && !scn->scn_clearing) 3324 return (NULL); 3325 3326 /* 3327 * During normal clearing, we want to issue our largest segments 3328 * first, keeping IO as sequential as possible, and leaving the 3329 * smaller extents for later with the hope that they might eventually 3330 * grow to larger sequential segments. However, when the scan is 3331 * checkpointing, no new extents will be added to the sorting queue, 3332 * so the way we are sorted now is as good as it will ever get. 3333 * In this case, we instead switch to issuing extents in LBA order. 3334 */ 3335 if ((zfs_scan_issue_strategy < 1 && scn->scn_checkpointing) || 3336 zfs_scan_issue_strategy == 1) 3337 return (range_tree_first(rt)); 3338 3339 /* 3340 * Try to continue previous extent if it is not completed yet. 
After 3341 * shrink in scan_io_queue_gather() it may no longer be the best, but 3342 * otherwise we leave shorter remnant every txg. 3343 */ 3344 uint64_t start; 3345 uint64_t size = 1ULL << rt->rt_shift; 3346 range_seg_t *addr_rs; 3347 if (queue->q_last_ext_addr != -1) { 3348 start = queue->q_last_ext_addr; 3349 addr_rs = range_tree_find(rt, start, size); 3350 if (addr_rs != NULL) 3351 return (addr_rs); 3352 } 3353 3354 /* 3355 * Nothing to continue, so find new best extent. 3356 */ 3357 uint64_t *v = zfs_btree_first(&queue->q_exts_by_size, NULL); 3358 if (v == NULL) 3359 return (NULL); 3360 queue->q_last_ext_addr = start = *v << rt->rt_shift; 3361 3362 /* 3363 * We need to get the original entry in the by_addr tree so we can 3364 * modify it. 3365 */ 3366 addr_rs = range_tree_find(rt, start, size); 3367 ASSERT3P(addr_rs, !=, NULL); 3368 ASSERT3U(rs_get_start(addr_rs, rt), ==, start); 3369 ASSERT3U(rs_get_end(addr_rs, rt), >, start); 3370 return (addr_rs); 3371 } 3372 3373 static void 3374 scan_io_queues_run_one(void *arg) 3375 { 3376 dsl_scan_io_queue_t *queue = arg; 3377 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 3378 boolean_t suspended = B_FALSE; 3379 range_seg_t *rs; 3380 scan_io_t *sio; 3381 zio_t *zio; 3382 list_t sio_list; 3383 3384 ASSERT(queue->q_scn->scn_is_sorted); 3385 3386 list_create(&sio_list, sizeof (scan_io_t), 3387 offsetof(scan_io_t, sio_nodes.sio_list_node)); 3388 zio = zio_null(queue->q_scn->scn_zio_root, queue->q_scn->scn_dp->dp_spa, 3389 NULL, NULL, NULL, ZIO_FLAG_CANFAIL); 3390 mutex_enter(q_lock); 3391 queue->q_zio = zio; 3392 3393 /* Calculate maximum in-flight bytes for this vdev. */ 3394 queue->q_maxinflight_bytes = MAX(1, zfs_scan_vdev_limit * 3395 (vdev_get_ndisks(queue->q_vd) - vdev_get_nparity(queue->q_vd))); 3396 3397 /* reset per-queue scan statistics for this txg */ 3398 queue->q_total_seg_size_this_txg = 0; 3399 queue->q_segs_this_txg = 0; 3400 queue->q_total_zio_size_this_txg = 0; 3401 queue->q_zios_this_txg = 0; 3402 3403 /* loop until we run out of time or sios */ 3404 while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) { 3405 uint64_t seg_start = 0, seg_end = 0; 3406 boolean_t more_left; 3407 3408 ASSERT(list_is_empty(&sio_list)); 3409 3410 /* loop while we still have sios left to process in this rs */ 3411 do { 3412 scan_io_t *first_sio, *last_sio; 3413 3414 /* 3415 * We have selected which extent needs to be 3416 * processed next. Gather up the corresponding sios. 3417 */ 3418 more_left = scan_io_queue_gather(queue, rs, &sio_list); 3419 ASSERT(!list_is_empty(&sio_list)); 3420 first_sio = list_head(&sio_list); 3421 last_sio = list_tail(&sio_list); 3422 3423 seg_end = SIO_GET_END_OFFSET(last_sio); 3424 if (seg_start == 0) 3425 seg_start = SIO_GET_OFFSET(first_sio); 3426 3427 /* 3428 * Issuing sios can take a long time so drop the 3429 * queue lock. The sio queue won't be updated by 3430 * other threads since we're in syncing context so 3431 * we can be sure that our trees will remain exactly 3432 * as we left them. 3433 */ 3434 mutex_exit(q_lock); 3435 suspended = scan_io_queue_issue(queue, &sio_list); 3436 mutex_enter(q_lock); 3437 3438 if (suspended) 3439 break; 3440 } while (more_left); 3441 3442 /* update statistics for debugging purposes */ 3443 scan_io_queues_update_seg_stats(queue, seg_start, seg_end); 3444 3445 if (suspended) 3446 break; 3447 } 3448 3449 /* 3450 * If we were suspended in the middle of processing, 3451 * requeue any unfinished sios and exit. 
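 * (Requeueing goes through scan_io_queue_insert_impl(), so the
 * leftover sios and their extents should simply reappear in the
 * queue's trees and be picked up by a later run.)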
3452 */ 3453 while ((sio = list_remove_head(&sio_list)) != NULL) 3454 scan_io_queue_insert_impl(queue, sio); 3455 3456 queue->q_zio = NULL; 3457 mutex_exit(q_lock); 3458 zio_nowait(zio); 3459 list_destroy(&sio_list); 3460 } 3461 3462 /* 3463 * Performs an emptying run on all scan queues in the pool. This just 3464 * punches out one thread per top-level vdev, each of which processes 3465 * only that vdev's scan queue. We can parallelize the I/O here because 3466 * we know that each queue's I/Os only affect its own top-level vdev. 3467 * 3468 * This function waits for the queue runs to complete, and must be 3469 * called from dsl_scan_sync (or in general, syncing context). 3470 */ 3471 static void 3472 scan_io_queues_run(dsl_scan_t *scn) 3473 { 3474 spa_t *spa = scn->scn_dp->dp_spa; 3475 3476 ASSERT(scn->scn_is_sorted); 3477 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3478 3479 if (scn->scn_queues_pending == 0) 3480 return; 3481 3482 if (scn->scn_taskq == NULL) { 3483 int nthreads = spa->spa_root_vdev->vdev_children; 3484 3485 /* 3486 * We need to make this taskq *always* execute as many 3487 * threads in parallel as we have top-level vdevs and no 3488 * less, otherwise strange serialization of the calls to 3489 * scan_io_queues_run_one can occur during spa_sync runs 3490 * and that significantly impacts performance. 3491 */ 3492 scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads, 3493 minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE); 3494 } 3495 3496 for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3497 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3498 3499 mutex_enter(&vd->vdev_scan_io_queue_lock); 3500 if (vd->vdev_scan_io_queue != NULL) { 3501 VERIFY(taskq_dispatch(scn->scn_taskq, 3502 scan_io_queues_run_one, vd->vdev_scan_io_queue, 3503 TQ_SLEEP) != TASKQID_INVALID); 3504 } 3505 mutex_exit(&vd->vdev_scan_io_queue_lock); 3506 } 3507 3508 /* 3509 * Wait for the queues to finish issuing their IOs for this run 3510 * before we return. There may still be IOs in flight at this 3511 * point. 
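Note that this only waits for the queue-runner tasks themselves; the read zios they issued are children of scn_zio_root, which dsl_scan_sync() waits on separately via zio_wait().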
3512 */ 3513 taskq_wait(scn->scn_taskq); 3514 } 3515 3516 static boolean_t 3517 dsl_scan_async_block_should_pause(dsl_scan_t *scn) 3518 { 3519 uint64_t elapsed_nanosecs; 3520 3521 if (zfs_recover) 3522 return (B_FALSE); 3523 3524 if (zfs_async_block_max_blocks != 0 && 3525 scn->scn_visited_this_txg >= zfs_async_block_max_blocks) { 3526 return (B_TRUE); 3527 } 3528 3529 if (zfs_max_async_dedup_frees != 0 && 3530 scn->scn_dedup_frees_this_txg >= zfs_max_async_dedup_frees) { 3531 return (B_TRUE); 3532 } 3533 3534 elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time; 3535 return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout || 3536 (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms && 3537 txg_sync_waiting(scn->scn_dp)) || 3538 spa_shutting_down(scn->scn_dp->dp_spa)); 3539 } 3540 3541 static int 3542 dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 3543 { 3544 dsl_scan_t *scn = arg; 3545 3546 if (!scn->scn_is_bptree || 3547 (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) { 3548 if (dsl_scan_async_block_should_pause(scn)) 3549 return (SET_ERROR(ERESTART)); 3550 } 3551 3552 zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa, 3553 dmu_tx_get_txg(tx), bp, 0)); 3554 dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD, 3555 -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp), 3556 -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx); 3557 scn->scn_visited_this_txg++; 3558 if (BP_GET_DEDUP(bp)) 3559 scn->scn_dedup_frees_this_txg++; 3560 return (0); 3561 } 3562 3563 static void 3564 dsl_scan_update_stats(dsl_scan_t *scn) 3565 { 3566 spa_t *spa = scn->scn_dp->dp_spa; 3567 uint64_t i; 3568 uint64_t seg_size_total = 0, zio_size_total = 0; 3569 uint64_t seg_count_total = 0, zio_count_total = 0; 3570 3571 for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) { 3572 vdev_t *vd = spa->spa_root_vdev->vdev_child[i]; 3573 dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue; 3574 3575 if (queue == NULL) 3576 continue; 3577 3578 seg_size_total += queue->q_total_seg_size_this_txg; 3579 zio_size_total += queue->q_total_zio_size_this_txg; 3580 seg_count_total += queue->q_segs_this_txg; 3581 zio_count_total += queue->q_zios_this_txg; 3582 } 3583 3584 if (seg_count_total == 0 || zio_count_total == 0) { 3585 scn->scn_avg_seg_size_this_txg = 0; 3586 scn->scn_avg_zio_size_this_txg = 0; 3587 scn->scn_segs_this_txg = 0; 3588 scn->scn_zios_this_txg = 0; 3589 return; 3590 } 3591 3592 scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total; 3593 scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total; 3594 scn->scn_segs_this_txg = seg_count_total; 3595 scn->scn_zios_this_txg = zio_count_total; 3596 } 3597 3598 static int 3599 bpobj_dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3600 dmu_tx_t *tx) 3601 { 3602 ASSERT(!bp_freed); 3603 return (dsl_scan_free_block_cb(arg, bp, tx)); 3604 } 3605 3606 static int 3607 dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, 3608 dmu_tx_t *tx) 3609 { 3610 ASSERT(!bp_freed); 3611 dsl_scan_t *scn = arg; 3612 const dva_t *dva = &bp->blk_dva[0]; 3613 3614 if (dsl_scan_async_block_should_pause(scn)) 3615 return (SET_ERROR(ERESTART)); 3616 3617 spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa, 3618 DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva), 3619 DVA_GET_ASIZE(dva), tx); 3620 scn->scn_visited_this_txg++; 3621 return (0); 3622 } 3623 3624 boolean_t 3625 dsl_scan_active(dsl_scan_t *scn) 3626 { 3627 spa_t *spa = scn->scn_dp->dp_spa; 3628 uint64_t used = 0, comp, uncomp; 3629 
boolean_t clones_left; 3630 3631 if (spa->spa_load_state != SPA_LOAD_NONE) 3632 return (B_FALSE); 3633 if (spa_shutting_down(spa)) 3634 return (B_FALSE); 3635 if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) || 3636 (scn->scn_async_destroying && !scn->scn_async_stalled)) 3637 return (B_TRUE); 3638 3639 if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) { 3640 (void) bpobj_space(&scn->scn_dp->dp_free_bpobj, 3641 &used, &comp, &uncomp); 3642 } 3643 clones_left = spa_livelist_delete_check(spa); 3644 return ((used != 0) || (clones_left)); 3645 } 3646 3647 boolean_t 3648 dsl_errorscrub_active(dsl_scan_t *scn) 3649 { 3650 spa_t *spa = scn->scn_dp->dp_spa; 3651 if (spa->spa_load_state != SPA_LOAD_NONE) 3652 return (B_FALSE); 3653 if (spa_shutting_down(spa)) 3654 return (B_FALSE); 3655 if (dsl_errorscrubbing(scn->scn_dp)) 3656 return (B_TRUE); 3657 return (B_FALSE); 3658 } 3659 3660 static boolean_t 3661 dsl_scan_check_deferred(vdev_t *vd) 3662 { 3663 boolean_t need_resilver = B_FALSE; 3664 3665 for (int c = 0; c < vd->vdev_children; c++) { 3666 need_resilver |= 3667 dsl_scan_check_deferred(vd->vdev_child[c]); 3668 } 3669 3670 if (!vdev_is_concrete(vd) || vd->vdev_aux || 3671 !vd->vdev_ops->vdev_op_leaf) 3672 return (need_resilver); 3673 3674 if (!vd->vdev_resilver_deferred) 3675 need_resilver = B_TRUE; 3676 3677 return (need_resilver); 3678 } 3679 3680 static boolean_t 3681 dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize, 3682 uint64_t phys_birth) 3683 { 3684 vdev_t *vd; 3685 3686 vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 3687 3688 if (vd->vdev_ops == &vdev_indirect_ops) { 3689 /* 3690 * The indirect vdev can point to multiple 3691 * vdevs. For simplicity, always create 3692 * the resilver zio_t. zio_vdev_io_start() 3693 * will bypass the child resilver i/o's if 3694 * they are on vdevs that don't have DTL's. 3695 */ 3696 return (B_TRUE); 3697 } 3698 3699 if (DVA_GET_GANG(dva)) { 3700 /* 3701 * Gang members may be spread across multiple 3702 * vdevs, so the best estimate we have is the 3703 * scrub range, which has already been checked. 3704 * XXX -- it would be better to change our 3705 * allocation policy to ensure that all 3706 * gang members reside on the same vdev. 3707 */ 3708 return (B_TRUE); 3709 } 3710 3711 /* 3712 * Check if the top-level vdev must resilver this offset. 3713 * When the offset does not intersect with a dirty leaf DTL 3714 * then it may be possible to skip the resilver IO. The psize 3715 * is provided instead of asize to simplify the check for RAIDZ. 3716 */ 3717 if (!vdev_dtl_need_resilver(vd, dva, psize, phys_birth)) 3718 return (B_FALSE); 3719 3720 /* 3721 * Check that this top-level vdev has a device under it which 3722 * is resilvering and is not deferred. 
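dsl_scan_check_deferred() returns B_TRUE only if some leaf below vd does not have vdev_resilver_deferred set; if every leaf is deferring its resilver, the I/O can be skipped for now.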
3723 */ 3724 if (!dsl_scan_check_deferred(vd)) 3725 return (B_FALSE); 3726 3727 return (B_TRUE); 3728 } 3729 3730 static int 3731 dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx) 3732 { 3733 dsl_scan_t *scn = dp->dp_scan; 3734 spa_t *spa = dp->dp_spa; 3735 int err = 0; 3736 3737 if (spa_suspend_async_destroy(spa)) 3738 return (0); 3739 3740 if (zfs_free_bpobj_enabled && 3741 spa_version(spa) >= SPA_VERSION_DEADLISTS) { 3742 scn->scn_is_bptree = B_FALSE; 3743 scn->scn_async_block_min_time_ms = zfs_free_min_time_ms; 3744 scn->scn_zio_root = zio_root(spa, NULL, 3745 NULL, ZIO_FLAG_MUSTSUCCEED); 3746 err = bpobj_iterate(&dp->dp_free_bpobj, 3747 bpobj_dsl_scan_free_block_cb, scn, tx); 3748 VERIFY0(zio_wait(scn->scn_zio_root)); 3749 scn->scn_zio_root = NULL; 3750 3751 if (err != 0 && err != ERESTART) 3752 zfs_panic_recover("error %u from bpobj_iterate()", err); 3753 } 3754 3755 if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) { 3756 ASSERT(scn->scn_async_destroying); 3757 scn->scn_is_bptree = B_TRUE; 3758 scn->scn_zio_root = zio_root(spa, NULL, 3759 NULL, ZIO_FLAG_MUSTSUCCEED); 3760 err = bptree_iterate(dp->dp_meta_objset, 3761 dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx); 3762 VERIFY0(zio_wait(scn->scn_zio_root)); 3763 scn->scn_zio_root = NULL; 3764 3765 if (err == EIO || err == ECKSUM) { 3766 err = 0; 3767 } else if (err != 0 && err != ERESTART) { 3768 zfs_panic_recover("error %u from " 3769 "traverse_dataset_destroyed()", err); 3770 } 3771 3772 if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) { 3773 /* finished; deactivate async destroy feature */ 3774 spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx); 3775 ASSERT(!spa_feature_is_active(spa, 3776 SPA_FEATURE_ASYNC_DESTROY)); 3777 VERIFY0(zap_remove(dp->dp_meta_objset, 3778 DMU_POOL_DIRECTORY_OBJECT, 3779 DMU_POOL_BPTREE_OBJ, tx)); 3780 VERIFY0(bptree_free(dp->dp_meta_objset, 3781 dp->dp_bptree_obj, tx)); 3782 dp->dp_bptree_obj = 0; 3783 scn->scn_async_destroying = B_FALSE; 3784 scn->scn_async_stalled = B_FALSE; 3785 } else { 3786 /* 3787 * If we didn't make progress, mark the async 3788 * destroy as stalled, so that we will not initiate 3789 * a spa_sync() on its behalf. Note that we only 3790 * check this if we are not finished, because if the 3791 * bptree had no blocks for us to visit, we can 3792 * finish without "making progress". 3793 */ 3794 scn->scn_async_stalled = 3795 (scn->scn_visited_this_txg == 0); 3796 } 3797 } 3798 if (scn->scn_visited_this_txg) { 3799 zfs_dbgmsg("freed %llu blocks in %llums from " 3800 "free_bpobj/bptree on %s in txg %llu; err=%u", 3801 (longlong_t)scn->scn_visited_this_txg, 3802 (longlong_t) 3803 NSEC2MSEC(gethrtime() - scn->scn_sync_start_time), 3804 spa->spa_name, (longlong_t)tx->tx_txg, err); 3805 scn->scn_visited_this_txg = 0; 3806 scn->scn_dedup_frees_this_txg = 0; 3807 3808 /* 3809 * Write out changes to the DDT and the BRT that may be required 3810 * as a result of the blocks freed. This ensures that the DDT 3811 * and the BRT are clean when a scrub/resilver runs. 
3812 */ 3813 ddt_sync(spa, tx->tx_txg); 3814 brt_sync(spa, tx->tx_txg); 3815 } 3816 if (err != 0) 3817 return (err); 3818 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3819 zfs_free_leak_on_eio && 3820 (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 || 3821 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 || 3822 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) { 3823 /* 3824 * We have finished background destroying, but there is still 3825 * some space left in the dp_free_dir. Transfer this leaked 3826 * space to the dp_leak_dir. 3827 */ 3828 if (dp->dp_leak_dir == NULL) { 3829 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 3830 (void) dsl_dir_create_sync(dp, dp->dp_root_dir, 3831 LEAK_DIR_NAME, tx); 3832 VERIFY0(dsl_pool_open_special_dir(dp, 3833 LEAK_DIR_NAME, &dp->dp_leak_dir)); 3834 rrw_exit(&dp->dp_config_rwlock, FTAG); 3835 } 3836 dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD, 3837 dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3838 dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3839 dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3840 dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD, 3841 -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes, 3842 -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes, 3843 -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx); 3844 } 3845 3846 if (dp->dp_free_dir != NULL && !scn->scn_async_destroying && 3847 !spa_livelist_delete_check(spa)) { 3848 /* finished; verify that space accounting went to zero */ 3849 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes); 3850 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes); 3851 ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes); 3852 } 3853 3854 spa_notify_waiters(spa); 3855 3856 EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj), 3857 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT, 3858 DMU_POOL_OBSOLETE_BPOBJ)); 3859 if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) { 3860 ASSERT(spa_feature_is_active(dp->dp_spa, 3861 SPA_FEATURE_OBSOLETE_COUNTS)); 3862 3863 scn->scn_is_bptree = B_FALSE; 3864 scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms; 3865 err = bpobj_iterate(&dp->dp_obsolete_bpobj, 3866 dsl_scan_obsolete_block_cb, scn, tx); 3867 if (err != 0 && err != ERESTART) 3868 zfs_panic_recover("error %u from bpobj_iterate()", err); 3869 3870 if (bpobj_is_empty(&dp->dp_obsolete_bpobj)) 3871 dsl_pool_destroy_obsolete_bpobj(dp, tx); 3872 } 3873 return (0); 3874 } 3875 3876 static void 3877 name_to_bookmark(char *buf, zbookmark_phys_t *zb) 3878 { 3879 zb->zb_objset = zfs_strtonum(buf, &buf); 3880 ASSERT(*buf == ':'); 3881 zb->zb_object = zfs_strtonum(buf + 1, &buf); 3882 ASSERT(*buf == ':'); 3883 zb->zb_level = (int)zfs_strtonum(buf + 1, &buf); 3884 ASSERT(*buf == ':'); 3885 zb->zb_blkid = zfs_strtonum(buf + 1, &buf); 3886 ASSERT(*buf == '\0'); 3887 } 3888 3889 static void 3890 name_to_object(char *buf, uint64_t *obj) 3891 { 3892 *obj = zfs_strtonum(buf, &buf); 3893 ASSERT(*buf == '\0'); 3894 } 3895 3896 static void 3897 read_by_block_level(dsl_scan_t *scn, zbookmark_phys_t zb) 3898 { 3899 dsl_pool_t *dp = scn->scn_dp; 3900 dsl_dataset_t *ds; 3901 objset_t *os; 3902 if (dsl_dataset_hold_obj(dp, zb.zb_objset, FTAG, &ds) != 0) 3903 return; 3904 3905 if (dmu_objset_from_ds(ds, &os) != 0) { 3906 dsl_dataset_rele(ds, FTAG); 3907 return; 3908 } 3909 3910 /* 3911 * If the key is not loaded dbuf_dnode_findbp() will error out with 3912 * EACCES. 
However in that case dnode_hold() will eventually call 3913 * dbuf_read()->zio_wait() which may call spa_log_error(). This will 3914 * lead to a deadlock due to us holding the mutex spa_errlist_lock. 3915 * Avoid this by checking here if the keys are loaded, if not return. 3916 * If the keys are not loaded the head_errlog feature is meaningless 3917 * as we cannot figure out the birth txg of the block pointer. 3918 */ 3919 if (dsl_dataset_get_keystatus(ds->ds_dir) == 3920 ZFS_KEYSTATUS_UNAVAILABLE) { 3921 dsl_dataset_rele(ds, FTAG); 3922 return; 3923 } 3924 3925 dnode_t *dn; 3926 blkptr_t bp; 3927 3928 if (dnode_hold(os, zb.zb_object, FTAG, &dn) != 0) { 3929 dsl_dataset_rele(ds, FTAG); 3930 return; 3931 } 3932 3933 rw_enter(&dn->dn_struct_rwlock, RW_READER); 3934 int error = dbuf_dnode_findbp(dn, zb.zb_level, zb.zb_blkid, &bp, NULL, 3935 NULL); 3936 3937 if (error) { 3938 rw_exit(&dn->dn_struct_rwlock); 3939 dnode_rele(dn, FTAG); 3940 dsl_dataset_rele(ds, FTAG); 3941 return; 3942 } 3943 3944 if (!error && BP_IS_HOLE(&bp)) { 3945 rw_exit(&dn->dn_struct_rwlock); 3946 dnode_rele(dn, FTAG); 3947 dsl_dataset_rele(ds, FTAG); 3948 return; 3949 } 3950 3951 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | 3952 ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB; 3953 3954 /* If it's an intent log block, failure is expected. */ 3955 if (zb.zb_level == ZB_ZIL_LEVEL) 3956 zio_flags |= ZIO_FLAG_SPECULATIVE; 3957 3958 ASSERT(!BP_IS_EMBEDDED(&bp)); 3959 scan_exec_io(dp, &bp, zio_flags, &zb, NULL); 3960 rw_exit(&dn->dn_struct_rwlock); 3961 dnode_rele(dn, FTAG); 3962 dsl_dataset_rele(ds, FTAG); 3963 } 3964 3965 /* 3966 * We keep track of the scrubbed error blocks in "count". This will be used 3967 * when deciding whether we exceeded zfs_scrub_error_blocks_per_txg. This 3968 * function is modelled after check_filesystem(). 3969 */ 3970 static int 3971 scrub_filesystem(spa_t *spa, uint64_t fs, zbookmark_err_phys_t *zep, 3972 int *count) 3973 { 3974 dsl_dataset_t *ds; 3975 dsl_pool_t *dp = spa->spa_dsl_pool; 3976 dsl_scan_t *scn = dp->dp_scan; 3977 3978 int error = dsl_dataset_hold_obj(dp, fs, FTAG, &ds); 3979 if (error != 0) 3980 return (error); 3981 3982 uint64_t latest_txg; 3983 uint64_t txg_to_consider = spa->spa_syncing_txg; 3984 boolean_t check_snapshot = B_TRUE; 3985 3986 error = find_birth_txg(ds, zep, &latest_txg); 3987 3988 /* 3989 * If find_birth_txg() errors out, then err on the side of caution and 3990 * proceed. In worst case scenario scrub all objects. If zep->zb_birth 3991 * is 0 (e.g. in case of encryption with unloaded keys) also proceed to 3992 * scrub all objects. 3993 */ 3994 if (error == 0 && zep->zb_birth == latest_txg) { 3995 /* Block neither free nor re written. */ 3996 zbookmark_phys_t zb; 3997 zep_to_zb(fs, zep, &zb); 3998 scn->scn_zio_root = zio_root(spa, NULL, NULL, 3999 ZIO_FLAG_CANFAIL); 4000 /* We have already acquired the config lock for spa */ 4001 read_by_block_level(scn, zb); 4002 4003 (void) zio_wait(scn->scn_zio_root); 4004 scn->scn_zio_root = NULL; 4005 4006 scn->errorscrub_phys.dep_examined++; 4007 scn->errorscrub_phys.dep_to_examine--; 4008 (*count)++; 4009 if ((*count) == zfs_scrub_error_blocks_per_txg || 4010 dsl_error_scrub_check_suspend(scn, &zb)) { 4011 dsl_dataset_rele(ds, FTAG); 4012 return (SET_ERROR(EFAULT)); 4013 } 4014 4015 check_snapshot = B_FALSE; 4016 } else if (error == 0) { 4017 txg_to_consider = latest_txg; 4018 } 4019 4020 /* 4021 * Retrieve the number of snapshots if the dataset is not a snapshot. 
4022 */ 4023 uint64_t snap_count = 0; 4024 if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) { 4025 4026 error = zap_count(spa->spa_meta_objset, 4027 dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count); 4028 4029 if (error != 0) { 4030 dsl_dataset_rele(ds, FTAG); 4031 return (error); 4032 } 4033 } 4034 4035 if (snap_count == 0) { 4036 /* Filesystem without snapshots. */ 4037 dsl_dataset_rele(ds, FTAG); 4038 return (0); 4039 } 4040 4041 uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 4042 uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 4043 4044 dsl_dataset_rele(ds, FTAG); 4045 4046 /* Check only snapshots created from this file system. */ 4047 while (snap_obj != 0 && zep->zb_birth < snap_obj_txg && 4048 snap_obj_txg <= txg_to_consider) { 4049 4050 error = dsl_dataset_hold_obj(dp, snap_obj, FTAG, &ds); 4051 if (error != 0) 4052 return (error); 4053 4054 if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != fs) { 4055 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 4056 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 4057 dsl_dataset_rele(ds, FTAG); 4058 continue; 4059 } 4060 4061 boolean_t affected = B_TRUE; 4062 if (check_snapshot) { 4063 uint64_t blk_txg; 4064 error = find_birth_txg(ds, zep, &blk_txg); 4065 4066 /* 4067 * Scrub the snapshot also when zb_birth == 0 or when 4068 * find_birth_txg() returns an error. 4069 */ 4070 affected = (error == 0 && zep->zb_birth == blk_txg) || 4071 (error != 0) || (zep->zb_birth == 0); 4072 } 4073 4074 /* Scrub snapshots. */ 4075 if (affected) { 4076 zbookmark_phys_t zb; 4077 zep_to_zb(snap_obj, zep, &zb); 4078 scn->scn_zio_root = zio_root(spa, NULL, NULL, 4079 ZIO_FLAG_CANFAIL); 4080 /* We have already acquired the config lock for spa */ 4081 read_by_block_level(scn, zb); 4082 4083 (void) zio_wait(scn->scn_zio_root); 4084 scn->scn_zio_root = NULL; 4085 4086 scn->errorscrub_phys.dep_examined++; 4087 scn->errorscrub_phys.dep_to_examine--; 4088 (*count)++; 4089 if ((*count) == zfs_scrub_error_blocks_per_txg || 4090 dsl_error_scrub_check_suspend(scn, &zb)) { 4091 dsl_dataset_rele(ds, FTAG); 4092 return (EFAULT); 4093 } 4094 } 4095 snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg; 4096 snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 4097 dsl_dataset_rele(ds, FTAG); 4098 } 4099 return (0); 4100 } 4101 4102 void 4103 dsl_errorscrub_sync(dsl_pool_t *dp, dmu_tx_t *tx) 4104 { 4105 spa_t *spa = dp->dp_spa; 4106 dsl_scan_t *scn = dp->dp_scan; 4107 4108 /* 4109 * Only process scans in sync pass 1. 4110 */ 4111 4112 if (spa_sync_pass(spa) > 1) 4113 return; 4114 4115 /* 4116 * If the spa is shutting down, then stop scanning. This will 4117 * ensure that the scan does not dirty any new data during the 4118 * shutdown phase. 4119 */ 4120 if (spa_shutting_down(spa)) 4121 return; 4122 4123 if (!dsl_errorscrub_active(scn) || dsl_errorscrub_is_paused(scn)) { 4124 return; 4125 } 4126 4127 if (dsl_scan_resilvering(scn->scn_dp)) { 4128 /* cancel the error scrub if resilver started */ 4129 dsl_scan_cancel(scn->scn_dp); 4130 return; 4131 } 4132 4133 spa->spa_scrub_active = B_TRUE; 4134 scn->scn_sync_start_time = gethrtime(); 4135 4136 /* 4137 * zfs_scan_suspend_progress can be set to disable scrub progress. 4138 * See more detailed comment in dsl_scan_sync(). 
4139 */ 4140 if (zfs_scan_suspend_progress) { 4141 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; 4142 int mintime = zfs_scrub_min_time_ms; 4143 4144 while (zfs_scan_suspend_progress && 4145 !txg_sync_waiting(scn->scn_dp) && 4146 !spa_shutting_down(scn->scn_dp->dp_spa) && 4147 NSEC2MSEC(scan_time_ns) < mintime) { 4148 delay(hz); 4149 scan_time_ns = gethrtime() - scn->scn_sync_start_time; 4150 } 4151 return; 4152 } 4153 4154 int i = 0; 4155 zap_attribute_t *za; 4156 zbookmark_phys_t *zb; 4157 boolean_t limit_exceeded = B_FALSE; 4158 4159 za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP); 4160 zb = kmem_zalloc(sizeof (zbookmark_phys_t), KM_SLEEP); 4161 4162 if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) { 4163 for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0; 4164 zap_cursor_advance(&scn->errorscrub_cursor)) { 4165 name_to_bookmark(za->za_name, zb); 4166 4167 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 4168 NULL, ZIO_FLAG_CANFAIL); 4169 dsl_pool_config_enter(dp, FTAG); 4170 read_by_block_level(scn, *zb); 4171 dsl_pool_config_exit(dp, FTAG); 4172 4173 (void) zio_wait(scn->scn_zio_root); 4174 scn->scn_zio_root = NULL; 4175 4176 scn->errorscrub_phys.dep_examined += 1; 4177 scn->errorscrub_phys.dep_to_examine -= 1; 4178 i++; 4179 if (i == zfs_scrub_error_blocks_per_txg || 4180 dsl_error_scrub_check_suspend(scn, zb)) { 4181 limit_exceeded = B_TRUE; 4182 break; 4183 } 4184 } 4185 4186 if (!limit_exceeded) 4187 dsl_errorscrub_done(scn, B_TRUE, tx); 4188 4189 dsl_errorscrub_sync_state(scn, tx); 4190 kmem_free(za, sizeof (*za)); 4191 kmem_free(zb, sizeof (*zb)); 4192 return; 4193 } 4194 4195 int error = 0; 4196 for (; zap_cursor_retrieve(&scn->errorscrub_cursor, za) == 0; 4197 zap_cursor_advance(&scn->errorscrub_cursor)) { 4198 4199 zap_cursor_t *head_ds_cursor; 4200 zap_attribute_t *head_ds_attr; 4201 zbookmark_err_phys_t head_ds_block; 4202 4203 head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP); 4204 head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP); 4205 4206 uint64_t head_ds_err_obj = za->za_first_integer; 4207 uint64_t head_ds; 4208 name_to_object(za->za_name, &head_ds); 4209 boolean_t config_held = B_FALSE; 4210 uint64_t top_affected_fs; 4211 4212 for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset, 4213 head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor, 4214 head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) { 4215 4216 name_to_errphys(head_ds_attr->za_name, &head_ds_block); 4217 4218 /* 4219 * In case we are called from spa_sync the pool 4220 * config is already held. 4221 */ 4222 if (!dsl_pool_config_held(dp)) { 4223 dsl_pool_config_enter(dp, FTAG); 4224 config_held = B_TRUE; 4225 } 4226 4227 error = find_top_affected_fs(spa, 4228 head_ds, &head_ds_block, &top_affected_fs); 4229 if (error) 4230 break; 4231 4232 error = scrub_filesystem(spa, top_affected_fs, 4233 &head_ds_block, &i); 4234 4235 if (error == SET_ERROR(EFAULT)) { 4236 limit_exceeded = B_TRUE; 4237 break; 4238 } 4239 } 4240 4241 zap_cursor_fini(head_ds_cursor); 4242 kmem_free(head_ds_cursor, sizeof (*head_ds_cursor)); 4243 kmem_free(head_ds_attr, sizeof (*head_ds_attr)); 4244 4245 if (config_held) 4246 dsl_pool_config_exit(dp, FTAG); 4247 } 4248 4249 kmem_free(za, sizeof (*za)); 4250 kmem_free(zb, sizeof (*zb)); 4251 if (!limit_exceeded) 4252 dsl_errorscrub_done(scn, B_TRUE, tx); 4253 4254 dsl_errorscrub_sync_state(scn, tx); 4255 } 4256 4257 /* 4258 * This is the primary entry point for scans that is called from syncing 4259 * context. 
Scans must happen entirely during syncing context so that we 4260 * can guarantee that blocks we are currently scanning will not change out 4261 * from under us. While a scan is active, this function controls how quickly 4262 * transaction groups proceed, instead of the normal handling provided by 4263 * txg_sync_thread(). 4264 */ 4265 void 4266 dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) 4267 { 4268 int err = 0; 4269 dsl_scan_t *scn = dp->dp_scan; 4270 spa_t *spa = dp->dp_spa; 4271 state_sync_type_t sync_type = SYNC_OPTIONAL; 4272 4273 if (spa->spa_resilver_deferred && 4274 !spa_feature_is_active(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 4275 spa_feature_incr(spa, SPA_FEATURE_RESILVER_DEFER, tx); 4276 4277 /* 4278 * Check for scn_restart_txg before checking spa_load_state, so 4279 * that we can restart an old-style scan while the pool is being 4280 * imported (see dsl_scan_init). We also restart scans if there 4281 * is a deferred resilver and the user has manually disabled 4282 * deferred resilvers via the tunable. 4283 */ 4284 if (dsl_scan_restarting(scn, tx) || 4285 (spa->spa_resilver_deferred && zfs_resilver_disable_defer)) { 4286 pool_scan_func_t func = POOL_SCAN_SCRUB; 4287 dsl_scan_done(scn, B_FALSE, tx); 4288 if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) 4289 func = POOL_SCAN_RESILVER; 4290 zfs_dbgmsg("restarting scan func=%u on %s txg=%llu", 4291 func, dp->dp_spa->spa_name, (longlong_t)tx->tx_txg); 4292 dsl_scan_setup_sync(&func, tx); 4293 } 4294 4295 /* 4296 * Only process scans in sync pass 1. 4297 */ 4298 if (spa_sync_pass(spa) > 1) 4299 return; 4300 4301 /* 4302 * If the spa is shutting down, then stop scanning. This will 4303 * ensure that the scan does not dirty any new data during the 4304 * shutdown phase. 4305 */ 4306 if (spa_shutting_down(spa)) 4307 return; 4308 4309 /* 4310 * If the scan is inactive due to a stalled async destroy, try again. 4311 */ 4312 if (!scn->scn_async_stalled && !dsl_scan_active(scn)) 4313 return; 4314 4315 /* reset scan statistics */ 4316 scn->scn_visited_this_txg = 0; 4317 scn->scn_dedup_frees_this_txg = 0; 4318 scn->scn_holes_this_txg = 0; 4319 scn->scn_lt_min_this_txg = 0; 4320 scn->scn_gt_max_this_txg = 0; 4321 scn->scn_ddt_contained_this_txg = 0; 4322 scn->scn_objsets_visited_this_txg = 0; 4323 scn->scn_avg_seg_size_this_txg = 0; 4324 scn->scn_segs_this_txg = 0; 4325 scn->scn_avg_zio_size_this_txg = 0; 4326 scn->scn_zios_this_txg = 0; 4327 scn->scn_suspending = B_FALSE; 4328 scn->scn_sync_start_time = gethrtime(); 4329 spa->spa_scrub_active = B_TRUE; 4330 4331 /* 4332 * First process the async destroys. If we suspend, don't do 4333 * any scrubbing or resilvering. This ensures that there are no 4334 * async destroys while we are scanning, so the scan code doesn't 4335 * have to worry about traversing it. It is also faster to free the 4336 * blocks than to scrub them. 4337 */ 4338 err = dsl_process_async_destroys(dp, tx); 4339 if (err != 0) 4340 return; 4341 4342 if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn)) 4343 return; 4344 4345 /* 4346 * Wait a few txgs after importing to begin scanning so that 4347 * we can get the pool imported quickly. 4348 */ 4349 if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS) 4350 return; 4351 4352 /* 4353 * zfs_scan_suspend_progress can be set to disable scan progress. 4354 * We don't want to spin the txg_sync thread, so we add a delay 4355 * here to simulate the time spent doing a scan. This is mostly 4356 * useful for testing and debugging. 
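The loop below sleeps in one-second (hz tick) increments until the tunable is cleared, the txg sync thread is needed elsewhere, the pool starts shutting down, or the minimum scan time for this txg has elapsed.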
4357 */ 4358 if (zfs_scan_suspend_progress) { 4359 uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; 4360 uint_t mintime = (scn->scn_phys.scn_func == 4361 POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms : 4362 zfs_scrub_min_time_ms; 4363 4364 while (zfs_scan_suspend_progress && 4365 !txg_sync_waiting(scn->scn_dp) && 4366 !spa_shutting_down(scn->scn_dp->dp_spa) && 4367 NSEC2MSEC(scan_time_ns) < mintime) { 4368 delay(hz); 4369 scan_time_ns = gethrtime() - scn->scn_sync_start_time; 4370 } 4371 return; 4372 } 4373 4374 /* 4375 * Disabled by default, set zfs_scan_report_txgs to report 4376 * average performance over the last zfs_scan_report_txgs TXGs. 4377 */ 4378 if (zfs_scan_report_txgs != 0 && 4379 tx->tx_txg % zfs_scan_report_txgs == 0) { 4380 scn->scn_issued_before_pass += spa->spa_scan_pass_issued; 4381 spa_scan_stat_init(spa); 4382 } 4383 4384 /* 4385 * It is possible to switch from unsorted to sorted at any time, 4386 * but afterwards the scan will remain sorted unless reloaded from 4387 * a checkpoint after a reboot. 4388 */ 4389 if (!zfs_scan_legacy) { 4390 scn->scn_is_sorted = B_TRUE; 4391 if (scn->scn_last_checkpoint == 0) 4392 scn->scn_last_checkpoint = ddi_get_lbolt(); 4393 } 4394 4395 /* 4396 * For sorted scans, determine what kind of work we will be doing 4397 * this txg based on our memory limitations and whether or not we 4398 * need to perform a checkpoint. 4399 */ 4400 if (scn->scn_is_sorted) { 4401 /* 4402 * If we are over our checkpoint interval, set scn_clearing 4403 * so that we can begin checkpointing immediately. The 4404 * checkpoint allows us to save a consistent bookmark 4405 * representing how much data we have scrubbed so far. 4406 * Otherwise, use the memory limit to determine if we should 4407 * scan for metadata or start issue scrub IOs. We accumulate 4408 * metadata until we hit our hard memory limit at which point 4409 * we issue scrub IOs until we are at our soft memory limit. 4410 */ 4411 if (scn->scn_checkpointing || 4412 ddi_get_lbolt() - scn->scn_last_checkpoint > 4413 SEC_TO_TICK(zfs_scan_checkpoint_intval)) { 4414 if (!scn->scn_checkpointing) 4415 zfs_dbgmsg("begin scan checkpoint for %s", 4416 spa->spa_name); 4417 4418 scn->scn_checkpointing = B_TRUE; 4419 scn->scn_clearing = B_TRUE; 4420 } else { 4421 boolean_t should_clear = dsl_scan_should_clear(scn); 4422 if (should_clear && !scn->scn_clearing) { 4423 zfs_dbgmsg("begin scan clearing for %s", 4424 spa->spa_name); 4425 scn->scn_clearing = B_TRUE; 4426 } else if (!should_clear && scn->scn_clearing) { 4427 zfs_dbgmsg("finish scan clearing for %s", 4428 spa->spa_name); 4429 scn->scn_clearing = B_FALSE; 4430 } 4431 } 4432 } else { 4433 ASSERT0(scn->scn_checkpointing); 4434 ASSERT0(scn->scn_clearing); 4435 } 4436 4437 if (!scn->scn_clearing && scn->scn_done_txg == 0) { 4438 /* Need to scan metadata for more blocks to scrub */ 4439 dsl_scan_phys_t *scnp = &scn->scn_phys; 4440 taskqid_t prefetch_tqid; 4441 4442 /* 4443 * Calculate the max number of in-flight bytes for pool-wide 4444 * scanning operations (minimum 1MB, maximum 1/4 of arc_c_max). 4445 * Limits for the issuing phase are done per top-level vdev and 4446 * are handled separately. 
4447 */ 4448 scn->scn_maxinflight_bytes = MIN(arc_c_max / 4, MAX(1ULL << 20, 4449 zfs_scan_vdev_limit * dsl_scan_count_data_disks(spa))); 4450 4451 if (scnp->scn_ddt_bookmark.ddb_class <= 4452 scnp->scn_ddt_class_max) { 4453 ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark)); 4454 zfs_dbgmsg("doing scan sync for %s txg %llu; " 4455 "ddt bm=%llu/%llu/%llu/%llx", 4456 spa->spa_name, 4457 (longlong_t)tx->tx_txg, 4458 (longlong_t)scnp->scn_ddt_bookmark.ddb_class, 4459 (longlong_t)scnp->scn_ddt_bookmark.ddb_type, 4460 (longlong_t)scnp->scn_ddt_bookmark.ddb_checksum, 4461 (longlong_t)scnp->scn_ddt_bookmark.ddb_cursor); 4462 } else { 4463 zfs_dbgmsg("doing scan sync for %s txg %llu; " 4464 "bm=%llu/%llu/%llu/%llu", 4465 spa->spa_name, 4466 (longlong_t)tx->tx_txg, 4467 (longlong_t)scnp->scn_bookmark.zb_objset, 4468 (longlong_t)scnp->scn_bookmark.zb_object, 4469 (longlong_t)scnp->scn_bookmark.zb_level, 4470 (longlong_t)scnp->scn_bookmark.zb_blkid); 4471 } 4472 4473 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 4474 NULL, ZIO_FLAG_CANFAIL); 4475 4476 scn->scn_prefetch_stop = B_FALSE; 4477 prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq, 4478 dsl_scan_prefetch_thread, scn, TQ_SLEEP); 4479 ASSERT(prefetch_tqid != TASKQID_INVALID); 4480 4481 dsl_pool_config_enter(dp, FTAG); 4482 dsl_scan_visit(scn, tx); 4483 dsl_pool_config_exit(dp, FTAG); 4484 4485 mutex_enter(&dp->dp_spa->spa_scrub_lock); 4486 scn->scn_prefetch_stop = B_TRUE; 4487 cv_broadcast(&spa->spa_scrub_io_cv); 4488 mutex_exit(&dp->dp_spa->spa_scrub_lock); 4489 4490 taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid); 4491 (void) zio_wait(scn->scn_zio_root); 4492 scn->scn_zio_root = NULL; 4493 4494 zfs_dbgmsg("scan visited %llu blocks of %s in %llums " 4495 "(%llu os's, %llu holes, %llu < mintxg, " 4496 "%llu in ddt, %llu > maxtxg)", 4497 (longlong_t)scn->scn_visited_this_txg, 4498 spa->spa_name, 4499 (longlong_t)NSEC2MSEC(gethrtime() - 4500 scn->scn_sync_start_time), 4501 (longlong_t)scn->scn_objsets_visited_this_txg, 4502 (longlong_t)scn->scn_holes_this_txg, 4503 (longlong_t)scn->scn_lt_min_this_txg, 4504 (longlong_t)scn->scn_ddt_contained_this_txg, 4505 (longlong_t)scn->scn_gt_max_this_txg); 4506 4507 if (!scn->scn_suspending) { 4508 ASSERT0(avl_numnodes(&scn->scn_queue)); 4509 scn->scn_done_txg = tx->tx_txg + 1; 4510 if (scn->scn_is_sorted) { 4511 scn->scn_checkpointing = B_TRUE; 4512 scn->scn_clearing = B_TRUE; 4513 scn->scn_issued_before_pass += 4514 spa->spa_scan_pass_issued; 4515 spa_scan_stat_init(spa); 4516 } 4517 zfs_dbgmsg("scan complete for %s txg %llu", 4518 spa->spa_name, 4519 (longlong_t)tx->tx_txg); 4520 } 4521 } else if (scn->scn_is_sorted && scn->scn_queues_pending != 0) { 4522 ASSERT(scn->scn_clearing); 4523 4524 /* need to issue scrubbing IOs from per-vdev queues */ 4525 scn->scn_zio_root = zio_root(dp->dp_spa, NULL, 4526 NULL, ZIO_FLAG_CANFAIL); 4527 scan_io_queues_run(scn); 4528 (void) zio_wait(scn->scn_zio_root); 4529 scn->scn_zio_root = NULL; 4530 4531 /* calculate and dprintf the current memory usage */ 4532 (void) dsl_scan_should_clear(scn); 4533 dsl_scan_update_stats(scn); 4534 4535 zfs_dbgmsg("scan issued %llu blocks for %s (%llu segs) " 4536 "in %llums (avg_block_size = %llu, avg_seg_size = %llu)", 4537 (longlong_t)scn->scn_zios_this_txg, 4538 spa->spa_name, 4539 (longlong_t)scn->scn_segs_this_txg, 4540 (longlong_t)NSEC2MSEC(gethrtime() - 4541 scn->scn_sync_start_time), 4542 (longlong_t)scn->scn_avg_zio_size_this_txg, 4543 (longlong_t)scn->scn_avg_seg_size_this_txg); 4544 } else if (scn->scn_done_txg != 0 && scn->scn_done_txg 
<= tx->tx_txg) { 4545 /* Finished with everything. Mark the scrub as complete */ 4546 zfs_dbgmsg("scan issuing complete txg %llu for %s", 4547 (longlong_t)tx->tx_txg, 4548 spa->spa_name); 4549 ASSERT3U(scn->scn_done_txg, !=, 0); 4550 ASSERT0(spa->spa_scrub_inflight); 4551 ASSERT0(scn->scn_queues_pending); 4552 dsl_scan_done(scn, B_TRUE, tx); 4553 sync_type = SYNC_MANDATORY; 4554 } 4555 4556 dsl_scan_sync_state(scn, tx, sync_type); 4557 } 4558 4559 static void 4560 count_block_issued(spa_t *spa, const blkptr_t *bp, boolean_t all) 4561 { 4562 /* 4563 * Don't count embedded bp's, since we already did the work of 4564 * scanning these when we scanned the containing block. 4565 */ 4566 if (BP_IS_EMBEDDED(bp)) 4567 return; 4568 4569 /* 4570 * Update the spa's stats on how many bytes we have issued. 4571 * Sequential scrubs create a zio for each DVA of the bp. Each 4572 * of these will include all DVAs for repair purposes, but the 4573 * zio code will only try the first one unless there is an issue. 4574 * Therefore, we should only count the first DVA for these IOs. 4575 */ 4576 atomic_add_64(&spa->spa_scan_pass_issued, 4577 all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0])); 4578 } 4579 4580 static void 4581 count_block_skipped(dsl_scan_t *scn, const blkptr_t *bp, boolean_t all) 4582 { 4583 if (BP_IS_EMBEDDED(bp)) 4584 return; 4585 atomic_add_64(&scn->scn_phys.scn_skipped, 4586 all ? BP_GET_ASIZE(bp) : DVA_GET_ASIZE(&bp->blk_dva[0])); 4587 } 4588 4589 static void 4590 count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp) 4591 { 4592 /* 4593 * If we resume after a reboot, zab will be NULL; don't record 4594 * incomplete stats in that case. 4595 */ 4596 if (zab == NULL) 4597 return; 4598 4599 for (int i = 0; i < 4; i++) { 4600 int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS; 4601 int t = (i & 1) ? 
BP_GET_TYPE(bp) : DMU_OT_TOTAL; 4602 4603 if (t & DMU_OT_NEWTYPE) 4604 t = DMU_OT_OTHER; 4605 zfs_blkstat_t *zb = &zab->zab_type[l][t]; 4606 int equal; 4607 4608 zb->zb_count++; 4609 zb->zb_asize += BP_GET_ASIZE(bp); 4610 zb->zb_lsize += BP_GET_LSIZE(bp); 4611 zb->zb_psize += BP_GET_PSIZE(bp); 4612 zb->zb_gangs += BP_COUNT_GANG(bp); 4613 4614 switch (BP_GET_NDVAS(bp)) { 4615 case 2: 4616 if (DVA_GET_VDEV(&bp->blk_dva[0]) == 4617 DVA_GET_VDEV(&bp->blk_dva[1])) 4618 zb->zb_ditto_2_of_2_samevdev++; 4619 break; 4620 case 3: 4621 equal = (DVA_GET_VDEV(&bp->blk_dva[0]) == 4622 DVA_GET_VDEV(&bp->blk_dva[1])) + 4623 (DVA_GET_VDEV(&bp->blk_dva[0]) == 4624 DVA_GET_VDEV(&bp->blk_dva[2])) + 4625 (DVA_GET_VDEV(&bp->blk_dva[1]) == 4626 DVA_GET_VDEV(&bp->blk_dva[2])); 4627 if (equal == 1) 4628 zb->zb_ditto_2_of_3_samevdev++; 4629 else if (equal == 3) 4630 zb->zb_ditto_3_of_3_samevdev++; 4631 break; 4632 } 4633 } 4634 } 4635 4636 static void 4637 scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio) 4638 { 4639 avl_index_t idx; 4640 dsl_scan_t *scn = queue->q_scn; 4641 4642 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 4643 4644 if (unlikely(avl_is_empty(&queue->q_sios_by_addr))) 4645 atomic_add_64(&scn->scn_queues_pending, 1); 4646 if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) { 4647 /* block is already scheduled for reading */ 4648 sio_free(sio); 4649 return; 4650 } 4651 avl_insert(&queue->q_sios_by_addr, sio, idx); 4652 queue->q_sio_memused += SIO_GET_MUSED(sio); 4653 range_tree_add(queue->q_exts_by_addr, SIO_GET_OFFSET(sio), 4654 SIO_GET_ASIZE(sio)); 4655 } 4656 4657 /* 4658 * Given all the info we got from our metadata scanning process, we 4659 * construct a scan_io_t and insert it into the scan sorting queue. The 4660 * I/O must already be suitable for us to process. This is controlled 4661 * by dsl_scan_enqueue(). 4662 */ 4663 static void 4664 scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i, 4665 int zio_flags, const zbookmark_phys_t *zb) 4666 { 4667 scan_io_t *sio = sio_alloc(BP_GET_NDVAS(bp)); 4668 4669 ASSERT0(BP_IS_GANG(bp)); 4670 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 4671 4672 bp2sio(bp, sio, dva_i); 4673 sio->sio_flags = zio_flags; 4674 sio->sio_zb = *zb; 4675 4676 queue->q_last_ext_addr = -1; 4677 scan_io_queue_insert_impl(queue, sio); 4678 } 4679 4680 /* 4681 * Given a set of I/O parameters as discovered by the metadata traversal 4682 * process, attempts to place the I/O into the sorted queues (if allowed), 4683 * or immediately executes the I/O. 4684 */ 4685 static void 4686 dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 4687 const zbookmark_phys_t *zb) 4688 { 4689 spa_t *spa = dp->dp_spa; 4690 4691 ASSERT(!BP_IS_EMBEDDED(bp)); 4692 4693 /* 4694 * Gang blocks are hard to issue sequentially, so we just issue them 4695 * here immediately instead of queuing them. 
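Everything else is split up per DVA and inserted into the scan queue of the top-level vdev owning that DVA, creating the queue on first use.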
4696 */ 4697 if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) { 4698 scan_exec_io(dp, bp, zio_flags, zb, NULL); 4699 return; 4700 } 4701 4702 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 4703 dva_t dva; 4704 vdev_t *vdev; 4705 4706 dva = bp->blk_dva[i]; 4707 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva)); 4708 ASSERT(vdev != NULL); 4709 4710 mutex_enter(&vdev->vdev_scan_io_queue_lock); 4711 if (vdev->vdev_scan_io_queue == NULL) 4712 vdev->vdev_scan_io_queue = scan_io_queue_create(vdev); 4713 ASSERT(dp->dp_scan != NULL); 4714 scan_io_queue_insert(vdev->vdev_scan_io_queue, bp, 4715 i, zio_flags, zb); 4716 mutex_exit(&vdev->vdev_scan_io_queue_lock); 4717 } 4718 } 4719 4720 static int 4721 dsl_scan_scrub_cb(dsl_pool_t *dp, 4722 const blkptr_t *bp, const zbookmark_phys_t *zb) 4723 { 4724 dsl_scan_t *scn = dp->dp_scan; 4725 spa_t *spa = dp->dp_spa; 4726 uint64_t phys_birth = BP_GET_BIRTH(bp); 4727 size_t psize = BP_GET_PSIZE(bp); 4728 boolean_t needs_io = B_FALSE; 4729 int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; 4730 4731 count_block(dp->dp_blkstats, bp); 4732 if (phys_birth <= scn->scn_phys.scn_min_txg || 4733 phys_birth >= scn->scn_phys.scn_max_txg) { 4734 count_block_skipped(scn, bp, B_TRUE); 4735 return (0); 4736 } 4737 4738 /* Embedded BP's have phys_birth==0, so we reject them above. */ 4739 ASSERT(!BP_IS_EMBEDDED(bp)); 4740 4741 ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn)); 4742 if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) { 4743 zio_flags |= ZIO_FLAG_SCRUB; 4744 needs_io = B_TRUE; 4745 } else { 4746 ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER); 4747 zio_flags |= ZIO_FLAG_RESILVER; 4748 needs_io = B_FALSE; 4749 } 4750 4751 /* If it's an intent log block, failure is expected. */ 4752 if (zb->zb_level == ZB_ZIL_LEVEL) 4753 zio_flags |= ZIO_FLAG_SPECULATIVE; 4754 4755 for (int d = 0; d < BP_GET_NDVAS(bp); d++) { 4756 const dva_t *dva = &bp->blk_dva[d]; 4757 4758 /* 4759 * Keep track of how much data we've examined so that 4760 * zpool(8) status can make useful progress reports. 
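scn_examined counts every DVA we look at, even blocks we end up skipping; bytes actually issued or skipped are accounted separately in count_block_issued() and count_block_skipped().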
4761 */ 4762 uint64_t asize = DVA_GET_ASIZE(dva); 4763 scn->scn_phys.scn_examined += asize; 4764 spa->spa_scan_pass_exam += asize; 4765 4766 /* if it's a resilver, this may not be in the target range */ 4767 if (!needs_io) 4768 needs_io = dsl_scan_need_resilver(spa, dva, psize, 4769 phys_birth); 4770 } 4771 4772 if (needs_io && !zfs_no_scrub_io) { 4773 dsl_scan_enqueue(dp, bp, zio_flags, zb); 4774 } else { 4775 count_block_skipped(scn, bp, B_TRUE); 4776 } 4777 4778 /* do not relocate this block */ 4779 return (0); 4780 } 4781 4782 static void 4783 dsl_scan_scrub_done(zio_t *zio) 4784 { 4785 spa_t *spa = zio->io_spa; 4786 blkptr_t *bp = zio->io_bp; 4787 dsl_scan_io_queue_t *queue = zio->io_private; 4788 4789 abd_free(zio->io_abd); 4790 4791 if (queue == NULL) { 4792 mutex_enter(&spa->spa_scrub_lock); 4793 ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp)); 4794 spa->spa_scrub_inflight -= BP_GET_PSIZE(bp); 4795 cv_broadcast(&spa->spa_scrub_io_cv); 4796 mutex_exit(&spa->spa_scrub_lock); 4797 } else { 4798 mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock); 4799 ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp)); 4800 queue->q_inflight_bytes -= BP_GET_PSIZE(bp); 4801 cv_broadcast(&queue->q_zio_cv); 4802 mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock); 4803 } 4804 4805 if (zio->io_error && (zio->io_error != ECKSUM || 4806 !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) { 4807 if (dsl_errorscrubbing(spa->spa_dsl_pool) && 4808 !dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) { 4809 atomic_inc_64(&spa->spa_dsl_pool->dp_scan 4810 ->errorscrub_phys.dep_errors); 4811 } else { 4812 atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys 4813 .scn_errors); 4814 } 4815 } 4816 } 4817 4818 /* 4819 * Given a scanning zio's information, executes the zio. The zio need 4820 * not necessarily be only sortable, this function simply executes the 4821 * zio, no matter what it is. The optional queue argument allows the 4822 * caller to specify that they want per top level vdev IO rate limiting 4823 * instead of the legacy global limiting. 4824 */ 4825 static void 4826 scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags, 4827 const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue) 4828 { 4829 spa_t *spa = dp->dp_spa; 4830 dsl_scan_t *scn = dp->dp_scan; 4831 size_t size = BP_GET_PSIZE(bp); 4832 abd_t *data = abd_alloc_for_io(size, B_FALSE); 4833 zio_t *pio; 4834 4835 if (queue == NULL) { 4836 ASSERT3U(scn->scn_maxinflight_bytes, >, 0); 4837 mutex_enter(&spa->spa_scrub_lock); 4838 while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes) 4839 cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock); 4840 spa->spa_scrub_inflight += BP_GET_PSIZE(bp); 4841 mutex_exit(&spa->spa_scrub_lock); 4842 pio = scn->scn_zio_root; 4843 } else { 4844 kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock; 4845 4846 ASSERT3U(queue->q_maxinflight_bytes, >, 0); 4847 mutex_enter(q_lock); 4848 while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes) 4849 cv_wait(&queue->q_zio_cv, q_lock); 4850 queue->q_inflight_bytes += BP_GET_PSIZE(bp); 4851 pio = queue->q_zio; 4852 mutex_exit(q_lock); 4853 } 4854 4855 ASSERT(pio != NULL); 4856 count_block_issued(spa, bp, queue == NULL); 4857 zio_nowait(zio_read(pio, spa, bp, data, size, dsl_scan_scrub_done, 4858 queue, ZIO_PRIORITY_SCRUB, zio_flags, zb)); 4859 } 4860 4861 /* 4862 * This is the primary extent sorting algorithm. 
We balance two parameters: 4863 * 1) how many bytes of I/O are in an extent 4864 * 2) how well the extent is filled with I/O (as a fraction of its total size) 4865 * Since we allow extents to have gaps between their constituent I/Os, it's 4866 * possible to have a fairly large extent that contains the same amount of 4867 * I/O bytes as a much smaller extent, which just packs the I/O more tightly. 4868 * The algorithm sorts based on a score calculated from the extent's size, 4869 * the relative fill volume (in %) and a "fill weight" parameter that controls 4870 * the split between whether we prefer larger extents or more well-populated 4871 * extents: 4872 * 4873 * SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT) 4874 * 4875 * Example: 4876 * 1) assume extsz = 64 MiB 4877 * 2) assume fill = 32 MiB (extent is half full) 4878 * 3) assume fill_weight = 3 4879 * 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100 4880 * SCORE = 32M + (50 * 3 * 32M) / 100 4881 * SCORE = 32M + (4800M / 100) 4882 * SCORE = 32M + 48M 4883 * ^ ^ 4884 * | +--- final total relative fill-based score 4885 * +--------- final total fill-based score 4886 * SCORE = 80M 4887 * 4888 * As can be seen, at fill_weight=3 the algorithm is slightly biased towards 4889 * extents that are more completely filled (in a 3:2 ratio) vs just larger. 4890 * Note that as an optimization, we replace multiplication and division by 4891 * 100 with bitshifting by 7 (which effectively multiplies and divides by 128). 4892 * 4893 * Since we do not care whether one extent is only a few percent better than 4894 * another, we compress the score into 6 bits via a binary logarithm (i.e. 4895 * highbit64()) and place it in the high bits of the offset, which are 4896 * otherwise unused due to ashift. This allows us to reduce q_exts_by_size 4897 * B-tree elements to only 64 bits and compare them with a single operation. 4898 * It also makes scrubs more sequential and reduces the chance that a minor extent change moves it within the B-tree.
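Continuing the example above: highbit64(80M) is 27, so 64 - 27 = 37 ends up in the top bits of the B-tree element while the (ashift-shifted) extent start fills the low bits. Extents whose score falls in a higher power-of-two bucket thus get numerically smaller keys and sort towards the front of q_exts_by_size (zfs_btree_first() in scan_io_queue_fetch_ext() picks the front element), with ties broken by start offset in LBA order.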
4899 */ 4900 __attribute__((always_inline)) inline 4901 static int 4902 ext_size_compare(const void *x, const void *y) 4903 { 4904 const uint64_t *a = x, *b = y; 4905 4906 return (TREE_CMP(*a, *b)); 4907 } 4908 4909 ZFS_BTREE_FIND_IN_BUF_FUNC(ext_size_find_in_buf, uint64_t, 4910 ext_size_compare) 4911 4912 static void 4913 ext_size_create(range_tree_t *rt, void *arg) 4914 { 4915 (void) rt; 4916 zfs_btree_t *size_tree = arg; 4917 4918 zfs_btree_create(size_tree, ext_size_compare, ext_size_find_in_buf, 4919 sizeof (uint64_t)); 4920 } 4921 4922 static void 4923 ext_size_destroy(range_tree_t *rt, void *arg) 4924 { 4925 (void) rt; 4926 zfs_btree_t *size_tree = arg; 4927 ASSERT0(zfs_btree_numnodes(size_tree)); 4928 4929 zfs_btree_destroy(size_tree); 4930 } 4931 4932 static uint64_t 4933 ext_size_value(range_tree_t *rt, range_seg_gap_t *rsg) 4934 { 4935 (void) rt; 4936 uint64_t size = rsg->rs_end - rsg->rs_start; 4937 uint64_t score = rsg->rs_fill + ((((rsg->rs_fill << 7) / size) * 4938 fill_weight * rsg->rs_fill) >> 7); 4939 ASSERT3U(rt->rt_shift, >=, 8); 4940 return (((uint64_t)(64 - highbit64(score)) << 56) | rsg->rs_start); 4941 } 4942 4943 static void 4944 ext_size_add(range_tree_t *rt, range_seg_t *rs, void *arg) 4945 { 4946 zfs_btree_t *size_tree = arg; 4947 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); 4948 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); 4949 zfs_btree_add(size_tree, &v); 4950 } 4951 4952 static void 4953 ext_size_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 4954 { 4955 zfs_btree_t *size_tree = arg; 4956 ASSERT3U(rt->rt_type, ==, RANGE_SEG_GAP); 4957 uint64_t v = ext_size_value(rt, (range_seg_gap_t *)rs); 4958 zfs_btree_remove(size_tree, &v); 4959 } 4960 4961 static void 4962 ext_size_vacate(range_tree_t *rt, void *arg) 4963 { 4964 zfs_btree_t *size_tree = arg; 4965 zfs_btree_clear(size_tree); 4966 zfs_btree_destroy(size_tree); 4967 4968 ext_size_create(rt, arg); 4969 } 4970 4971 static const range_tree_ops_t ext_size_ops = { 4972 .rtop_create = ext_size_create, 4973 .rtop_destroy = ext_size_destroy, 4974 .rtop_add = ext_size_add, 4975 .rtop_remove = ext_size_remove, 4976 .rtop_vacate = ext_size_vacate 4977 }; 4978 4979 /* 4980 * Comparator for the q_sios_by_addr tree. Sorting is simply performed 4981 * based on LBA-order (from lowest to highest). 4982 */ 4983 static int 4984 sio_addr_compare(const void *x, const void *y) 4985 { 4986 const scan_io_t *a = x, *b = y; 4987 4988 return (TREE_CMP(SIO_GET_OFFSET(a), SIO_GET_OFFSET(b))); 4989 } 4990 4991 /* IO queues are created on demand when they are needed. */ 4992 static dsl_scan_io_queue_t * 4993 scan_io_queue_create(vdev_t *vd) 4994 { 4995 dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan; 4996 dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP); 4997 4998 q->q_scn = scn; 4999 q->q_vd = vd; 5000 q->q_sio_memused = 0; 5001 q->q_last_ext_addr = -1; 5002 cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL); 5003 q->q_exts_by_addr = range_tree_create_gap(&ext_size_ops, RANGE_SEG_GAP, 5004 &q->q_exts_by_size, 0, vd->vdev_ashift, zfs_scan_max_ext_gap); 5005 avl_create(&q->q_sios_by_addr, sio_addr_compare, 5006 sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node)); 5007 5008 return (q); 5009 } 5010 5011 /* 5012 * Destroys a scan queue and all segments and scan_io_t's contained in it. 5013 * No further execution of I/O occurs, anything pending in the queue is 5014 * simply freed without being executed. 
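The caller must hold the queue's vdev_scan_io_queue_lock, and scn_queues_pending is dropped here if the queue still held sios, mirroring the accounting done in scan_io_queue_insert_impl().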
5015 */ 5016 void 5017 dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue) 5018 { 5019 dsl_scan_t *scn = queue->q_scn; 5020 scan_io_t *sio; 5021 void *cookie = NULL; 5022 5023 ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock)); 5024 5025 if (!avl_is_empty(&queue->q_sios_by_addr)) 5026 atomic_add_64(&scn->scn_queues_pending, -1); 5027 while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) != 5028 NULL) { 5029 ASSERT(range_tree_contains(queue->q_exts_by_addr, 5030 SIO_GET_OFFSET(sio), SIO_GET_ASIZE(sio))); 5031 queue->q_sio_memused -= SIO_GET_MUSED(sio); 5032 sio_free(sio); 5033 } 5034 5035 ASSERT0(queue->q_sio_memused); 5036 range_tree_vacate(queue->q_exts_by_addr, NULL, queue); 5037 range_tree_destroy(queue->q_exts_by_addr); 5038 avl_destroy(&queue->q_sios_by_addr); 5039 cv_destroy(&queue->q_zio_cv); 5040 5041 kmem_free(queue, sizeof (*queue)); 5042 } 5043 5044 /* 5045 * Properly transfers a dsl_scan_queue_t from `svd' to `tvd'. This is 5046 * called on behalf of vdev_top_transfer when creating or destroying 5047 * a mirror vdev due to zpool attach/detach. 5048 */ 5049 void 5050 dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd) 5051 { 5052 mutex_enter(&svd->vdev_scan_io_queue_lock); 5053 mutex_enter(&tvd->vdev_scan_io_queue_lock); 5054 5055 VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL); 5056 tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue; 5057 svd->vdev_scan_io_queue = NULL; 5058 if (tvd->vdev_scan_io_queue != NULL) 5059 tvd->vdev_scan_io_queue->q_vd = tvd; 5060 5061 mutex_exit(&tvd->vdev_scan_io_queue_lock); 5062 mutex_exit(&svd->vdev_scan_io_queue_lock); 5063 } 5064 5065 static void 5066 scan_io_queues_destroy(dsl_scan_t *scn) 5067 { 5068 vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev; 5069 5070 for (uint64_t i = 0; i < rvd->vdev_children; i++) { 5071 vdev_t *tvd = rvd->vdev_child[i]; 5072 5073 mutex_enter(&tvd->vdev_scan_io_queue_lock); 5074 if (tvd->vdev_scan_io_queue != NULL) 5075 dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue); 5076 tvd->vdev_scan_io_queue = NULL; 5077 mutex_exit(&tvd->vdev_scan_io_queue_lock); 5078 } 5079 } 5080 5081 static void 5082 dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i) 5083 { 5084 dsl_pool_t *dp = spa->spa_dsl_pool; 5085 dsl_scan_t *scn = dp->dp_scan; 5086 vdev_t *vdev; 5087 kmutex_t *q_lock; 5088 dsl_scan_io_queue_t *queue; 5089 scan_io_t *srch_sio, *sio; 5090 avl_index_t idx; 5091 uint64_t start, size; 5092 5093 vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i])); 5094 ASSERT(vdev != NULL); 5095 q_lock = &vdev->vdev_scan_io_queue_lock; 5096 queue = vdev->vdev_scan_io_queue; 5097 5098 mutex_enter(q_lock); 5099 if (queue == NULL) { 5100 mutex_exit(q_lock); 5101 return; 5102 } 5103 5104 srch_sio = sio_alloc(BP_GET_NDVAS(bp)); 5105 bp2sio(bp, srch_sio, dva_i); 5106 start = SIO_GET_OFFSET(srch_sio); 5107 size = SIO_GET_ASIZE(srch_sio); 5108 5109 /* 5110 * We can find the zio in two states: 5111 * 1) Cold, just sitting in the queue of zio's to be issued at 5112 * some point in the future. In this case, all we do is 5113 * remove the zio from the q_sios_by_addr tree, decrement 5114 * its data volume from the containing range_seg_t and 5115 * resort the q_exts_by_size tree to reflect that the 5116 * range_seg_t has lost some of its 'fill'. We don't shorten 5117 * the range_seg_t - this is usually rare enough not to be 5118 * worth the extra hassle of trying keep track of precise 5119 * extent boundaries. 5120 * 2) Hot, where the zio is currently in-flight in 5121 * dsl_scan_issue_ios. 
In this case, we can't simply 5122 * reach in and stop the in-flight zio's, so we instead 5123 * block the caller. Eventually, dsl_scan_issue_ios will 5124 * be done with issuing the zio's it gathered and will 5125 * signal us. 5126 */ 5127 sio = avl_find(&queue->q_sios_by_addr, srch_sio, &idx); 5128 sio_free(srch_sio); 5129 5130 if (sio != NULL) { 5131 blkptr_t tmpbp; 5132 5133 /* Got it while it was cold in the queue */ 5134 ASSERT3U(start, ==, SIO_GET_OFFSET(sio)); 5135 ASSERT3U(size, ==, SIO_GET_ASIZE(sio)); 5136 avl_remove(&queue->q_sios_by_addr, sio); 5137 if (avl_is_empty(&queue->q_sios_by_addr)) 5138 atomic_add_64(&scn->scn_queues_pending, -1); 5139 queue->q_sio_memused -= SIO_GET_MUSED(sio); 5140 5141 ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size)); 5142 range_tree_remove_fill(queue->q_exts_by_addr, start, size); 5143 5144 /* count the block as though we skipped it */ 5145 sio2bp(sio, &tmpbp); 5146 count_block_skipped(scn, &tmpbp, B_FALSE); 5147 5148 sio_free(sio); 5149 } 5150 mutex_exit(q_lock); 5151 } 5152 5153 /* 5154 * Callback invoked when a zio_free() zio is executing. This needs to be 5155 * intercepted to prevent the zio from deallocating a particular portion 5156 * of disk space and it then getting reallocated and written to, while we 5157 * still have it queued up for processing. 5158 */ 5159 void 5160 dsl_scan_freed(spa_t *spa, const blkptr_t *bp) 5161 { 5162 dsl_pool_t *dp = spa->spa_dsl_pool; 5163 dsl_scan_t *scn = dp->dp_scan; 5164 5165 ASSERT(!BP_IS_EMBEDDED(bp)); 5166 ASSERT(scn != NULL); 5167 if (!dsl_scan_is_running(scn)) 5168 return; 5169 5170 for (int i = 0; i < BP_GET_NDVAS(bp); i++) 5171 dsl_scan_freed_dva(spa, bp, i); 5172 } 5173 5174 /* 5175 * Check if a vdev needs resilvering (non-empty DTL), if so, and resilver has 5176 * not started, start it. Otherwise, only restart if max txg in DTL range is 5177 * greater than the max txg in the current scan. If the DTL max is less than 5178 * the scan max, then the vdev has not missed any new data since the resilver 5179 * started, so a restart is not needed. 
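When a restart is needed and the RESILVER_DEFER feature is enabled, the vdev is only marked via vdev_defer_resilver() and the new work is picked up once the current resilver finishes; otherwise a new resilver is requested immediately.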
5180 */ 5181 void 5182 dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd) 5183 { 5184 uint64_t min, max; 5185 5186 if (!vdev_resilver_needed(vd, &min, &max)) 5187 return; 5188 5189 if (!dsl_scan_resilvering(dp)) { 5190 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 5191 return; 5192 } 5193 5194 if (max <= dp->dp_scan->scn_phys.scn_max_txg) 5195 return; 5196 5197 /* restart is needed, check if it can be deferred */ 5198 if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)) 5199 vdev_defer_resilver(vd); 5200 else 5201 spa_async_request(dp->dp_spa, SPA_ASYNC_RESILVER); 5202 } 5203 5204 ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, U64, ZMOD_RW, 5205 "Max bytes in flight per leaf vdev for scrubs and resilvers"); 5206 5207 ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW, 5208 "Min millisecs to scrub per txg"); 5209 5210 ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW, 5211 "Min millisecs to obsolete per txg"); 5212 5213 ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW, 5214 "Min millisecs to free per txg"); 5215 5216 ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW, 5217 "Min millisecs to resilver per txg"); 5218 5219 ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW, 5220 "Set to prevent scans from progressing"); 5221 5222 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_io, INT, ZMOD_RW, 5223 "Set to disable scrub I/O"); 5224 5225 ZFS_MODULE_PARAM(zfs, zfs_, no_scrub_prefetch, INT, ZMOD_RW, 5226 "Set to disable scrub prefetching"); 5227 5228 ZFS_MODULE_PARAM(zfs, zfs_, async_block_max_blocks, U64, ZMOD_RW, 5229 "Max number of blocks freed in one txg"); 5230 5231 ZFS_MODULE_PARAM(zfs, zfs_, max_async_dedup_frees, U64, ZMOD_RW, 5232 "Max number of dedup blocks freed in one txg"); 5233 5234 ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, 5235 "Enable processing of the free_bpobj"); 5236 5237 ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW, 5238 "Enable block statistics calculation during scrub"); 5239 5240 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW, 5241 "Fraction of RAM for scan hard limit"); 5242 5243 ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW, 5244 "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); 5245 5246 ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW, 5247 "Scrub using legacy non-sequential method"); 5248 5249 ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW, 5250 "Scan progress on-disk checkpointing interval"); 5251 5252 ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, U64, ZMOD_RW, 5253 "Max gap in bytes between sequential scrub / resilver I/Os"); 5254 5255 ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW, 5256 "Fraction of hard limit used as soft limit"); 5257 5258 ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW, 5259 "Tunable to attempt to reduce lock contention"); 5260 5261 ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW, 5262 "Tunable to adjust bias towards more filled segments during scans"); 5263 5264 ZFS_MODULE_PARAM(zfs, zfs_, scan_report_txgs, UINT, ZMOD_RW, 5265 "Tunable to report resilver performance over the last N txgs"); 5266 5267 ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW, 5268 "Process all resilvers immediately"); 5269 5270 ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW, 5271 "Error blocks to be scrubbed in one txg"); 5272 /* END CSTYLED */ 5273
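/*
 * For reference, on Linux the tunables declared above are exposed as module
 * parameters named with the zfs_ prefix (e.g. zfs_scan_vdev_limit,
 * zfs_scan_legacy) and can typically be inspected or changed at runtime via
 * /sys/module/zfs/parameters/, for example:
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_scan_suspend_progress
 *
 * or set persistently as module options (e.g. "options zfs zfs_scan_legacy=1"
 * in /etc/modprobe.d/zfs.conf). The exact mechanism varies by platform;
 * FreeBSD exposes equivalent sysctls under vfs.zfs.
 */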