/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/trace_zfs.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool. Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz or mirrors. (Note that members of a mirror can be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev. The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */
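/*
 * Bookkeeping shared between the removal thread and the copy zio
 * callbacks. vca_outstanding_bytes counts copy i/o that has been
 * issued but has not yet completed; together with vca_cv and vca_lock
 * it throttles the removal thread once zfs_remove_max_copy_bytes of
 * i/o is in flight. The error byte counts accumulate the sizes of
 * failed reads and writes so the removal can be cancelled if the data
 * could not be copied intact.
 */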
typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	uint64_t	vca_read_error_bytes;
	uint64_t	vca_write_error_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal. This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;

/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * See also the accessor function spa_remove_max_segment().
 */
int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;

/*
 * Ignore hard IO errors during device removal. When set, if a device
 * encounters a hard IO error during the removal process, the removal
 * will not be cancelled. This can result in a normally recoverable block
 * becoming permanently damaged and is not recommended.
 */
int zfs_removal_ignore_errors = 0;

/*
 * Allow a remap segment to span free chunks of at most this size. The main
 * impact of a larger span is that we will read and write larger, more
 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
 * for iops. The value here was chosen to align with
 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
 * reads (but there's no reason it has to be the same).
 *
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
 */
int vdev_removal_max_span = 32 * 1024;
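/*
 * Worked example (illustrative): with the default 32KB span, two
 * allocated segments separated by a 24KB free gap can be combined into
 * a single mapping entry and copied with one larger read/write (the
 * free gap is copied along with them), whereas a 40KB gap would force
 * them into separate mapping entries.
 */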
/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
int zfs_removal_suspend_progress = 0;

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"

static void spa_vdev_remove_thread(void *arg);
static int spa_vdev_remove_cancel_impl(spa_t *spa);

static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}

static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}

static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, RANGE_SEG64, NULL,
		    0, 0);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}

void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}
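/*
 * Lifecycle note: a spa_vdev_removal_t is created either by
 * vdev_remove_initiate_sync() (below) when a removal is started, or by
 * spa_remove_init() when importing a pool with a removal in progress,
 * and is torn down by spa_finish_removal() via
 * spa_vdev_removal_destroy().
 */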
/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 *  - initializing the spa_removing_phys which tracks this removal
 *  - computing the amount of space to remove for accounting purposes
 *  - dirtying all dbufs in the spa_config_object
 *  - creating the spa_vdev_removal
 *  - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
	spa_vdev_removal_t *svr = NULL;
	uint64_t txg __maybe_unused = dmu_tx_get_txg(tx);

	ASSERT0(vdev_get_nparity(vd));
	svr = spa_vdev_removal_create(vd);

	ASSERT(vd->vdev_removing);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		/*
		 * By activating the OBSOLETE_COUNTS feature, we prevent
		 * the pool from being downgraded and ensure that the
		 * refcounts are precise.
		 */
		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		uint64_t one = 1;
		VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
		    &one, tx));
		boolean_t are_precise __maybe_unused;
		ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
		ASSERT3B(are_precise, ==, B_TRUE);
	}

	vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
	vd->vdev_indirect_mapping =
	    vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
	vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
	vd->vdev_indirect_births =
	    vdev_indirect_births_open(mos, vic->vic_births_object);
	spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
	spa->spa_removing_phys.sr_start_time = gethrestime_sec();
	spa->spa_removing_phys.sr_end_time = 0;
	spa->spa_removing_phys.sr_state = DSS_SCANNING;
	spa->spa_removing_phys.sr_to_copy = 0;
	spa->spa_removing_phys.sr_copied = 0;

	/*
	 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
	 * there may be space in the defer tree, which is free, but still
	 * counted in vs_alloc.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *ms = vd->vdev_ms[i];
		if (ms->ms_sm == NULL)
			continue;

		spa->spa_removing_phys.sr_to_copy +=
		    metaslab_allocated_space(ms);

		/*
		 * Space which we are freeing this txg does not need to
		 * be copied.
		 */
		spa->spa_removing_phys.sr_to_copy -=
		    range_tree_space(ms->ms_freeing);

		ASSERT0(range_tree_space(ms->ms_freed));
		for (int t = 0; t < TXG_SIZE; t++)
			ASSERT0(range_tree_space(ms->ms_allocating[t]));
	}

	/*
	 * Sync tasks are called before metaslab_sync(), so there should
	 * be no already-synced metaslabs in the TXG_CLEAN list.
	 */
	ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

	spa_sync_removing_state(spa, tx);
	/*
	 * All blocks that we need to read the most recent mapping must be
	 * stored on concrete vdevs. Therefore, we must dirty anything that
	 * is read before spa_remove_init(). Specifically, the
	 * spa_config_object. (Note that although we already modified the
	 * spa_config_object in spa_sync_removing_state, that may not have
	 * modified all blocks of the object.)
	 */
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
	for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
		dmu_buf_t *dbuf;
		VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
		    offset, FTAG, &dbuf, 0));
		dmu_buf_will_dirty(dbuf, tx);
		offset += dbuf->db_size;
		dmu_buf_rele(dbuf, FTAG);
	}

	/*
	 * Now that we've allocated the im_object, dirty the vdev to ensure
	 * that the object gets written to the config on disk.
	 */
	vdev_config_dirty(vd);

	zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
	    "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
	    vic->vic_mapping_object);

	spa_history_log_internal(spa, "vdev remove started", tx,
	    "%s vdev %llu %s", spa_name(spa), (u_longlong_t)vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
	/*
	 * Setting spa_vdev_removal causes subsequent frees to call
	 * free_from_removing_vdev(). Note that we don't need any locking
	 * because we are the sync thread, and metaslab_free_impl() is only
	 * called from syncing context (potentially from a zio taskq thread,
	 * but in any case only when there are outstanding free i/os, which
	 * there are not).
	 */
	ASSERT3P(spa->spa_vdev_removal, ==, NULL);
	spa->spa_vdev_removal = svr;
	svr->svr_thread = thread_create(NULL, 0,
	    spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}
/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed. We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
	int error;

	error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys);

	if (error == ENOENT) {
		spa->spa_removing_phys.sr_state = DSS_NONE;
		spa->spa_removing_phys.sr_removing_vdev = -1;
		spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
		spa->spa_indirect_vdevs_loaded = B_TRUE;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
		/*
		 * We are currently removing a vdev. Create and
		 * initialize a spa_vdev_removal_t from the bonus
		 * buffer of the removing vdev's vdev_im_object, and
		 * initialize its partial mapping.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		vdev_t *vd = vdev_lookup_top(spa,
		    spa->spa_removing_phys.sr_removing_vdev);

		if (vd == NULL) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return (EINVAL);
		}

		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT(vdev_is_concrete(vd));
		spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
		ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
		ASSERT(vd->vdev_removing);

		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);
		spa_config_exit(spa, SCL_STATE, FTAG);

		spa->spa_vdev_removal = svr;
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
		vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);

		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * Now that we've loaded all the indirect mappings, we can allow
	 * reads from other blocks (e.g. via predictive prefetch).
	 */
	spa->spa_indirect_vdevs_loaded = B_TRUE;
	return (0);
}

void
spa_restart_removal(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	/*
	 * In general when this function is called there is no
	 * removal thread running. The only scenario where this
	 * is not true is during spa_import() where this function
	 * is called twice [once from spa_import_impl() and
	 * spa_async_resume()]. Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
	if (svr->svr_thread != NULL)
		return;

	if (!spa_writeable(spa))
		return;

	zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
	svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
	    0, &p0, TS_RUN, minclsyspri);
}

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t txg = spa_syncing_txg(spa);
	uint64_t max_offset_yet = 0;

	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vim));
	ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);

	mutex_enter(&svr->svr_lock);
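	/*
	 * A single free may overlap up to three classes of space, handled
	 * in order below: space whose mapping is already synced to disk,
	 * space whose mapping is in flight in an open txg, and space that
	 * the copy thread has not yet visited.
	 */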
	/*
	 * Remove the segment from the removing vdev's spacemap. This
	 * ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures
	 * that we know what is actually allocated on the new vdevs
	 * (needed if we cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread can not load this metaslab and then
	 * visit this offset between the time that we metaslab_free_concrete()
	 * and when we check to see if it has been visited.
	 *
	 * Note: The checkpoint flag is set to false as having/taking
	 * a checkpoint and removing a device can't happen at the same
	 * time.
	 */
	ASSERT(!spa_has_checkpoint(spa));
	metaslab_free_concrete(vd, offset, size, B_FALSE);

	uint64_t synced_size = 0;
	uint64_t synced_offset = 0;
	uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
	if (offset < max_offset_synced) {
		/*
		 * The mapping for this offset is already on disk.
		 * Free from the new location.
		 *
		 * Note that we use svr_max_synced_offset because it is
		 * updated atomically with respect to the in-core mapping.
		 * By contrast, vim_max_offset is not.
		 *
		 * This block may be split between a synced entry and an
		 * in-flight or unvisited entry. Only process the synced
		 * portion of it here.
		 */
		synced_size = MIN(size, max_offset_synced - offset);
		synced_offset = offset;

		ASSERT3U(max_offset_yet, <=, max_offset_synced);
		max_offset_yet = max_offset_synced;

		DTRACE_PROBE3(remove__free__synced,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, synced_size);

		size -= synced_size;
		offset += synced_size;
	}
	/*
	 * Look at all in-flight txgs starting from the currently syncing one
	 * and see if a section of this free is being copied. By starting from
	 * this txg and iterating forward, we might find that this region
	 * was copied in two different txgs and handle it appropriately.
	 */
	for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
		int txgoff = (txg + i) & TXG_MASK;
		if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
			/*
			 * The mapping for this offset is in flight, and
			 * will be synced in txg+i.
			 */
			uint64_t inflight_size = MIN(size,
			    svr->svr_max_offset_to_sync[txgoff] - offset);

			DTRACE_PROBE4(remove__free__inflight,
			    spa_t *, spa,
			    uint64_t, offset,
			    uint64_t, inflight_size,
			    uint64_t, txg + i);

			/*
			 * We copy data in order of increasing offset.
			 * Therefore the max_offset_to_sync[] must increase
			 * (or be zero, indicating that nothing is being
			 * copied in that txg).
			 */
			if (svr->svr_max_offset_to_sync[txgoff] != 0) {
				ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
				    >=, max_offset_yet);
				max_offset_yet =
				    svr->svr_max_offset_to_sync[txgoff];
			}

			/*
			 * We've already committed to copying this segment:
			 * we have allocated space elsewhere in the pool for
			 * it and have an IO outstanding to copy the data. We
			 * cannot free the space before the copy has
			 * completed, or else the copy IO might overwrite any
			 * new data. To free that space, we record the
			 * segment in the appropriate svr_frees tree and free
			 * the mapped space later, in the txg where we have
			 * completed the copy and synced the mapping (see
			 * vdev_mapping_sync).
			 */
			range_tree_add(svr->svr_frees[txgoff],
			    offset, inflight_size);
			size -= inflight_size;
			offset += inflight_size;

			/*
			 * This space is already accounted for as being
			 * done, because it is being copied in txg+i.
			 * However, if i!=0, then it is being copied in
			 * a future txg. If we crash after this txg
			 * syncs but before txg+i syncs, then the space
			 * will be free. Therefore we must account
			 * for the space being done in *this* txg
			 * (when it is freed) rather than the future txg
			 * (when it will be copied).
			 */
			ASSERT3U(svr->svr_bytes_done[txgoff], >=,
			    inflight_size);
			svr->svr_bytes_done[txgoff] -= inflight_size;
			svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
		}
	}
	ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);

	if (size > 0) {
		/*
		 * The copy thread has not yet visited this offset. Ensure
		 * that it doesn't.
		 */

		DTRACE_PROBE3(remove__free__unvisited,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, size);

		if (svr->svr_allocd_segs != NULL)
			range_tree_clear(svr->svr_allocd_segs, offset, size);

		/*
		 * Since we now do not need to copy this data, for
		 * accounting purposes we have done our job and can count
		 * it as completed.
		 */
		svr->svr_bytes_done[txg & TXG_MASK] += size;
	}
	mutex_exit(&svr->svr_lock);

	/*
	 * Now that we have dropped svr_lock, process the synced portion
	 * of this free.
	 */
	if (synced_size > 0) {
		vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);

		/*
		 * Note: this can only be called from syncing context,
		 * and the vdev_indirect_mapping is only changed from the
		 * sync thread, so we don't need svr_lock while doing
		 * metaslab_free_impl_cb.
		 */
		boolean_t checkpoint = B_FALSE;
		vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
		    metaslab_free_impl_cb, &checkpoint);
	}
}
/*
 * Stop an active removal and update the spa_removing phys.
 */
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));

	/* Ensure the removal thread has completed before we free the svr. */
	spa_vdev_remove_suspend(spa);

	ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);

	if (state == DSS_FINISHED) {
		spa_removing_phys_t *srp = &spa->spa_removing_phys;
		vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		if (srp->sr_prev_indirect_vdev != -1) {
			vdev_t *pvd;
			pvd = vdev_lookup_top(spa,
			    srp->sr_prev_indirect_vdev);
			ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
		}

		vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
		srp->sr_prev_indirect_vdev = vd->vdev_id;
	}
	spa->spa_removing_phys.sr_state = state;
	spa->spa_removing_phys.sr_end_time = gethrestime_sec();

	spa->spa_vdev_removal = NULL;
	spa_vdev_removal_destroy(svr);

	spa_sync_removing_state(spa, tx);
	spa_notify_waiters(spa);

	vdev_config_dirty(spa->spa_root_vdev);
}
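/*
 * Callback for range_tree_vacate(). The given segment of the removing
 * vdev had already been copied when it was freed, so mark the old
 * location obsolete and free the copy at its new location(s) via the
 * indirect vdev's remap op.
 */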
static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
	vdev_t *vd = arg;
	vdev_indirect_mark_obsolete(vd, offset, size);
	boolean_t checkpoint = B_FALSE;
	vdev_indirect_ops.vdev_op_remap(vd, offset, size,
	    metaslab_free_impl_cb, &checkpoint);
}

/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config;
	uint64_t txg = dmu_tx_get_txg(tx);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	vdev_indirect_mapping_add_entries(vim,
	    &svr->svr_new_segments[txg & TXG_MASK], tx);
	vdev_indirect_births_add_entry(vd->vdev_indirect_births,
	    vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);

	/*
	 * Free the copied data for anything that was freed while the
	 * mapping entries were in flight.
	 */
	mutex_enter(&svr->svr_lock);
	range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
	    free_mapped_segment_cb, vd);
	ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
	    vdev_indirect_mapping_max_offset(vim));
	svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
	mutex_exit(&svr->svr_lock);

	spa_sync_removing_state(spa, tx);
}

typedef struct vdev_copy_segment_arg {
	spa_t *vcsa_spa;
	dva_t *vcsa_dest_dva;
	uint64_t vcsa_txg;
	range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;
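/*
 * Callback for range_tree_vacate() of vcsa_obsolete_segs: construct a
 * blkptr for a portion of the new allocation that turned out not to be
 * needed, and free it back to the pool.
 */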
static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
	vdev_copy_segment_arg_t *vcsa = arg;
	spa_t *spa = vcsa->vcsa_spa;
	blkptr_t bp = { { { {0} } } };

	BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(&bp, DMU_OT_NONE);
	BP_SET_LEVEL(&bp, 0);
	BP_SET_DEDUP(&bp, 0);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);

	DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
	DVA_SET_OFFSET(&bp.blk_dva[0],
	    DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
	DVA_SET_ASIZE(&bp.blk_dva[0], size);

	zio_free(spa, vcsa->vcsa_txg, &bp);
}

/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
 */
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
	vdev_copy_segment_arg_t *vcsa = zio->io_private;

	range_tree_vacate(vcsa->vcsa_obsolete_segs,
	    unalloc_seg, vcsa);
	range_tree_destroy(vcsa->vcsa_obsolete_segs);
	kmem_free(vcsa, sizeof (*vcsa));

	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}

/*
 * The write of the new location is done.
 */
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
	vdev_copy_arg_t *vca = zio->io_private;

	abd_free(zio->io_abd);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes -= zio->io_size;

	if (zio->io_error != 0)
		vca->vca_write_error_bytes += zio->io_size;

	cv_signal(&vca->vca_cv);
	mutex_exit(&vca->vca_lock);
}

/*
 * The read of the old location is done. The parent zio is the write to
 * the new location. Allow it to start.
 */
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
	vdev_copy_arg_t *vca = zio->io_private;

	if (zio->io_error != 0) {
		mutex_enter(&vca->vca_lock);
		vca->vca_read_error_bytes += zio->io_size;
		mutex_exit(&vca->vca_lock);
	}

	zio_nowait(zio_unique_parent(zio));
}

/*
 * If the old and new vdevs are mirrors, we will read both sides of the old
 * mirror, and write each copy to the corresponding side of the new mirror.
 * If the old and new vdevs have a different number of children, we will do
 * this as best as possible. Since we aren't verifying checksums, this
 * ensures that as long as there's a good copy of the data, we'll have a
 * good copy after the removal, even if there's silent damage to one side
 * of the mirror. If we're removing a mirror that has some silent damage,
 * we'll have exactly the same damage in the new location (assuming that
 * the new location is also a mirror).
 *
 * We accomplish this by creating a tree of zio_t's, with as many writes as
 * there are "children" of the new vdev (a non-redundant vdev counts as one
 * child, a 2-way mirror has 2 children, etc). Each write has an associated
 * read from a child of the old vdev. Typically there will be the same
 * number of children of the old and new vdevs. However, if there are more
 * children of the new vdev, some child(ren) of the old vdev will be issued
 * multiple reads. If there are more children of the old vdev, some copies
 * will be dropped.
 *
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 * write(new vdev, child 0)        write(new vdev, child 1)
 *      |                               |
 * read(old vdev, child 0)         read(old vdev, child 1)
 *
 * Child zio's complete before their parents complete. However, zio's
 * created with zio_vdev_child_io() may be issued before their children
 * complete. In this case we need to make sure that the children (reads)
 * complete before the parents (writes) are *issued*. We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
 *
 * The spa_config_lock must be held while zio's created by
 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
 * zio is needed to release the spa_config_lock after all the reads and
 * writes complete. (Note that we can't grab the config lock for each read,
 * because it is not reentrant - we could deadlock with a thread waiting
 * for a write lock.)
 */
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
    vdev_t *source_vd, uint64_t source_offset,
    vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
	ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);

	/*
	 * If the destination child is unwritable then there is no point
	 * in issuing the source reads which cannot be written.
	 */
	if (!vdev_writeable(dest_child_vd))
		return;

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes += size;
	mutex_exit(&vca->vca_lock);

	abd_t *abd = abd_alloc_for_io(size, B_FALSE);

	vdev_t *source_child_vd = NULL;
	if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
		/*
		 * Source and dest are both mirrors. Copy from the same
		 * child id as we are copying to (wrapping around if there
		 * are more dest children than source children). If the
		 * preferred source child is unreadable select another.
		 */
		for (int i = 0; i < source_vd->vdev_children; i++) {
			source_child_vd = source_vd->vdev_child[
			    (dest_id + i) % source_vd->vdev_children];
			if (vdev_readable(source_child_vd))
				break;
		}
	} else {
		source_child_vd = source_vd;
	}
	/*
	 * There should always be at least one readable source child or
	 * the pool would be in a suspended state. Somehow selecting an
	 * unreadable child would result in IO errors, the removal process
	 * being cancelled, and the pool reverting to its pre-removal state.
	 */
	ASSERT3P(source_child_vd, !=, NULL);

	zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
	    dest_child_vd, dest_offset, abd, size,
	    ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_write_done, vca);

	zio_nowait(zio_vdev_child_io(write_zio, NULL,
	    source_child_vd, source_offset, abd, size,
	    ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_read_done, vca));
}

/*
 * Allocate a new location for this segment, and create the zio_t's to
 * read from the old location and write to the new location.
 */
static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
    uint64_t maxalloc, uint64_t txg,
    vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_entry_t *entry;
	dva_t dst = {{ 0 }};
	uint64_t start = range_tree_min(segs);
	ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift));

	ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
	ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift));

	uint64_t size = range_tree_span(segs);
	if (range_tree_span(segs) > maxalloc) {
		/*
		 * We can't allocate all the segments. Prefer to end
		 * the allocation at the end of a segment, thus avoiding
		 * additional split blocks.
		 */
		range_seg_max_t search;
		zfs_btree_index_t where;
		rs_set_start(&search, segs, start + maxalloc);
		rs_set_end(&search, segs, start + maxalloc);
		(void) zfs_btree_find(&segs->rt_root, &search, &where);
		range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where,
		    &where);
		if (rs != NULL) {
			size = rs_get_end(rs, segs) - start;
		} else {
			/*
			 * There are no segments that end before maxalloc.
			 * I.e. the first segment is larger than maxalloc,
			 * so we must split it.
			 */
			size = maxalloc;
		}
	}
	ASSERT3U(size, <=, maxalloc);
	ASSERT0(P2PHASE(size, 1 << spa->spa_min_ashift));

	/*
	 * An allocation class might not have any remaining vdevs or space
	 */
	metaslab_class_t *mc = mg->mg_class;
	if (mc->mc_groups == 0)
		mc = spa_normal_class(spa);
	int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
	    zal, 0);
	if (error == ENOSPC && mc != spa_normal_class(spa)) {
		error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
		    &dst, 0, NULL, txg, 0, zal, 0);
	}
	if (error != 0)
		return (error);
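	/*
	 * Illustrative example of the gap accounting below: if "segs"
	 * holds [0, 8K) and [16K, 24K) and we allocated a 24K destination,
	 * the single mapping entry covers the full 24K span, and
	 * obsolete_segs records [8K, 16K) (relative to "start") as space
	 * that was copied but is never referenced.
	 */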
	/*
	 * Determine the ranges that are not actually needed. Offsets are
	 * relative to the start of the range to be copied (i.e. relative to
	 * the local variable "start").
	 */
	range_tree_t *obsolete_segs = range_tree_create(NULL, RANGE_SEG64,
	    NULL, 0, 0);

	zfs_btree_index_t where;
	range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where);
	ASSERT3U(rs_get_start(rs, segs), ==, start);
	uint64_t prev_seg_end = rs_get_end(rs, segs);
	while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) {
		if (rs_get_start(rs, segs) >= start + size) {
			break;
		} else {
			range_tree_add(obsolete_segs,
			    prev_seg_end - start,
			    rs_get_start(rs, segs) - prev_seg_end);
		}
		prev_seg_end = rs_get_end(rs, segs);
	}
	/* We don't end in the middle of an obsolete range */
	ASSERT3U(start + size, <=, prev_seg_end);

	range_tree_clear(segs, start, size);

	/*
	 * We can't have any padding of the allocated size, otherwise we will
	 * misunderstand what's allocated, and the size of the mapping. We
	 * prevent padding by ensuring that all devices in the pool have the
	 * same ashift, and the allocation size is a multiple of the ashift.
	 */
	VERIFY3U(DVA_GET_ASIZE(&dst), ==, size);

	entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
	DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
	entry->vime_mapping.vimep_dst = dst;
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		entry->vime_obsolete_count = range_tree_space(obsolete_segs);
	}

	vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
	vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
	vcsa->vcsa_obsolete_segs = obsolete_segs;
	vcsa->vcsa_spa = spa;
	vcsa->vcsa_txg = txg;

	/*
	 * See comment before spa_vdev_copy_one_child().
	 */
	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
	zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
	    spa_vdev_copy_segment_done, vcsa, 0);
	vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
	if (dest_vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < dest_vd->vdev_children; i++) {
			vdev_t *child = dest_vd->vdev_child[i];
			spa_vdev_copy_one_child(vca, nzio, vd, start,
			    child, DVA_GET_OFFSET(&dst), i, size);
		}
	} else {
		spa_vdev_copy_one_child(vca, nzio, vd, start,
		    dest_vd, DVA_GET_OFFSET(&dst), -1, size);
	}
	zio_nowait(nzio);

	list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
	ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
	vdev_dirty(vd, 0, NULL, txg);

	return (0);
}
/*
 * Complete the removal of a toplevel vdev. This is called as a
 * synctask in the same txg that we will sync out the new config (to the
 * MOS object) which indicates that this vdev is indirect.
 */
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
	}

	ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
	    spa->spa_removing_phys.sr_to_copy);

	vdev_destroy_spacemaps(vd, tx);

	/* destroy leaf zaps, if any */
	ASSERT3P(svr->svr_zaplist, !=, NULL);
	for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
		vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
	}
	fnvlist_free(svr->svr_zaplist);

	spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
	/* vd->vdev_path is not available here */
	spa_history_log_internal(spa, "vdev remove completed", tx,
	    "%s vdev %llu", spa_name(spa), (u_longlong_t)vd->vdev_id);
}

static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
	ASSERT3P(zlist, !=, NULL);
	ASSERT0(vdev_get_nparity(vd));

	if (vd->vdev_leaf_zap != 0) {
		char zkey[32];
		(void) snprintf(zkey, sizeof (zkey), "%s-%llu",
		    VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
		fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
	}

	for (uint64_t id = 0; id < vd->vdev_children; id++) {
		vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
	}
}

static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
	vdev_t *ivd;
	dmu_tx_t *tx;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	/*
	 * First, build a list of leaf zaps to be destroyed.
	 * This is passed to the sync context thread,
	 * which does the actual unlinking.
	 */
	svr->svr_zaplist = fnvlist_alloc();
	vdev_remove_enlist_zaps(vd, svr->svr_zaplist);

	ivd = vdev_add_parent(vd, &vdev_indirect_ops);
	ivd->vdev_removing = 0;

	vd->vdev_leaf_zap = 0;

	vdev_remove_child(ivd, vd);
	vdev_compact_children(ivd);

	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	mutex_enter(&svr->svr_lock);
	svr->svr_thread = NULL;
	cv_broadcast(&svr->svr_cv);
	mutex_exit(&svr->svr_lock);

	/* After this, we can not use svr. */
	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_complete_sync, svr, tx);
	dmu_tx_commit(tx);
}
/*
 * Complete the removal of a toplevel vdev. This is called in open
 * context by the removal thread after we have copied all of the vdev's
 * data.
 */
static void
vdev_remove_complete(spa_t *spa)
{
	uint64_t txg;

	/*
	 * Wait for any deferred frees to be synced before we call
	 * vdev_metaslab_fini()
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	txg = spa_vdev_enter(spa);
	vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT3P(vd->vdev_trim_thread, ==, NULL);
	ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);

	zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
	    vd->vdev_id, txg);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
		vd->vdev_mg = NULL;
		spa_log_sm_set_blocklimit(spa);
	}
	if (vd->vdev_log_mg != NULL) {
		ASSERT0(vd->vdev_ms_count);
		metaslab_group_destroy(vd->vdev_log_mg);
		vd->vdev_log_mg = NULL;
	}
	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);

	vdev_remove_replace_with_indirect(vd, txg);

	/*
	 * We now release the locks, allowing spa_sync to run and finish the
	 * removal via vdev_remove_complete_sync in syncing context.
	 *
	 * Note that we hold on to the vdev_t that has been replaced. Since
	 * it isn't part of the vdev tree any longer, it can't be concurrently
	 * manipulated, even while we don't have the config lock.
	 */
	(void) spa_vdev_exit(spa, NULL, txg, 0);

	/*
	 * Top ZAP should have been transferred to the indirect vdev in
	 * vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_top_zap);

	/*
	 * Leaf ZAP should have been moved in
	 * vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_leaf_zap);

	txg = spa_vdev_enter(spa);
	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
	/*
	 * Request to update the config and the config cachefile.
	 */
	vdev_config_dirty(spa->spa_root_vdev);
	(void) spa_vdev_exit(spa, vd, txg, 0);

	if (ev != NULL)
		spa_event_post(ev);
}

/*
 * Evacuates a segment of size at most max_alloc from the vdev
 * via repeated calls to spa_vdev_copy_segment. If an allocation
 * fails, the pool is probably too fragmented to handle such a
 * large size, so decrease max_alloc so that the caller will not try
 * this size again this txg.
 */
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
    uint64_t *max_alloc, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	mutex_enter(&svr->svr_lock);
	/*
	 * Determine how big of a chunk to copy. We can allocate up
	 * to max_alloc bytes, and we can span up to vdev_removal_max_span
	 * bytes of unallocated space at a time. "segs" will track the
	 * allocated segments that we are copying. We may also be copying
	 * free segments (of up to vdev_removal_max_span bytes).
	 */
	range_tree_t *segs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	for (;;) {
		range_tree_t *rt = svr->svr_allocd_segs;
		range_seg_t *rs = range_tree_first(rt);

		if (rs == NULL)
			break;

		uint64_t seg_length;

		if (range_tree_is_empty(segs)) {
			/* need to truncate the first seg based on max_alloc */
			seg_length = MIN(rs_get_end(rs, rt) - rs_get_start(rs,
			    rt), *max_alloc);
		} else {
			if (rs_get_start(rs, rt) - range_tree_max(segs) >
			    vdev_removal_max_span) {
				/*
				 * Including this segment would cause us to
				 * copy a larger unneeded chunk than is
				 * allowed.
				 */
				break;
			} else if (rs_get_end(rs, rt) - range_tree_min(segs) >
			    *max_alloc) {
				/*
				 * This additional segment would extend past
				 * max_alloc. Rather than splitting this
				 * segment, leave it for the next mapping.
				 */
				break;
			} else {
				seg_length = rs_get_end(rs, rt) -
				    rs_get_start(rs, rt);
			}
		}

		range_tree_add(segs, rs_get_start(rs, rt), seg_length);
		range_tree_remove(svr->svr_allocd_segs,
		    rs_get_start(rs, rt), seg_length);
	}

	if (range_tree_is_empty(segs)) {
		mutex_exit(&svr->svr_lock);
		range_tree_destroy(segs);
		return;
	}

	if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
		dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
		    svr, tx);
	}

	svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);

	/*
	 * Note: this is the amount of *allocated* space
	 * that we are taking care of each txg.
	 */
	svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);

	mutex_exit(&svr->svr_lock);

	zio_alloc_list_t zal;
	metaslab_trace_init(&zal);
	uint64_t thismax = SPA_MAXBLOCKSIZE;
	while (!range_tree_is_empty(segs)) {
		int error = spa_vdev_copy_segment(vd,
		    segs, thismax, txg, vca, &zal);

		if (error == ENOSPC) {
			/*
			 * Cut our segment in half, and don't try this
			 * segment size again this txg. Note that the
			 * allocation size must be aligned to the highest
			 * ashift in the pool, so that the allocation will
			 * not be padded out to a multiple of the ashift,
			 * which could cause us to think that this mapping
			 * is larger than we intended.
			 */
			ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
			uint64_t attempted =
			    MIN(range_tree_span(segs), thismax);
			thismax = P2ROUNDUP(attempted / 2,
			    1 << spa->spa_max_ashift);
			/*
			 * The minimum-size allocation can not fail.
			 */
			ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
			*max_alloc = attempted - (1 << spa->spa_max_ashift);
		} else {
			ASSERT0(error);

			/*
			 * We've performed an allocation, so reset the
			 * alloc trace list.
			 */
			metaslab_trace_fini(&zal);
			metaslab_trace_init(&zal);
		}
	}
	metaslab_trace_fini(&zal);
	range_tree_destroy(segs);
}
/*
 * The size of each removal mapping is limited by the tunable
 * zfs_remove_max_segment, but we must adjust this to be a multiple of the
 * pool's ashift, so that we don't try to split individual sectors regardless
 * of the tunable value. (Note that device removal requires that all devices
 * have the same ashift, so there's no difference between spa_min_ashift and
 * spa_max_ashift.) The raw tunable should not be used elsewhere.
 */
uint64_t
spa_remove_max_segment(spa_t *spa)
{
	return (P2ROUNDUP(zfs_remove_max_segment, 1 << spa->spa_max_ashift));
}
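/*
 * Example (illustrative): with 4K sectors (ashift=12), a hand-tuned
 * zfs_remove_max_segment of 100000 bytes would be rounded up by the
 * P2ROUNDUP above to 102400 bytes (25 sectors).
 */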
/*
 * The removal thread operates in open context. It iterates over all
 * allocated space in the vdev, by loading each metaslab's spacemap.
 * For each contiguous segment of allocated space (capping the segment
 * size at SPA_MAXBLOCKSIZE), we:
 *  - Allocate space for it on another vdev.
 *  - Create a new mapping from the old location to the new location
 *    (as a record in svr_new_segments).
 *  - Initiate a physical read zio to get the data off the removing disk.
 *  - In the read zio's done callback, initiate a physical write zio to
 *    write it to the new vdev.
 * Note that all of this will take effect when a particular TXG syncs.
 * The sync thread ensures that all the phys reads and writes for the syncing
 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
 * (see vdev_mapping_sync()).
 */
static void
spa_vdev_remove_thread(void *arg)
{
	spa_t *spa = arg;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_copy_arg_t vca;
	uint64_t max_alloc = spa_remove_max_segment(spa);
	uint64_t last_txg = 0;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);

	ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(vd->vdev_removing);
	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT(vim != NULL);

	mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
	vca.vca_outstanding_bytes = 0;
	vca.vca_read_error_bytes = 0;
	vca.vca_write_error_bytes = 0;

	mutex_enter(&svr->svr_lock);

	/*
	 * Start from vim_max_offset so we pick up where we left off
	 * if we are restarting the removal after opening the pool.
	 */
	uint64_t msi;
	for (msi = start_offset >> vd->vdev_ms_shift;
	    msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];
		ASSERT3U(msi, <=, vd->vdev_ms_count);

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++) {
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		}

		/*
		 * If the metaslab has ever been allocated from (ms_sm!=NULL),
		 * read the allocated segments from the space map object
		 * into svr_allocd_segs. Since we do this while holding
		 * svr_lock and ms_sync_lock, concurrent frees (which
		 * would have modified the space map) will wait for us
		 * to finish loading the spacemap, and then take the
		 * appropriate action (see free_from_removing_vdev()).
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    svr->svr_allocd_segs, SM_ALLOC));

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, svr->svr_allocd_segs);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, svr->svr_allocd_segs);
			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * When we are resuming from a paused removal (i.e.
			 * when importing a pool with a removal in progress),
			 * discard any state that we have already processed.
			 */
			range_tree_clear(svr->svr_allocd_segs, 0,
			    start_offset);
		}
		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		vca.vca_msp = msp;
		zfs_dbgmsg("copying %llu segments for metaslab %llu",
		    zfs_btree_numnodes(&svr->svr_allocd_segs->rt_root),
		    msp->ms_id);

		while (!svr->svr_thread_exit &&
		    !range_tree_is_empty(svr->svr_allocd_segs)) {

			mutex_exit(&svr->svr_lock);

			/*
			 * We need to periodically drop the config lock so that
			 * writers can get in. Additionally, we can't wait
			 * for a txg to sync while holding a config lock
			 * (since a waiting writer could cause a 3-way deadlock
			 * with the sync thread, which also gets a config
			 * lock for reader). So we can't hold the config lock
			 * while calling dmu_tx_assign().
			 */
			spa_config_exit(spa, SCL_CONFIG, FTAG);

			/*
			 * This delay will pause the removal around the point
			 * specified by zfs_removal_suspend_progress. We do
			 * this solely from the test suite or during debugging.
			 */
			while (zfs_removal_suspend_progress &&
			    !svr->svr_thread_exit)
				delay(hz);

			mutex_enter(&vca.vca_lock);
			while (vca.vca_outstanding_bytes >
			    zfs_remove_max_copy_bytes) {
				cv_wait(&vca.vca_cv, &vca.vca_lock);
			}
			mutex_exit(&vca.vca_lock);

			dmu_tx_t *tx =
			    dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);

			VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
			uint64_t txg = dmu_tx_get_txg(tx);

			/*
			 * Reacquire the vdev_config lock. The vdev_t
			 * that we're removing may have changed, e.g. due
			 * to a vdev_attach or vdev_detach.
			 */
			spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
			vd = vdev_lookup_top(spa, svr->svr_vdev_id);

			if (txg != last_txg)
				max_alloc = spa_remove_max_segment(spa);
			last_txg = txg;

			spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);

			dmu_tx_commit(tx);
			mutex_enter(&svr->svr_lock);
		}

		mutex_enter(&vca.vca_lock);
		if (zfs_removal_ignore_errors == 0 &&
		    (vca.vca_read_error_bytes > 0 ||
		    vca.vca_write_error_bytes > 0)) {
			svr->svr_thread_exit = B_TRUE;
		}
		mutex_exit(&vca.vca_lock);
	}

	mutex_exit(&svr->svr_lock);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * Wait for all copies to finish before cleaning up the vca.
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	ASSERT0(vca.vca_outstanding_bytes);

	mutex_destroy(&vca.vca_lock);
	cv_destroy(&vca.vca_cv);
	if (svr->svr_thread_exit) {
		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
		svr->svr_thread = NULL;
		cv_broadcast(&svr->svr_cv);
		mutex_exit(&svr->svr_lock);

		/*
		 * During the removal process an unrecoverable read or write
		 * error was encountered. The removal process must be
		 * cancelled or this damage may become permanent.
		 */
		if (zfs_removal_ignore_errors == 0 &&
		    (vca.vca_read_error_bytes > 0 ||
		    vca.vca_write_error_bytes > 0)) {
			zfs_dbgmsg("canceling removal due to IO errors: "
			    "[read_error_bytes=%llu] [write_error_bytes=%llu]",
			    vca.vca_read_error_bytes,
			    vca.vca_write_error_bytes);
			spa_vdev_remove_cancel_impl(spa);
		}
	} else {
		ASSERT0(range_tree_space(svr->svr_allocd_segs));
		vdev_remove_complete(spa);
	}

	thread_exit();
}

void
spa_vdev_remove_suspend(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	mutex_enter(&svr->svr_lock);
	svr->svr_thread_exit = B_TRUE;
	while (svr->svr_thread != NULL)
		cv_wait(&svr->svr_cv, &svr->svr_lock);
	svr->svr_thread_exit = B_FALSE;
	mutex_exit(&svr->svr_lock);
}

/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);
	return (0);
}

/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer being removing.
 */
/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT3P(svr->svr_thread, ==, NULL);

	spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);

	boolean_t are_precise;
	VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
	if (are_precise) {
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
	}

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));

		space_map_free(vd->vdev_obsolete_sm, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	}
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&svr->svr_new_segments[i]));
		ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
		    vdev_indirect_mapping_max_offset(vim));
	}

	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];

		if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
			break;

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_lock);
/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer being removing.
 */
/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT3P(svr->svr_thread, ==, NULL);

	spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);

	boolean_t are_precise;
	VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
	if (are_precise) {
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
	}

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));

		space_map_free(vd->vdev_obsolete_sm, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	}
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&svr->svr_new_segments[i]));
		ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
		    vdev_indirect_mapping_max_offset(vim));
	}

	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];

		if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
			break;

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		for (int i = 0; i < TXG_DEFER_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_defer[i]));
		ASSERT0(range_tree_space(msp->ms_freed));

		if (msp->ms_sm != NULL) {
			mutex_enter(&svr->svr_lock);
			VERIFY0(space_map_load(msp->ms_sm,
			    svr->svr_allocd_segs, SM_ALLOC));

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, svr->svr_allocd_segs);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, svr->svr_allocd_segs);
			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * Clear everything past what has been synced,
			 * because we have not allocated mappings for
			 * it yet.
			 */
			uint64_t syncd =
			    vdev_indirect_mapping_max_offset(vim);
			uint64_t sm_end = msp->ms_sm->sm_start +
			    msp->ms_sm->sm_size;
			if (sm_end > syncd)
				range_tree_clear(svr->svr_allocd_segs,
				    syncd, sm_end - syncd);

			mutex_exit(&svr->svr_lock);
		}
		mutex_exit(&msp->ms_lock);

		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs,
		    free_mapped_segment_cb, vd);
		mutex_exit(&svr->svr_lock);
	}

	/*
	 * Note: this must happen after we invoke free_mapped_segment_cb,
	 * because it adds to the obsolete_segments.
	 */
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);

	ASSERT3U(vic->vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = 0;

	ASSERT3U(vic->vic_births_object, ==,
	    vdev_indirect_births_object(vd->vdev_indirect_births));
	vdev_indirect_births_close(vd->vdev_indirect_births);
	vd->vdev_indirect_births = NULL;
	vdev_indirect_births_free(mos, vic->vic_births_object, tx);
	vic->vic_births_object = 0;

	/*
	 * We may have processed some frees from the removing vdev in this
	 * txg, thus increasing svr_bytes_done; discard that here to
	 * satisfy the assertions in spa_vdev_removal_destroy().
	 * Note that future txgs cannot have any bytes_done, because
	 * future txgs are only modified from open context, and we have
	 * already shut down the copying thread.
	 */
	svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
	spa_finish_removal(spa, DSS_CANCELED, tx);

	vd->vdev_removing = B_FALSE;
	vdev_config_dirty(vd);

	zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
	    vd->vdev_id, dmu_tx_get_txg(tx));
	spa_history_log_internal(spa, "vdev remove canceled", tx,
	    "%s vdev %llu %s", spa_name(spa),
	    (u_longlong_t)vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
}

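/*
 * Note (added): cancellation is normally requested from userland via
 * "zpool remove -s <pool>", which ends up in spa_vdev_remove_cancel()
 * below; the copy thread can also trigger it internally when it hits
 * unrecoverable I/O errors (see spa_vdev_remove_thread()).
 */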
static int
spa_vdev_remove_cancel_impl(spa_t *spa)
{
	uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;

	int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
	    spa_vdev_remove_cancel_sync, NULL, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);

	if (error == 0) {
		spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
		vdev_t *vd = vdev_lookup_top(spa, vdid);
		metaslab_group_activate(vd->vdev_mg);
		ASSERT(!vd->vdev_islog);
		metaslab_group_activate(vd->vdev_log_mg);
		spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
	}

	return (error);
}

int
spa_vdev_remove_cancel(spa_t *spa)
{
	spa_vdev_remove_suspend(spa);

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);

	return (spa_vdev_remove_cancel_impl(spa));
}

void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (svr == NULL)
		return;

	/*
	 * This check is necessary so that we do not dirty the
	 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
	 * is nothing to do.  Dirtying it every time would prevent us
	 * from syncing-to-convergence.
	 */
	if (svr->svr_bytes_done[txgoff] == 0)
		return;

	/*
	 * Update progress accounting.
	 */
	spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
	svr->svr_bytes_done[txgoff] = 0;

	spa_sync_removing_state(spa, tx);
}

static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
	uint64_t id = vd->vdev_id;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	vdev_free(vd);

	vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
	vdev_add_child(rvd, vd);
	vdev_config_dirty(rvd);

	/*
	 * Reassess the health of our root vdev.
	 */
	vdev_reopen(rvd);
}

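/*
 * Note (added): substituting a hole vdev above (rather than compacting
 * the root vdev's child array) keeps the vdev ids of the remaining
 * top-level vdevs stable, so DVAs that name those ids stay valid.
 */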
/*
 * Remove a log device.  The config lock is held for the specified TXG.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	ASSERT(vd->vdev_islog);
	ASSERT(vd == vd->vdev_top);
	ASSERT3P(vd->vdev_log_mg, ==, NULL);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop allocating from this vdev.
	 */
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * Cancel any initialize or TRIM which was in progress.
	 */
	vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
	vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
	vdev_autotrim_stop_wait(vd);

	/*
	 * Evacuate the device.  We don't hold the config lock as
	 * writer since we need to do I/O but we do keep the
	 * spa_namespace_lock held.  Once this completes the device
	 * should no longer have any blocks allocated on it.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (vd->vdev_stat.vs_alloc != 0)
		error = spa_reset_logs(spa);

	*txg = spa_vdev_config_enter(spa);

	if (error != 0) {
		metaslab_group_activate(mg);
		ASSERT3P(vd->vdev_log_mg, ==, NULL);
		return (error);
	}
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * The evacuation succeeded.  Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);

	/*
	 * When the log space map feature is enabled we look at
	 * the vdev's top_zap to find the on-disk flush data of
	 * the metaslab we just flushed.  Thus, while removing a
	 * log vdev we make sure to call vdev_metaslab_fini()
	 * first, which removes all metaslabs of this vdev from
	 * spa_metaslabs_by_flushed before vdev_remove_empty()
	 * destroys the top_zap of this log vdev.
	 *
	 * This avoids the scenario where we flush a metaslab
	 * from the log vdev being removed that doesn't have a
	 * top_zap and end up failing to lookup its on-disk flush
	 * data.
	 *
	 * We don't call metaslab_group_destroy() right away
	 * though (it will be called in vdev_free() later) as
	 * during metaslab_sync() of metaslabs from other vdevs
	 * we may touch the metaslab group of this vdev through
	 * metaslab_class_histogram_verify().
	 */
	vdev_metaslab_fini(vd);
	spa_log_sm_set_blocklimit(spa);

	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
	*txg = spa_vdev_config_enter(spa);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/* The top ZAP should have been destroyed by vdev_remove_empty. */
	ASSERT0(vd->vdev_top_zap);
	/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
	ASSERT0(vd->vdev_leaf_zap);

	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	if (list_link_active(&vd->vdev_state_dirty_node))
		vdev_state_clean(vd);
	if (list_link_active(&vd->vdev_config_dirty_node))
		vdev_config_clean(vd);

	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Clean up the vdev namespace.
	 */
	vdev_remove_make_hole_and_free(vd);

	if (ev != NULL)
		spa_event_post(ev);

	return (0);
}

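/*
 * Note (added): the "*txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE"
 * waits used above and in spa_vdev_remove_top() below amount to waiting
 * five txgs with the stock definitions (TXG_CONCURRENT_STATES == 3,
 * TXG_DEFER_SIZE == 2): long enough for the youngest allocations to
 * sync and for any deferred frees to be applied.
 */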
static int
spa_vdev_remove_top_check(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd != vd->vdev_top)
		return (SET_ERROR(ENOTSUP));

	if (!vdev_is_concrete(vd))
		return (SET_ERROR(ENOTSUP));

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return (SET_ERROR(ENOTSUP));

	metaslab_class_t *mc = vd->vdev_mg->mg_class;
	metaslab_class_t *normal = spa_normal_class(spa);
	if (mc != normal) {
		/*
		 * Space allocated from the special (or dedup) class is
		 * included in the DMU's space usage, but it's not included
		 * in spa_dspace (or dsl_pool_adjustedsize()).  Therefore
		 * there is always at least as much free space in the normal
		 * class as is allocated from the special (and dedup) class.
		 * As a backup check, we will return ENOSPC if this is
		 * violated.  See also spa_update_dspace().
		 */
		uint64_t available = metaslab_class_get_space(normal) -
		    metaslab_class_get_alloc(normal);
		ASSERT3U(available, >=, vd->vdev_stat.vs_alloc);
		if (available < vd->vdev_stat.vs_alloc)
			return (SET_ERROR(ENOSPC));
	} else {
		/* available space in the pool's normal class */
		uint64_t available = dsl_dir_space_available(
		    spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
		if (available <
		    vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
			/*
			 * This is a normal device.  There has to be enough
			 * free space to remove the device and leave double
			 * the "slop" space (i.e. we must leave at least 3%
			 * of the pool free, in addition to the normal slop
			 * space).
			 */
			return (SET_ERROR(ENOSPC));
		}
	}

	/*
	 * There cannot be a removal already in progress.
	 */
	if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	/*
	 * The device must have all its data.
	 */
	if (!vdev_dtl_empty(vd, DTL_MISSING) ||
	    !vdev_dtl_empty(vd, DTL_OUTAGE))
		return (SET_ERROR(EBUSY));

	/*
	 * The device must be healthy.
	 */
	if (!vdev_readable(vd))
		return (SET_ERROR(EIO));

	/*
	 * All vdevs in the normal class must have the same ashift.
	 */
	if (spa->spa_max_ashift != spa->spa_min_ashift) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * A removed special/dedup vdev must have the same ashift as the
	 * normal class.
	 */
	ASSERT(!vd->vdev_islog);
	if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
	    vd->vdev_ashift != spa->spa_max_ashift) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * All vdevs in the normal class must have the same ashift
	 * and not be raidz or draid.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	int num_indirect = 0;
	for (uint64_t id = 0; id < rvd->vdev_children; id++) {
		vdev_t *cvd = rvd->vdev_child[id];

		/*
		 * A removed special/dedup vdev must have the same ashift
		 * across all vdevs in its class.
		 */
		if (vd->vdev_alloc_bias != VDEV_BIAS_NONE &&
		    cvd->vdev_alloc_bias == vd->vdev_alloc_bias &&
		    cvd->vdev_ashift != vd->vdev_ashift) {
			return (SET_ERROR(EINVAL));
		}
		if (cvd->vdev_ashift != 0 &&
		    cvd->vdev_alloc_bias == VDEV_BIAS_NONE)
			ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
		if (cvd->vdev_ops == &vdev_indirect_ops)
			num_indirect++;
		if (!vdev_is_concrete(cvd))
			continue;
		if (vdev_get_nparity(cvd) != 0)
			return (SET_ERROR(EINVAL));
		/*
		 * The mirror must be composed of leaf vdevs only.
		 */
		if (cvd->vdev_ops == &vdev_mirror_ops) {
			for (uint64_t cid = 0;
			    cid < cvd->vdev_children; cid++) {
				if (!cvd->vdev_child[cid]->vdev_ops->
				    vdev_op_leaf)
					return (SET_ERROR(EINVAL));
			}
		}
	}

	return (0);
}

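/*
 * Example (added, illustrative): spa_get_slop_space() is roughly 1/32
 * of pool capacity, so on a 1 TiB pool the ENOSPC check above demands
 * about 32 GiB of slop; removing a 100 GiB top-level vdev would then
 * require at least ~132 GiB available in the normal class.
 */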
/*
 * Initiate removal of a top-level vdev, reducing the total space in the pool.
 * The config lock is held for the specified TXG.  Once initiated,
 * evacuation of all allocated space (copying it to other vdevs) happens
 * in the background (see spa_vdev_remove_thread()), and can be canceled
 * (see spa_vdev_remove_cancel()).  If successful, the vdev will
 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
 */
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
	spa_t *spa = vd->vdev_spa;
	int error;

	/*
	 * Check for errors up-front, so that we don't waste time
	 * passivating the metaslab group and clearing the ZIL if there
	 * are errors.
	 */
	error = spa_vdev_remove_top_check(vd);
	if (error != 0)
		return (error);

	/*
	 * Stop allocating from this vdev.  Note that we must check
	 * that this is not the only device in the pool before
	 * passivating, otherwise we will not be able to make
	 * progress because we can't allocate from any vdevs.
	 * The above check for sufficient free space serves this
	 * purpose.
	 */
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_group_passivate(mg);
	ASSERT(!vd->vdev_islog);
	metaslab_group_passivate(vd->vdev_log_mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * We must ensure that no "stubby" log blocks are allocated
	 * on the device to be removed.  These blocks could be
	 * written at any time, including while we are in the middle
	 * of copying them.
	 */
	error = spa_reset_logs(spa);

	/*
	 * We stop any initializing and TRIM that is currently in progress
	 * but leave the state as "active".  This will allow the process to
	 * resume if the removal is canceled sometime later.
	 */
	vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
	vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
	vdev_autotrim_stop_wait(vd);

	*txg = spa_vdev_config_enter(spa);

	/*
	 * Things might have changed while the config lock was dropped
	 * (e.g. space usage).  Check for errors again.
	 */
	if (error == 0)
		error = spa_vdev_remove_top_check(vd);

	if (error != 0) {
		metaslab_group_activate(mg);
		ASSERT(!vd->vdev_islog);
		metaslab_group_activate(vd->vdev_log_mg);
		spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
		spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
		spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
		return (error);
	}

	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_initiate_sync, (void *)(uintptr_t)vd->vdev_id, tx);
	dmu_tx_commit(tx);

	return (0);
}

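/*
 * Example (added, illustrative): from userland these entry points map
 * to zpool(8), e.g.:
 *
 *	zpool remove tank sdb	# initiate removal of top-level vdev "sdb"
 *	zpool status tank	# report evacuation progress
 *	zpool remove -s tank	# cancel an in-progress removal
 */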
/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time.  As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock.  During each step the configuration is synced out.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0, error_log;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
	sysevent_t *ev = NULL;
	char *vd_type = NULL, *vd_path = NULL;

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

		if (!locked)
			return (spa_vdev_exit(spa, NULL, txg, error));

		return (error);
	}

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			char *type;
			boolean_t draid_spare = B_FALSE;

			if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type)
			    == 0 && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
				draid_spare = B_TRUE;

			if (vd == NULL && draid_spare) {
				error = SET_ERROR(ENOTSUP);
			} else {
				if (vd == NULL)
					vd = spa_lookup_by_guid(spa,
					    guid, B_TRUE);
				ev = spa_event_create(spa, vd, NULL,
				    ESC_ZFS_VDEV_REMOVE_AUX);

				vd_type = VDEV_TYPE_SPARE;
				vd_path = spa_strdup(fnvlist_lookup_string(
				    nv, ZPOOL_CONFIG_PATH));
				spa_vdev_remove_aux(spa->spa_spares.sav_config,
				    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
				spa_load_spares(spa);
				spa->spa_spares.sav_sync = B_TRUE;
			}
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		vd_type = VDEV_TYPE_L2CACHE;
		vd_path = spa_strdup(fnvlist_lookup_string(
		    nv, ZPOOL_CONFIG_PATH));
		/*
		 * Cache devices can always be removed.
		 */
		vd = spa_lookup_by_guid(spa, guid, B_TRUE);

		/*
		 * Stop trimming the cache device.  We need to release the
		 * config lock to allow the syncing of TRIM transactions
		 * without releasing the spa_namespace_lock.  The same
		 * strategy is employed in spa_vdev_remove_top().
		 */
		spa_vdev_config_exit(spa, NULL,
		    txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
		mutex_enter(&vd->vdev_trim_lock);
		vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
		mutex_exit(&vd->vdev_trim_lock);
		txg = spa_vdev_config_enter(spa);

		ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		vd_type = VDEV_TYPE_LOG;
		vd_path = spa_strdup((vd->vdev_path != NULL) ?
		    vd->vdev_path : "-");
		error = spa_vdev_remove_log(vd, &txg);
	} else if (vd != NULL) {
		ASSERT(!locked);
		error = spa_vdev_remove_top(vd, &txg);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = SET_ERROR(ENOENT);
	}

	error_log = error;

	if (!locked)
		error = spa_vdev_exit(spa, NULL, txg, error);

	/*
	 * Logging must be done outside the spa config lock.  Otherwise,
	 * this code path could end up holding the spa config lock while
	 * waiting for a txg_sync so it can write to the internal log.
	 * Doing that would prevent the txg sync from actually happening,
	 * causing a deadlock.
	 */
	if (error_log == 0 && vd_type != NULL && vd_path != NULL) {
		spa_history_log_internal(spa, "vdev remove", NULL,
		    "%s vdev (%s) %s", spa_name(spa), vd_type, vd_path);
	}
	if (vd_path != NULL)
		spa_strfree(vd_path);

	if (ev != NULL)
		spa_event_post(ev);

	return (error);
}

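/*
 * Note (added): the counters filled in below back the "remove:" section
 * of zpool status; a consumer could estimate percent-done as, e.g.
 * (illustrative):
 *
 *	pct = 100 * prs.prs_copied / MAX(prs.prs_to_copy, 1);
 */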
int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
	prs->prs_state = spa->spa_removing_phys.sr_state;

	if (prs->prs_state == DSS_NONE)
		return (SET_ERROR(ENOENT));

	prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
	prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
	prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
	prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
	prs->prs_copied = spa->spa_removing_phys.sr_copied;

	prs->prs_mapping_memory = 0;
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != -1) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
		vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}

	return (0);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
	"Ignore hard IO errors when removing device");

ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, INT, ZMOD_RW,
	"Largest contiguous segment to allocate when removing device");

ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, INT, ZMOD_RW,
	"Largest span of free chunks a remap segment can span");

ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, INT, ZMOD_RW,
	"Pause device removal after this many bytes are copied "
	"(debug use only - causes removal to hang)");
/* END CSTYLED */

EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);
EXPORT_SYMBOL(spa_restart_removal);
EXPORT_SYMBOL(spa_vdev_removal_destroy);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_remove_cancel);
EXPORT_SYMBOL(spa_vdev_remove_suspend);
EXPORT_SYMBOL(svr_sync);
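
/*
 * Note (added, illustrative): on Linux the module parameters declared
 * above appear under /sys/module/zfs/parameters/ and may be tuned at
 * runtime, e.g.:
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_removal_ignore_errors
 */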