/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool. Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz. (Note that members of a mirror can also be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 * - Disable allocations from this device (spa_vdev_remove_top).
 *
 * - From a new thread (spa_vdev_remove_thread), copy data from
 *   the removing vdev to a different vdev. The copy happens in open
 *   context (spa_vdev_copy_impl) and issues a sync task
 *   (vdev_mapping_sync) so the sync thread can update the partial
 *   indirect mappings in core and on disk.
 *
 * - If a free happens during a removal, it is freed from the
 *   removing vdev, and if it has already been copied, from the new
 *   location as well (free_from_removing_vdev).
 *
 * - After the removal is completed, the copy thread converts the vdev
 *   into an indirect vdev (vdev_remove_complete) before instructing
 *   the sync thread to destroy the space maps and finish the removal
 *   (spa_finish_removal).
 */
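
/*
 * For illustration (a sketch, not authoritative command output -- the
 * exact syntax and messages vary by platform and release), the
 * operator-visible flow corresponding to the process above is:
 *
 *	# zpool remove tank c1t2d0	initiates the multi-step removal
 *	# zpool status tank		reports evacuation/copy progress
 *	# zpool remove -s tank		cancels an in-progress removal
 *
 * The removal state is persistent, so an in-progress removal survives
 * pool export/import and reboot; see spa_remove_init() and
 * spa_restart_removal() below.
 */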

typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal. This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;

/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * Note: we will issue I/Os of up to this size. The mpt driver does not
 * respond well to I/Os larger than 1MB, so we set this to 1MB. (When
 * mpt processes an I/O larger than 1MB, it needs to do an allocation of
 * 2 physically contiguous pages; if this allocation fails, mpt will drop
 * the I/O and hang the device.)
 */
int zfs_remove_max_segment = 1024 * 1024;

/*
 * Allow a remap segment to span free chunks of at most this size. The main
 * impact of a larger span is that we will read and write larger, more
 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
 * for iops. The value here was chosen to align with
 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
 * reads (but there's no reason it has to be the same).
 *
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
 */
int vdev_removal_max_span = 32 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
uint64_t zfs_remove_max_bytes_pause = UINT64_MAX;

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"

static void spa_vdev_remove_thread(void *arg);
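
/*
 * Sketch of how these tunables might be adjusted; this assumes the
 * standard illumos mechanism for kernel module tunables, and the
 * values shown are purely illustrative, not recommendations:
 *
 *	(in /etc/system)
 *	set zfs:zfs_remove_max_copy_bytes = 0x8000000
 *	set zfs:vdev_removal_max_span = 0x10000
 *
 * zfs_remove_max_bytes_pause is intended only for the test suite and
 * debugging (see the pause loop in spa_vdev_remove_thread()).
 */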

static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}

static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}

static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, NULL);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, NULL);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}

void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}

/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
	spa_vdev_removal_t *svr = NULL;
	uint64_t txg = dmu_tx_get_txg(tx);

	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
	svr = spa_vdev_removal_create(vd);

	ASSERT(vd->vdev_removing);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		/*
		 * By activating the OBSOLETE_COUNTS feature, we prevent
		 * the pool from being downgraded and ensure that the
		 * refcounts are precise.
		 */
		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		uint64_t one = 1;
		VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
		    &one, tx));
		ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
	}

	vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
	vd->vdev_indirect_mapping =
	    vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
	vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
	vd->vdev_indirect_births =
	    vdev_indirect_births_open(mos, vic->vic_births_object);
	spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
	spa->spa_removing_phys.sr_start_time = gethrestime_sec();
	spa->spa_removing_phys.sr_end_time = 0;
	spa->spa_removing_phys.sr_state = DSS_SCANNING;
	spa->spa_removing_phys.sr_to_copy = 0;
	spa->spa_removing_phys.sr_copied = 0;

	/*
	 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
	 * there may be space in the defer tree, which is free, but still
	 * counted in vs_alloc.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *ms = vd->vdev_ms[i];
		if (ms->ms_sm == NULL)
			continue;

		/*
		 * Sync tasks happen before metaslab_sync(), therefore
		 * smp_alloc and sm_alloc must be the same.
		 */
		ASSERT3U(space_map_allocated(ms->ms_sm), ==,
		    ms->ms_sm->sm_phys->smp_alloc);

		spa->spa_removing_phys.sr_to_copy +=
		    space_map_allocated(ms->ms_sm);

		/*
		 * Space which we are freeing this txg does not need to
		 * be copied.
		 */
		spa->spa_removing_phys.sr_to_copy -=
		    range_tree_space(ms->ms_freeing);

		ASSERT0(range_tree_space(ms->ms_freed));
		for (int t = 0; t < TXG_SIZE; t++)
			ASSERT0(range_tree_space(ms->ms_allocating[t]));
	}

	/*
	 * Sync tasks are called before metaslab_sync(), so there should
	 * be no already-synced metaslabs in the TXG_CLEAN list.
	 */
	ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

	spa_sync_removing_state(spa, tx);

	/*
	 * All blocks that we need to read the most recent mapping must be
	 * stored on concrete vdevs. Therefore, we must dirty anything that
	 * is read before spa_remove_init(). Specifically, the
	 * spa_config_object. (Note that although we already modified the
	 * spa_config_object in spa_sync_removing_state, that may not have
	 * modified all blocks of the object.)
	 */
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
	for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
		dmu_buf_t *dbuf;
		VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
		    offset, FTAG, &dbuf, 0));
		dmu_buf_will_dirty(dbuf, tx);
		offset += dbuf->db_size;
		dmu_buf_rele(dbuf, FTAG);
	}

	/*
	 * Now that we've allocated the im_object, dirty the vdev to ensure
	 * that the object gets written to the config on disk.
	 */
	vdev_config_dirty(vd);

	zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
	    "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
	    vic->vic_mapping_object);

	spa_history_log_internal(spa, "vdev remove started", tx,
	    "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
	/*
	 * Setting spa_vdev_removal causes subsequent frees to call
	 * free_from_removing_vdev(). Note that we don't need any locking
	 * because we are the sync thread, and metaslab_free_impl() is only
	 * called from syncing context (potentially from a zio taskq thread,
	 * but in any case only when there are outstanding free i/os, which
	 * there are not).
	 */
	ASSERT3P(spa->spa_vdev_removal, ==, NULL);
	spa->spa_vdev_removal = svr;
	svr->svr_thread = thread_create(NULL, 0,
	    spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}

/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed. We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
	int error;

	error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys);

	if (error == ENOENT) {
		spa->spa_removing_phys.sr_state = DSS_NONE;
		spa->spa_removing_phys.sr_removing_vdev = -1;
		spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
		spa->spa_indirect_vdevs_loaded = B_TRUE;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
		/*
		 * We are currently removing a vdev. Create and
		 * initialize a spa_vdev_removal_t from the bonus
		 * buffer of the removing vdev's vdev_im_object, and
		 * initialize its partial mapping.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		vdev_t *vd = vdev_lookup_top(spa,
		    spa->spa_removing_phys.sr_removing_vdev);

		if (vd == NULL) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return (EINVAL);
		}

		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT(vdev_is_concrete(vd));
		spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
		ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
		ASSERT(vd->vdev_removing);

		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);
		spa_config_exit(spa, SCL_STATE, FTAG);

		spa->spa_vdev_removal = svr;
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
		vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);

		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * Now that we've loaded all the indirect mappings, we can allow
	 * reads from other blocks (e.g. via predictive prefetch).
	 */
	spa->spa_indirect_vdevs_loaded = B_TRUE;
	return (0);
}
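
/*
 * Illustrative sketch of the chain walked above, with hypothetical
 * vdev ids: if top-level vdev 3 was removed first and vdev 1 second,
 * the on-disk state is
 *
 *	spa_removing_phys.sr_prev_indirect_vdev = 1
 *	vdev 1: vic_prev_indirect_vdev = 3
 *	vdev 3: vic_prev_indirect_vdev = UINT64_MAX	(end of list)
 *
 * so vdev 1's mapping is opened before vdev 3's, satisfying the
 * requirement that more recently removed vdevs be initialized first
 * (blocks of vdev 3's mapping object may need vdev 1's mapping in
 * order to be read).
 */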

void
spa_restart_removal(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	/*
	 * In general when this function is called there is no
	 * removal thread running. The only scenario where this
	 * is not true is during spa_import() where this function
	 * is called twice [once from spa_import_impl() and
	 * spa_async_resume()]. Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
	if (svr->svr_thread != NULL)
		return;

	if (!spa_writeable(spa))
		return;

	zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
	svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
	    0, &p0, TS_RUN, minclsyspri);
}
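
/*
 * Worked example for free_from_removing_vdev() below, with
 * hypothetical offsets: suppose the mapping is synced through offset
 * 100M, the currently-syncing txg T is copying up to 120M
 * (svr_max_offset_to_sync[T & TXG_MASK] == 120M), and a free arrives
 * for [95M, 125M). It is handled in three pieces:
 *
 *	[95M, 100M)  synced:	freed from the new location through the
 *				on-disk mapping (marked obsolete and
 *				remapped to metaslab_free_impl_cb).
 *	[100M, 120M) in flight:	recorded in svr_frees[T & TXG_MASK] and
 *				freed once the mapping syncs (see
 *				vdev_mapping_sync()).
 *	[120M, 125M) unvisited:	removed from svr_allocd_segs so the
 *				copy thread never copies it.
 */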

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t txg = spa_syncing_txg(spa);
	uint64_t max_offset_yet = 0;

	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vim));
	ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);

	mutex_enter(&svr->svr_lock);

	/*
	 * Remove the segment from the removing vdev's spacemap. This
	 * ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures
	 * that we know what is actually allocated on the new vdevs
	 * (needed if we cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread can not load this metaslab and then
	 * visit this offset between the time that we metaslab_free_concrete()
	 * and when we check to see if it has been visited.
	 *
	 * Note: The checkpoint flag is set to false as having/taking
	 * a checkpoint and removing a device can't happen at the same
	 * time.
	 */
	ASSERT(!spa_has_checkpoint(spa));
	metaslab_free_concrete(vd, offset, size, B_FALSE);

	uint64_t synced_size = 0;
	uint64_t synced_offset = 0;
	uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
	if (offset < max_offset_synced) {
		/*
		 * The mapping for this offset is already on disk.
		 * Free from the new location.
		 *
		 * Note that we use svr_max_synced_offset because it is
		 * updated atomically with respect to the in-core mapping.
		 * By contrast, vim_max_offset is not.
		 *
		 * This block may be split between a synced entry and an
		 * in-flight or unvisited entry. Only process the synced
		 * portion of it here.
		 */
		synced_size = MIN(size, max_offset_synced - offset);
		synced_offset = offset;

		ASSERT3U(max_offset_yet, <=, max_offset_synced);
		max_offset_yet = max_offset_synced;

		DTRACE_PROBE3(remove__free__synced,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, synced_size);

		size -= synced_size;
		offset += synced_size;
	}

	/*
	 * Look at all in-flight txgs starting from the currently syncing one
	 * and see if a section of this free is being copied. By starting from
	 * this txg and iterating forward, we might find that this region
	 * was copied in two different txgs and handle it appropriately.
	 */
	for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
		int txgoff = (txg + i) & TXG_MASK;
		if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
			/*
			 * The mapping for this offset is in flight, and
			 * will be synced in txg+i.
			 */
			uint64_t inflight_size = MIN(size,
			    svr->svr_max_offset_to_sync[txgoff] - offset);

			DTRACE_PROBE4(remove__free__inflight,
			    spa_t *, spa,
			    uint64_t, offset,
			    uint64_t, inflight_size,
			    uint64_t, txg + i);

			/*
			 * We copy data in order of increasing offset.
			 * Therefore the max_offset_to_sync[] must increase
			 * (or be zero, indicating that nothing is being
			 * copied in that txg).
			 */
			if (svr->svr_max_offset_to_sync[txgoff] != 0) {
				ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
				    >=, max_offset_yet);
				max_offset_yet =
				    svr->svr_max_offset_to_sync[txgoff];
			}

			/*
			 * We've already committed to copying this segment:
			 * we have allocated space elsewhere in the pool for
			 * it and have an IO outstanding to copy the data. We
			 * cannot free the space before the copy has
			 * completed, or else the copy IO might overwrite any
			 * new data. To free that space, we record the
			 * segment in the appropriate svr_frees tree and free
			 * the mapped space later, in the txg where we have
			 * completed the copy and synced the mapping (see
			 * vdev_mapping_sync).
			 */
			range_tree_add(svr->svr_frees[txgoff],
			    offset, inflight_size);
			size -= inflight_size;
			offset += inflight_size;

			/*
			 * This space is already accounted for as being
			 * done, because it is being copied in txg+i.
			 * However, if i!=0, then it is being copied in
			 * a future txg. If we crash after this txg
			 * syncs but before txg+i syncs, then the space
			 * will be free. Therefore we must account
			 * for the space being done in *this* txg
			 * (when it is freed) rather than the future txg
			 * (when it will be copied).
			 */
			ASSERT3U(svr->svr_bytes_done[txgoff], >=,
			    inflight_size);
			svr->svr_bytes_done[txgoff] -= inflight_size;
			svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
		}
	}
	ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);

	if (size > 0) {
		/*
		 * The copy thread has not yet visited this offset. Ensure
		 * that it doesn't.
		 */

		DTRACE_PROBE3(remove__free__unvisited,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, size);

		if (svr->svr_allocd_segs != NULL)
			range_tree_clear(svr->svr_allocd_segs, offset, size);

		/*
		 * Since we now do not need to copy this data, for
		 * accounting purposes we have done our job and can count
		 * it as completed.
		 */
		svr->svr_bytes_done[txg & TXG_MASK] += size;
	}
	mutex_exit(&svr->svr_lock);

	/*
	 * Now that we have dropped svr_lock, process the synced portion
	 * of this free.
	 */
	if (synced_size > 0) {
		vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);

		/*
		 * Note: this can only be called from syncing context,
		 * and the vdev_indirect_mapping is only changed from the
		 * sync thread, so we don't need svr_lock while doing
		 * metaslab_free_impl_cb.
		 */
		boolean_t checkpoint = B_FALSE;
		vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
		    metaslab_free_impl_cb, &checkpoint);
	}
}

/*
 * Stop an active removal and update the spa_removing phys.
 */
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));

	/* Ensure the removal thread has completed before we free the svr. */
	spa_vdev_remove_suspend(spa);

	ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);

	if (state == DSS_FINISHED) {
		spa_removing_phys_t *srp = &spa->spa_removing_phys;
		vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
			vdev_t *pvd = vdev_lookup_top(spa,
			    srp->sr_prev_indirect_vdev);
			ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
		}

		vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
		srp->sr_prev_indirect_vdev = vd->vdev_id;
	}
	spa->spa_removing_phys.sr_state = state;
	spa->spa_removing_phys.sr_end_time = gethrestime_sec();

	spa->spa_vdev_removal = NULL;
	spa_vdev_removal_destroy(svr);

	spa_sync_removing_state(spa, tx);

	vdev_config_dirty(spa->spa_root_vdev);
}

static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
	vdev_t *vd = arg;
	vdev_indirect_mark_obsolete(vd, offset, size);
	boolean_t checkpoint = B_FALSE;
	vdev_indirect_ops.vdev_op_remap(vd, offset, size,
	    metaslab_free_impl_cb, &checkpoint);
}

/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	uint64_t txg = dmu_tx_get_txg(tx);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	vdev_indirect_mapping_add_entries(vim,
	    &svr->svr_new_segments[txg & TXG_MASK], tx);
	vdev_indirect_births_add_entry(vd->vdev_indirect_births,
	    vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);

	/*
	 * Free the copied data for anything that was freed while the
	 * mapping entries were in flight.
	 */
	mutex_enter(&svr->svr_lock);
	range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
	    free_mapped_segment_cb, vd);
	ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
	    vdev_indirect_mapping_max_offset(vim));
	svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
	mutex_exit(&svr->svr_lock);

	spa_sync_removing_state(spa, tx);
}

typedef struct vdev_copy_segment_arg {
	spa_t *vcsa_spa;
	dva_t *vcsa_dest_dva;
	uint64_t vcsa_txg;
	range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;

static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
	vdev_copy_segment_arg_t *vcsa = arg;
	spa_t *spa = vcsa->vcsa_spa;
	blkptr_t bp = { 0 };

	BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(&bp, DMU_OT_NONE);
	BP_SET_LEVEL(&bp, 0);
	BP_SET_DEDUP(&bp, 0);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);

	DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
	DVA_SET_OFFSET(&bp.blk_dva[0],
	    DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
	DVA_SET_ASIZE(&bp.blk_dva[0], size);

	zio_free(spa, vcsa->vcsa_txg, &bp);
}

/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
 */
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
	vdev_copy_segment_arg_t *vcsa = zio->io_private;

	range_tree_vacate(vcsa->vcsa_obsolete_segs,
	    unalloc_seg, vcsa);
	range_tree_destroy(vcsa->vcsa_obsolete_segs);
	kmem_free(vcsa, sizeof (*vcsa));

	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}

/*
 * The write of the new location is done.
 */
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
	vdev_copy_arg_t *vca = zio->io_private;

	abd_free(zio->io_abd);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes -= zio->io_size;
	cv_signal(&vca->vca_cv);
	mutex_exit(&vca->vca_lock);
}

/*
 * The read of the old location is done. The parent zio is the write to
 * the new location. Allow it to start.
 */
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
	zio_nowait(zio_unique_parent(zio));
}

/*
 * If the old and new vdevs are mirrors, we will read both sides of the old
 * mirror, and write each copy to the corresponding side of the new mirror.
 * If the old and new vdevs have a different number of children, we will do
 * this as best as possible. Since we aren't verifying checksums, this
 * ensures that as long as there's a good copy of the data, we'll have a
 * good copy after the removal, even if there's silent damage to one side
 * of the mirror. If we're removing a mirror that has some silent damage,
 * we'll have exactly the same damage in the new location (assuming that
 * the new location is also a mirror).
 *
 * We accomplish this by creating a tree of zio_t's, with as many writes as
 * there are "children" of the new vdev (a non-redundant vdev counts as one
 * child, a 2-way mirror has 2 children, etc). Each write has an associated
 * read from a child of the old vdev. Typically there will be the same
 * number of children of the old and new vdevs. However, if there are more
 * children of the new vdev, some child(ren) of the old vdev will be issued
 * multiple reads. If there are more children of the old vdev, some copies
 * will be dropped.
 *
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 * write(new vdev, child 0)        write(new vdev, child 1)
 *      |                               |
 * read(old vdev, child 0)         read(old vdev, child 1)
 *
 * Child zio's complete before their parents complete. However, zio's
 * created with zio_vdev_child_io() may be issued before their children
 * complete. In this case we need to make sure that the children (reads)
 * complete before the parents (writes) are *issued*. We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
 *
 * The spa_config_lock must be held while zio's created by
 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
 * zio is needed to release the spa_config_lock after all the reads and
 * writes complete. (Note that we can't grab the config lock for each read,
 * because it is not reentrant - we could deadlock with a thread waiting
 * for a write lock.)
 */
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
    vdev_t *source_vd, uint64_t source_offset,
    vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
	ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes += size;
	mutex_exit(&vca->vca_lock);

	abd_t *abd = abd_alloc_for_io(size, B_FALSE);

	vdev_t *source_child_vd;
	if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
		/*
		 * Source and dest are both mirrors. Copy from the same
		 * child id as we are copying to (wrapping around if there
		 * are more dest children than source children).
		 */
		source_child_vd =
		    source_vd->vdev_child[dest_id % source_vd->vdev_children];
	} else {
		source_child_vd = source_vd;
	}

	zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
	    dest_child_vd, dest_offset, abd, size,
	    ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_write_done, vca);

	zio_nowait(zio_vdev_child_io(write_zio, NULL,
	    source_child_vd, source_offset, abd, size,
	    ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_read_done, vca));
}
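
/*
 * For example (hypothetical configuration): copying from a 2-way
 * mirror to a 3-way mirror, dest children 0, 1 and 2 read from source
 * children 0, 1 and 0 respectively (dest_id % vdev_children above),
 * so one source child is read twice. Copying from a 3-way mirror to a
 * 2-way mirror reads only source children 0 and 1; the third copy of
 * the data is never consulted.
 */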

/*
 * Allocate a new location for this segment, and create the zio_t's to
 * read from the old location and write to the new location.
 */
static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
    uint64_t maxalloc, uint64_t txg,
    vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_entry_t *entry;
	dva_t dst = { 0 };
	uint64_t start = range_tree_min(segs);

	ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);

	uint64_t size = range_tree_span(segs);
	if (range_tree_span(segs) > maxalloc) {
		/*
		 * We can't allocate all the segments. Prefer to end
		 * the allocation at the end of a segment, thus avoiding
		 * additional split blocks.
		 */
		range_seg_t search;
		avl_index_t where;
		search.rs_start = start + maxalloc;
		search.rs_end = search.rs_start;
		range_seg_t *rs = avl_find(&segs->rt_root, &search, &where);
		if (rs == NULL) {
			rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE);
		} else {
			rs = AVL_PREV(&segs->rt_root, rs);
		}
		if (rs != NULL) {
			size = rs->rs_end - start;
		} else {
			/*
			 * There are no segments that end before maxalloc.
			 * I.e. the first segment is larger than maxalloc,
			 * so we must split it.
			 */
			size = maxalloc;
		}
	}
	ASSERT3U(size, <=, maxalloc);

	/*
	 * An allocation class might not have any remaining vdevs or space
	 */
	metaslab_class_t *mc = mg->mg_class;
	if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
		mc = spa_normal_class(spa);
	int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
	    zal, 0);
	if (error == ENOSPC && mc != spa_normal_class(spa)) {
		error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
		    &dst, 0, NULL, txg, 0, zal, 0);
	}
	if (error != 0)
		return (error);

	/*
	 * Determine the ranges that are not actually needed. Offsets are
	 * relative to the start of the range to be copied (i.e. relative
	 * to the local variable "start").
	 */
	range_tree_t *obsolete_segs = range_tree_create(NULL, NULL);

	range_seg_t *rs = avl_first(&segs->rt_root);
	ASSERT3U(rs->rs_start, ==, start);
	uint64_t prev_seg_end = rs->rs_end;
	while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) {
		if (rs->rs_start >= start + size) {
			break;
		} else {
			range_tree_add(obsolete_segs,
			    prev_seg_end - start,
			    rs->rs_start - prev_seg_end);
		}
		prev_seg_end = rs->rs_end;
	}
	/* We don't end in the middle of an obsolete range */
	ASSERT3U(start + size, <=, prev_seg_end);

	range_tree_clear(segs, start, size);

	/*
	 * We can't have any padding of the allocated size, otherwise we will
	 * misunderstand what's allocated, and the size of the mapping.
	 * The caller ensures this will be true by passing in a size that is
	 * aligned to the worst (highest) ashift in the pool.
	 */
	ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);

	entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
	DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
	entry->vime_mapping.vimep_dst = dst;
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		entry->vime_obsolete_count = range_tree_space(obsolete_segs);
	}

	vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
	vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
	vcsa->vcsa_obsolete_segs = obsolete_segs;
	vcsa->vcsa_spa = spa;
	vcsa->vcsa_txg = txg;

	/*
	 * See comment before spa_vdev_copy_one_child().
	 */
	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
	zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
	    spa_vdev_copy_segment_done, vcsa, 0);
	vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
	if (dest_vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < dest_vd->vdev_children; i++) {
			vdev_t *child = dest_vd->vdev_child[i];
			spa_vdev_copy_one_child(vca, nzio, vd, start,
			    child, DVA_GET_OFFSET(&dst), i, size);
		}
	} else {
		spa_vdev_copy_one_child(vca, nzio, vd, start,
		    dest_vd, DVA_GET_OFFSET(&dst), -1, size);
	}
	zio_nowait(nzio);

	list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
	ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
	vdev_dirty(vd, 0, NULL, txg);

	return (0);
}
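
/*
 * Worked example with hypothetical offsets: if "segs" contains
 * [0, 100K) and [150K, 200K) (relative to the removing vdev) and
 * maxalloc >= 200K, a single mapping entry covering the whole
 * [0, 200K) span is created and 200K is allocated and copied,
 * including the 50K of "unnecessary" gap data. The gap is recorded in
 * obsolete_segs as [100K, 150K) relative to "start", and once all the
 * copy zios complete it is freed on the destination via unalloc_seg()
 * (see spa_vdev_copy_segment_done()). One large entry plus an obsolete
 * list keeps the mapping small, as described in the comment above
 * vdev_removal_max_span.
 */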

/*
 * Complete the removal of a toplevel vdev. This is called as a
 * synctask in the same txg that we will sync out the new config (to the
 * MOS object) which indicates that this vdev is indirect.
 */
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
	}

	ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
	    spa->spa_removing_phys.sr_to_copy);

	vdev_destroy_spacemaps(vd, tx);

	/* destroy leaf zaps, if any */
	ASSERT3P(svr->svr_zaplist, !=, NULL);
	for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
		vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
	}
	fnvlist_free(svr->svr_zaplist);

	spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
	/* vd->vdev_path is not available here */
	spa_history_log_internal(spa, "vdev remove completed", tx,
	    "%s vdev %llu", spa_name(spa), vd->vdev_id);
}

static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
	ASSERT3P(zlist, !=, NULL);
	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);

	if (vd->vdev_leaf_zap != 0) {
		char zkey[32];
		(void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64,
		    VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap);
		fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
	}

	for (uint64_t id = 0; id < vd->vdev_children; id++) {
		vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
	}
}

static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
	vdev_t *ivd;
	dmu_tx_t *tx;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	/*
	 * First, build a list of leaf zaps to be destroyed.
	 * This is passed to the sync context thread,
	 * which does the actual unlinking.
	 */
	svr->svr_zaplist = fnvlist_alloc();
	vdev_remove_enlist_zaps(vd, svr->svr_zaplist);

	ivd = vdev_add_parent(vd, &vdev_indirect_ops);
	ivd->vdev_removing = 0;

	vd->vdev_leaf_zap = 0;

	vdev_remove_child(ivd, vd);
	vdev_compact_children(ivd);

	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
	    0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	/*
	 * Indicate that this thread has exited.
	 * After this, we can not use svr.
	 */
	mutex_enter(&svr->svr_lock);
	svr->svr_thread = NULL;
	cv_broadcast(&svr->svr_cv);
	mutex_exit(&svr->svr_lock);
}
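
/*
 * Note on the list built by vdev_remove_enlist_zaps() above: with
 * VDEV_REMOVAL_ZAP_OBJS defined as "lzap", a leaf ZAP object 157
 * (illustrative object number) is recorded under the key "lzap-157".
 * vdev_remove_complete_sync() walks these pairs and destroys each
 * object via vdev_destroy_unlink_zap().
 */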

/*
 * Complete the removal of a toplevel vdev. This is called in open
 * context by the removal thread after we have copied all of the vdev's
 * data.
 */
static void
vdev_remove_complete(spa_t *spa)
{
	uint64_t txg;

	/*
	 * Wait for any deferred frees to be synced before we call
	 * vdev_metaslab_fini()
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	txg = spa_vdev_enter(spa);
	vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);

	zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
	    vd->vdev_id, txg);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
		vd->vdev_mg = NULL;
	}
	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);

	vdev_remove_replace_with_indirect(vd, txg);

	/*
	 * We now release the locks, allowing spa_sync to run and finish the
	 * removal via vdev_remove_complete_sync in syncing context.
	 *
	 * Note that we hold on to the vdev_t that has been replaced. Since
	 * it isn't part of the vdev tree any longer, it can't be concurrently
	 * manipulated, even while we don't have the config lock.
	 */
	(void) spa_vdev_exit(spa, NULL, txg, 0);

	/*
	 * Top ZAP should have been transferred to the indirect vdev in
	 * vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_top_zap);

	/*
	 * Leaf ZAP should have been moved in
	 * vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_leaf_zap);

	txg = spa_vdev_enter(spa);
	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
	/*
	 * Request to update the config and the config cachefile.
	 */
	vdev_config_dirty(spa->spa_root_vdev);
	(void) spa_vdev_exit(spa, vd, txg, 0);

	spa_event_post(ev);
}

/*
 * Evacuates a segment of size at most max_alloc from the vdev
 * via repeated calls to spa_vdev_copy_segment. If an allocation
 * fails, the pool is probably too fragmented to handle such a
 * large size, so decrease max_alloc so that the caller will not try
 * this size again this txg.
 */
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
    uint64_t *max_alloc, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	mutex_enter(&svr->svr_lock);

	/*
	 * Determine how big of a chunk to copy. We can allocate up
	 * to max_alloc bytes, and we can span up to vdev_removal_max_span
	 * bytes of unallocated space at a time. "segs" will track the
	 * allocated segments that we are copying. We may also be copying
	 * free segments (of up to vdev_removal_max_span bytes).
	 */
	range_tree_t *segs = range_tree_create(NULL, NULL);
	for (;;) {
		range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
		if (rs == NULL)
			break;

		uint64_t seg_length;

		if (range_tree_is_empty(segs)) {
			/* need to truncate the first seg based on max_alloc */
			seg_length =
			    MIN(rs->rs_end - rs->rs_start, *max_alloc);
		} else {
			if (rs->rs_start - range_tree_max(segs) >
			    vdev_removal_max_span) {
				/*
				 * Including this segment would cause us to
				 * copy a larger unneeded chunk than is
				 * allowed.
				 */
				break;
			} else if (rs->rs_end - range_tree_min(segs) >
			    *max_alloc) {
				/*
				 * This additional segment would extend past
				 * max_alloc. Rather than splitting this
				 * segment, leave it for the next mapping.
				 */
				break;
			} else {
				seg_length = rs->rs_end - rs->rs_start;
			}
		}

		range_tree_add(segs, rs->rs_start, seg_length);
		range_tree_remove(svr->svr_allocd_segs,
		    rs->rs_start, seg_length);
	}

	if (range_tree_is_empty(segs)) {
		mutex_exit(&svr->svr_lock);
		range_tree_destroy(segs);
		return;
	}

	if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
		dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
		    svr, 0, ZFS_SPACE_CHECK_NONE, tx);
	}

	svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);

	/*
	 * Note: this is the amount of *allocated* space
	 * that we are taking care of each txg.
	 */
	svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);

	mutex_exit(&svr->svr_lock);

	zio_alloc_list_t zal;
	metaslab_trace_init(&zal);
	uint64_t thismax = SPA_MAXBLOCKSIZE;
	while (!range_tree_is_empty(segs)) {
		int error = spa_vdev_copy_segment(vd,
		    segs, thismax, txg, vca, &zal);

		if (error == ENOSPC) {
			/*
			 * Cut our segment in half, and don't try this
			 * segment size again this txg. Note that the
			 * allocation size must be aligned to the highest
			 * ashift in the pool, so that the allocation will
			 * not be padded out to a multiple of the ashift,
			 * which could cause us to think that this mapping
			 * is larger than we intended.
			 */
			ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
			uint64_t attempted =
			    MIN(range_tree_span(segs), thismax);
			thismax = P2ROUNDUP(attempted / 2,
			    1 << spa->spa_max_ashift);
			/*
			 * The minimum-size allocation can not fail.
			 */
			ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
			*max_alloc = attempted - (1 << spa->spa_max_ashift);
		} else {
			ASSERT0(error);

			/*
			 * We've performed an allocation, so reset the
			 * alloc trace list.
			 */
			metaslab_trace_fini(&zal);
			metaslab_trace_init(&zal);
		}
	}
	metaslab_trace_fini(&zal);
	range_tree_destroy(segs);
}
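
/*
 * Worked example for the segment-gathering loop above (hypothetical
 * offsets, with vdev_removal_max_span at its default of 32K): given
 * allocated segments [0, 16K), [20K, 30K) and [100K, 110K), the first
 * two are gathered into one chunk, since the 4K gap between them is
 * within vdev_removal_max_span, while [100K, 110K) is left for the
 * next chunk because its 70K gap is too large. The 4K gap is copied
 * as "unnecessary" data and recorded as obsolete by
 * spa_vdev_copy_segment().
 */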

/*
 * The removal thread operates in open context. It iterates over all
 * allocated space in the vdev, by loading each metaslab's spacemap.
 * For each contiguous segment of allocated space (capping the segment
 * size at SPA_MAXBLOCKSIZE), we:
 * - Allocate space for it on another vdev.
 * - Create a new mapping from the old location to the new location
 *   (as a record in svr_new_segments).
 * - Initiate a logical read zio to get the data off the removing disk.
 * - In the read zio's done callback, initiate a logical write zio to
 *   write it to the new vdev.
 * Note that all of this will take effect when a particular TXG syncs.
 * The sync thread ensures that all the phys reads and writes for the syncing
 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
 * (see vdev_mapping_sync()).
 */
static void
spa_vdev_remove_thread(void *arg)
{
	spa_t *spa = arg;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_copy_arg_t vca;
	uint64_t max_alloc = zfs_remove_max_segment;
	uint64_t last_txg = 0;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);

	ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(vd->vdev_removing);
	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT(vim != NULL);

	mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
	vca.vca_outstanding_bytes = 0;

	mutex_enter(&svr->svr_lock);

	/*
	 * Start from vim_max_offset so we pick up where we left off
	 * if we are restarting the removal after opening the pool.
	 */
	uint64_t msi;
	for (msi = start_offset >> vd->vdev_ms_shift;
	    msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];
		ASSERT3U(msi, <=, vd->vdev_ms_count);

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++) {
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		}

		/*
		 * If the metaslab has ever been allocated from (ms_sm!=NULL),
		 * read the allocated segments from the space map object
		 * into svr_allocd_segs. Since we do this while holding
		 * svr_lock and ms_sync_lock, concurrent frees (which
		 * would have modified the space map) will wait for us
		 * to finish loading the spacemap, and then take the
		 * appropriate action (see free_from_removing_vdev()).
		 */
		if (msp->ms_sm != NULL) {
			space_map_t *sm = NULL;

			/*
			 * We have to open a new space map here, because
			 * ms_sm's sm_length and sm_alloc may not reflect
			 * what's in the object contents, if we are in between
			 * metaslab_sync() and metaslab_sync_done().
			 */
			VERIFY0(space_map_open(&sm,
			    spa->spa_dsl_pool->dp_meta_objset,
			    msp->ms_sm->sm_object, msp->ms_sm->sm_start,
			    msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
			space_map_update(sm);
			VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
			    SM_ALLOC));
			space_map_close(sm);

			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * When we are resuming from a paused removal (i.e.
			 * when importing a pool with a removal in progress),
			 * discard any state that we have already processed.
			 */
			range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
		}
		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		vca.vca_msp = msp;
		zfs_dbgmsg("copying %llu segments for metaslab %llu",
		    avl_numnodes(&svr->svr_allocd_segs->rt_root),
		    msp->ms_id);

		while (!svr->svr_thread_exit &&
		    !range_tree_is_empty(svr->svr_allocd_segs)) {

			mutex_exit(&svr->svr_lock);

			/*
			 * We need to periodically drop the config lock so that
			 * writers can get in. Additionally, we can't wait
			 * for a txg to sync while holding a config lock
			 * (since a waiting writer could cause a 3-way deadlock
			 * with the sync thread, which also gets a config
			 * lock for reader). So we can't hold the config lock
			 * while calling dmu_tx_assign().
			 */
			spa_config_exit(spa, SCL_CONFIG, FTAG);

			/*
			 * This delay will pause the removal around the point
			 * specified by zfs_remove_max_bytes_pause. We do this
			 * solely from the test suite or during debugging.
			 */
			uint64_t bytes_copied =
			    spa->spa_removing_phys.sr_copied;
			for (int i = 0; i < TXG_SIZE; i++)
				bytes_copied += svr->svr_bytes_done[i];
			while (zfs_remove_max_bytes_pause <= bytes_copied &&
			    !svr->svr_thread_exit)
				delay(hz);

			mutex_enter(&vca.vca_lock);
			while (vca.vca_outstanding_bytes >
			    zfs_remove_max_copy_bytes) {
				cv_wait(&vca.vca_cv, &vca.vca_lock);
			}
			mutex_exit(&vca.vca_lock);

			dmu_tx_t *tx =
			    dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);

			VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
			uint64_t txg = dmu_tx_get_txg(tx);

			/*
			 * Reacquire the vdev_config lock. The vdev_t
			 * that we're removing may have changed, e.g. due
			 * to a vdev_attach or vdev_detach.
			 */
			spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
			vd = vdev_lookup_top(spa, svr->svr_vdev_id);

			if (txg != last_txg)
				max_alloc = zfs_remove_max_segment;
			last_txg = txg;

			spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);

			dmu_tx_commit(tx);
			mutex_enter(&svr->svr_lock);
		}
	}

	mutex_exit(&svr->svr_lock);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * Wait for all copies to finish before cleaning up the vca.
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	ASSERT0(vca.vca_outstanding_bytes);

	mutex_destroy(&vca.vca_lock);
	cv_destroy(&vca.vca_cv);

	if (svr->svr_thread_exit) {
		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
		svr->svr_thread = NULL;
		cv_broadcast(&svr->svr_cv);
		mutex_exit(&svr->svr_lock);
	} else {
		ASSERT0(range_tree_space(svr->svr_allocd_segs));
		vdev_remove_complete(spa);
	}
}
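
/*
 * Debugging hint (platform-specific, offered as a sketch rather than a
 * stable interface): the zfs_dbgmsg() records emitted by this thread
 * can be inspected on illumos with the ::zfs_dbgmsg dcmd in mdb -k,
 * and overall progress (bytes copied vs. bytes to copy, tracked in
 * spa_removing_phys) is reported by "zpool status".
 */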

void
spa_vdev_remove_suspend(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	mutex_enter(&svr->svr_lock);
	svr->svr_thread_exit = B_TRUE;
	while (svr->svr_thread != NULL)
		cv_wait(&svr->svr_cv, &svr->svr_lock);
	svr->svr_thread_exit = B_FALSE;
	mutex_exit(&svr->svr_lock);
}

/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);
	return (0);
}

/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer being removing.
 */
/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT3P(svr->svr_thread, ==, NULL);

	spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (vdev_obsolete_counts_are_precise(vd)) {
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
	}

	if (vdev_obsolete_sm_object(vd) != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(vdev_obsolete_sm_object(vd), ==,
		    space_map_object(vd->vdev_obsolete_sm));

		space_map_free(vd->vdev_obsolete_sm, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	}
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&svr->svr_new_segments[i]));
		ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
		    vdev_indirect_mapping_max_offset(vim));
	}

	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];

		if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
			break;

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		for (int i = 0; i < TXG_DEFER_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_defer[i]));
		ASSERT0(range_tree_space(msp->ms_freed));

		if (msp->ms_sm != NULL) {
			/*
			 * Assert that the in-core spacemap has the same
			 * length as the on-disk one, so we can use the
			 * existing in-core spacemap to load it from disk.
			 */
			ASSERT3U(msp->ms_sm->sm_alloc, ==,
			    msp->ms_sm->sm_phys->smp_alloc);
			ASSERT3U(msp->ms_sm->sm_length, ==,
			    msp->ms_sm->sm_phys->smp_objsize);

			mutex_enter(&svr->svr_lock);
			VERIFY0(space_map_load(msp->ms_sm,
			    svr->svr_allocd_segs, SM_ALLOC));
			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * Clear everything past what has been synced,
			 * because we have not allocated mappings for it yet.
			 */
			uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
			uint64_t sm_end = msp->ms_sm->sm_start +
			    msp->ms_sm->sm_size;
			if (sm_end > syncd)
				range_tree_clear(svr->svr_allocd_segs,
				    syncd, sm_end - syncd);

			mutex_exit(&svr->svr_lock);
		}
		mutex_exit(&msp->ms_lock);

		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs,
		    free_mapped_segment_cb, vd);
		mutex_exit(&svr->svr_lock);
	}

	/*
	 * Note: this must happen after we invoke free_mapped_segment_cb,
	 * because it adds to the obsolete_segments.
	 */
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);

	ASSERT3U(vic->vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = 0;

	ASSERT3U(vic->vic_births_object, ==,
	    vdev_indirect_births_object(vd->vdev_indirect_births));
	vdev_indirect_births_close(vd->vdev_indirect_births);
	vd->vdev_indirect_births = NULL;
	vdev_indirect_births_free(mos, vic->vic_births_object, tx);
	vic->vic_births_object = 0;

	/*
	 * We may have processed some frees from the removing vdev in this
	 * txg, thus increasing svr_bytes_done; discard that here to
	 * satisfy the assertions in spa_vdev_removal_destroy().
	 * Note that future txg's can not have any bytes_done, because
	 * future TXG's are only modified from open context, and we have
	 * already shut down the copying thread.
	 */
	svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
	spa_finish_removal(spa, DSS_CANCELED, tx);

	vd->vdev_removing = B_FALSE;
	vdev_config_dirty(vd);

	zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
	    vd->vdev_id, dmu_tx_get_txg(tx));
	spa_history_log_internal(spa, "vdev remove canceled", tx,
	    "%s vdev %llu %s", spa_name(spa),
	    vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
}

int
spa_vdev_remove_cancel(spa_t *spa)
{
	spa_vdev_remove_suspend(spa);

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);

	uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;

	int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
	    spa_vdev_remove_cancel_sync, NULL, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);

	if (error == 0) {
		spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
		vdev_t *vd = vdev_lookup_top(spa, vdid);
		metaslab_group_activate(vd->vdev_mg);
		spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
	}

	return (error);
}
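
/*
 * Illustratively, this is the entry point behind "zpool remove -s
 * <pool>": the copy thread is suspended, the sync task above frees
 * everything the partial mapping points to, and on success the vdev
 * resumes normal allocations via metaslab_group_activate(). Data that
 * was already copied is simply freed; nothing is copied back.
 */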
 */
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * This check is necessary so that we do not dirty the
	 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
	 * is nothing to do.  Dirtying it every time would prevent us
	 * from syncing-to-convergence.
	 */
	if (svr->svr_bytes_done[txgoff] == 0)
		return;

	/*
	 * Update progress accounting.
	 */
	spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
	svr->svr_bytes_done[txgoff] = 0;

	spa_sync_removing_state(spa, tx);
}

static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
	uint64_t id = vd->vdev_id;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t last_vdev = (id == (rvd->vdev_children - 1));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	vdev_free(vd);

	if (last_vdev) {
		vdev_compact_children(rvd);
	} else {
		vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
		vdev_add_child(rvd, vd);
	}
	vdev_config_dirty(rvd);

	/*
	 * Reassess the health of our root vdev.
	 */
	vdev_reopen(rvd);
}

/*
 * Remove a log device.  The config lock is held for the specified TXG.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	ASSERT(vd->vdev_islog);
	ASSERT(vd == vd->vdev_top);

	/*
	 * Stop allocating from this vdev.
	 */
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * Evacuate the device.  We don't hold the config lock as writer
	 * since we need to do I/O but we do keep the
	 * spa_namespace_lock held.  Once this completes the device
	 * should no longer have any blocks allocated on it.
	 */
	if (vd->vdev_islog) {
		if (vd->vdev_stat.vs_alloc != 0)
			error = spa_reset_logs(spa);
	}

	*txg = spa_vdev_config_enter(spa);

	if (error != 0) {
		metaslab_group_activate(mg);
		return (error);
	}
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * The evacuation succeeded.  Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);

	spa_history_log_internal(spa, "vdev remove", NULL,
	    "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");

	/* Make sure these changes are synced */
	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);

	/* Stop initializing */
	(void) vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);

	*txg = spa_vdev_config_enter(spa);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * The top ZAP should have been destroyed by vdev_remove_empty.
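	 * (We assume vdev_remove_empty has already run in syncing
	 * context for this evacuated log vdev; here we only assert
	 * the result.)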
	 */
	ASSERT0(vd->vdev_top_zap);
	/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
	ASSERT0(vd->vdev_leaf_zap);

	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	if (list_link_active(&vd->vdev_state_dirty_node))
		vdev_state_clean(vd);
	if (list_link_active(&vd->vdev_config_dirty_node))
		vdev_config_clean(vd);

	/*
	 * Clean up the vdev namespace.
	 */
	vdev_remove_make_hole_and_free(vd);

	if (ev != NULL)
		spa_event_post(ev);

	return (0);
}

static int
spa_vdev_remove_top_check(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd != vd->vdev_top)
		return (SET_ERROR(ENOTSUP));

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return (SET_ERROR(ENOTSUP));

	/* available space in the pool's normal class */
	uint64_t available = dsl_dir_space_available(
	    spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);

	metaslab_class_t *mc = vd->vdev_mg->mg_class;

	/*
	 * When removing a vdev from an allocation class that has
	 * remaining vdevs, include available space from the class.
	 */
	if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
		uint64_t class_avail = metaslab_class_get_space(mc) -
		    metaslab_class_get_alloc(mc);

		/* add class space, adjusted for overhead */
		available += (class_avail * 94) / 100;
	}

	/*
	 * There has to be enough free space to remove the
	 * device and leave double the "slop" space (i.e. we
	 * must leave at least 3% of the pool free, in addition to
	 * the normal slop space).
	 */
	if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * There cannot be a removal in progress.
	 */
	if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	/*
	 * The device must have all its data.
	 */
	if (!vdev_dtl_empty(vd, DTL_MISSING) ||
	    !vdev_dtl_empty(vd, DTL_OUTAGE))
		return (SET_ERROR(EBUSY));

	/*
	 * The device must be healthy.
	 */
	if (!vdev_readable(vd))
		return (SET_ERROR(EIO));

	/*
	 * All vdevs in the normal class must have the same ashift.
	 */
	if (spa->spa_max_ashift != spa->spa_min_ashift) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * In addition to the ashift constraint above, no top-level
	 * vdev may be raidz.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	int num_indirect = 0;
	for (uint64_t id = 0; id < rvd->vdev_children; id++) {
		vdev_t *cvd = rvd->vdev_child[id];
		if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
			ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
		if (cvd->vdev_ops == &vdev_indirect_ops)
			num_indirect++;
		if (!vdev_is_concrete(cvd))
			continue;
		if (cvd->vdev_ops == &vdev_raidz_ops)
			return (SET_ERROR(EINVAL));
		/*
		 * Any mirror must be composed of leaf vdevs only.
		 */
		if (cvd->vdev_ops == &vdev_mirror_ops) {
			for (uint64_t cid = 0;
			    cid < cvd->vdev_children; cid++) {
				vdev_t *tmp = cvd->vdev_child[cid];
				if (!tmp->vdev_ops->vdev_op_leaf)
					return (SET_ERROR(EINVAL));
			}
		}
	}

	return (0);
}

/*
 * Initiate removal of a top-level vdev, reducing the total space in the pool.
 * The config lock is held for the specified TXG.
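 *
 * In outline (a sketch of the steps taken below, not a separate API):
 *
 *	spa_vdev_remove_top_check(vd)		pre-flight error checks
 *	metaslab_group_passivate(mg)		stop new allocations
 *	spa_reset_logs(spa)			no "stubby" log blocks
 *	vdev_initialize_stop_all(vd, ...)	pause initializing
 *	spa_vdev_remove_top_check(vd)		re-check after lock drop
 *	vdev_remove_initiate_sync(...)		sync task starts the copy
 *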
 * Once initiated, evacuation of all allocated space (copying it to
 * other vdevs) happens in the background (see spa_vdev_remove_thread()),
 * and can be canceled (see spa_vdev_remove_cancel()).  If successful,
 * the vdev will be transformed into an indirect vdev (see
 * vdev_remove_complete()).
 */
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
	spa_t *spa = vd->vdev_spa;
	int error;

	/*
	 * Check for errors up-front, so that we don't waste time
	 * passivating the metaslab group and clearing the ZIL if there
	 * are errors.
	 */
	error = spa_vdev_remove_top_check(vd);
	if (error != 0)
		return (error);

	/*
	 * Stop allocating from this vdev.  Note that we must check
	 * that this is not the only device in the pool before
	 * passivating, otherwise we will not be able to make
	 * progress because we can't allocate from any vdevs.
	 * The above check for sufficient free space serves this
	 * purpose.
	 */
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * We must ensure that no "stubby" log blocks are allocated
	 * on the device to be removed.  These blocks could be
	 * written at any time, including while we are in the middle
	 * of copying them.
	 */
	error = spa_reset_logs(spa);

	/*
	 * We stop any initializing that is currently in progress but leave
	 * the state as "active".  This will allow the initializing to resume
	 * if the removal is canceled sometime later.
	 */
	vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);

	*txg = spa_vdev_config_enter(spa);

	/*
	 * Things might have changed while the config lock was dropped
	 * (e.g. space usage).  Check for errors again.
	 */
	if (error == 0)
		error = spa_vdev_remove_top_check(vd);

	if (error != 0) {
		metaslab_group_activate(mg);
		spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
		return (error);
	}

	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time.  As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock.  During each step the configuration is synced out.
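 *
 * A minimal sketch of that pattern, as used by the helpers above
 * (illustrative only; the waits and error handling are elided):
 *
 *	uint64_t txg = spa_vdev_enter(spa);	namespace + config locks
 *	...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);	sync config out
 *	...					do I/O, wait for txgs
 *	txg = spa_vdev_config_enter(spa);	re-grab the config lock
 *	...
 *	return (spa_vdev_exit(spa, NULL, txg, error));	drop both locks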
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
	sysevent_t *ev = NULL;

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

		if (!locked)
			return (spa_vdev_exit(spa, NULL, txg, error));

		return (error);
	}

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			char *nvstr = fnvlist_lookup_string(nv,
			    ZPOOL_CONFIG_PATH);
			spa_history_log_internal(spa, "vdev remove", NULL,
			    "%s vdev (%s) %s", spa_name(spa),
			    VDEV_TYPE_SPARE, nvstr);
			if (vd == NULL)
				vd = spa_lookup_by_guid(spa, guid, B_TRUE);
			ev = spa_event_create(spa, vd, NULL,
			    ESC_ZFS_VDEV_REMOVE_AUX);
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		spa_history_log_internal(spa, "vdev remove", NULL,
		    "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
		/*
		 * Cache devices can always be removed.
		 */
		vd = spa_lookup_by_guid(spa, guid, B_TRUE);
		ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		error = spa_vdev_remove_log(vd, &txg);
	} else if (vd != NULL) {
		ASSERT(!locked);
		error = spa_vdev_remove_top(vd, &txg);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
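		 * Return ENOENT so that the caller (e.g. the ioctl path
		 * behind "zpool remove") can distinguish "no such
		 * device" from a failed removal attempt.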
		 */
		error = SET_ERROR(ENOENT);
	}

	if (!locked)
		error = spa_vdev_exit(spa, NULL, txg, error);

	if (ev != NULL) {
		if (error != 0) {
			spa_event_discard(ev);
		} else {
			spa_event_post(ev);
		}
	}

	return (error);
}

int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
	prs->prs_state = spa->spa_removing_phys.sr_state;

	if (prs->prs_state == DSS_NONE)
		return (SET_ERROR(ENOENT));

	prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
	prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
	prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
	prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
	prs->prs_copied = spa->spa_removing_phys.sr_copied;

	if (spa->spa_vdev_removal != NULL) {
		for (int i = 0; i < TXG_SIZE; i++) {
			prs->prs_copied +=
			    spa->spa_vdev_removal->svr_bytes_done[i];
		}
	}

	prs->prs_mapping_memory = 0;
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != -1) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
		vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}

	return (0);
}
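
/*
 * For illustration, a consumer of spa_removal_get_stats() could derive
 * percent-complete roughly as follows (a hypothetical sketch, not code
 * that exists in this file):
 *
 *	pool_removal_stat_t prs;
 *
 *	if (spa_removal_get_stats(spa, &prs) == 0 && prs.prs_to_copy != 0) {
 *		uint64_t pct = prs.prs_copied * 100 / prs.prs_to_copy;
 *		(report pct and prs.prs_mapping_memory)
 *	}
 *
 * Note that prs_copied includes bytes copied in not-yet-synced txgs, so
 * it can briefly run ahead of what is recorded on disk in sr_copied.
 */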