/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool. Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * without raidz. (Note that members of a mirror can also be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 * - Disable allocations from this device (spa_vdev_remove_top).
 *
 * - From a new thread (spa_vdev_remove_thread), copy data from
 * the removing vdev to a different vdev. The copy happens in open
 * context (spa_vdev_copy_impl) and issues a sync task
 * (vdev_mapping_sync) so the sync thread can update the partial
 * indirect mappings in core and on disk.
 *
 * - If a free happens during a removal, it is freed from the
 * removing vdev, and if it has already been copied, from the new
 * location as well (free_from_removing_vdev).
 *
 * - After the removal is completed, the copy thread converts the vdev
 * into an indirect vdev (vdev_remove_complete) before instructing
 * the sync thread to destroy the space maps and finish the removal
 * (spa_finish_removal).
 */

typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

typedef struct vdev_copy_seg_arg {
	vdev_copy_arg_t	*vcsa_copy_arg;
	uint64_t	vcsa_txg;
	dva_t		*vcsa_dest_dva;
	blkptr_t	*vcsa_dest_bp;
} vdev_copy_seg_arg_t;

/*
 * The maximum amount of data we're allowed to copy from a device
 * at a time when removing it.
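 * This bounds the amount of copy i/o that can be outstanding at once;
 * the removal thread waits on vca_cv whenever vca_outstanding_bytes
 * exceeds this value (see spa_vdev_remove_thread()).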
96 */ 97 int zfs_remove_max_copy_bytes = 8 * 1024 * 1024; 98 99 /* 100 * The largest contiguous segment that we will attempt to allocate when 101 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If 102 * there is a performance problem with attempting to allocate large blocks, 103 * consider decreasing this. 104 * 105 * Note: we will issue I/Os of up to this size. The mpt driver does not 106 * respond well to I/Os larger than 1MB, so we set this to 1MB. (When 107 * mpt processes an I/O larger than 1MB, it needs to do an allocation of 108 * 2 physically contiguous pages; if this allocation fails, mpt will drop 109 * the I/O and hang the device.) 110 */ 111 int zfs_remove_max_segment = 1024 * 1024; 112 113 #define VDEV_REMOVAL_ZAP_OBJS "lzap" 114 115 static void spa_vdev_remove_thread(void *arg); 116 117 static void 118 spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx) 119 { 120 VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset, 121 DMU_POOL_DIRECTORY_OBJECT, 122 DMU_POOL_REMOVING, sizeof (uint64_t), 123 sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 124 &spa->spa_removing_phys, tx)); 125 } 126 127 static nvlist_t * 128 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 129 { 130 for (int i = 0; i < count; i++) { 131 uint64_t guid = 132 fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID); 133 134 if (guid == target_guid) 135 return (nvpp[i]); 136 } 137 138 return (NULL); 139 } 140 141 static void 142 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 143 nvlist_t *dev_to_remove) 144 { 145 nvlist_t **newdev = NULL; 146 147 if (count > 1) 148 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 149 150 for (int i = 0, j = 0; i < count; i++) { 151 if (dev[i] == dev_to_remove) 152 continue; 153 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 154 } 155 156 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 157 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 158 159 for (int i = 0; i < count - 1; i++) 160 nvlist_free(newdev[i]); 161 162 if (count > 1) 163 kmem_free(newdev, (count - 1) * sizeof (void *)); 164 } 165 166 static spa_vdev_removal_t * 167 spa_vdev_removal_create(vdev_t *vd) 168 { 169 spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP); 170 mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL); 171 cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL); 172 svr->svr_allocd_segs = range_tree_create(NULL, NULL); 173 svr->svr_vdev = vd; 174 175 for (int i = 0; i < TXG_SIZE; i++) { 176 svr->svr_frees[i] = range_tree_create(NULL, NULL); 177 list_create(&svr->svr_new_segments[i], 178 sizeof (vdev_indirect_mapping_entry_t), 179 offsetof(vdev_indirect_mapping_entry_t, vime_node)); 180 } 181 182 return (svr); 183 } 184 185 void 186 spa_vdev_removal_destroy(spa_vdev_removal_t *svr) 187 { 188 for (int i = 0; i < TXG_SIZE; i++) { 189 ASSERT0(svr->svr_bytes_done[i]); 190 ASSERT0(svr->svr_max_offset_to_sync[i]); 191 range_tree_destroy(svr->svr_frees[i]); 192 list_destroy(&svr->svr_new_segments[i]); 193 } 194 195 range_tree_destroy(svr->svr_allocd_segs); 196 mutex_destroy(&svr->svr_lock); 197 cv_destroy(&svr->svr_cv); 198 kmem_free(svr, sizeof (*svr)); 199 } 200 201 /* 202 * This is called as a synctask in the txg in which we will mark this vdev 203 * as removing (in the config stored in the MOS). 
204 * 205 * It begins the evacuation of a toplevel vdev by: 206 * - initializing the spa_removing_phys which tracks this removal 207 * - computing the amount of space to remove for accounting purposes 208 * - dirtying all dbufs in the spa_config_object 209 * - creating the spa_vdev_removal 210 * - starting the spa_vdev_remove_thread 211 */ 212 static void 213 vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx) 214 { 215 vdev_t *vd = arg; 216 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 217 spa_t *spa = vd->vdev_spa; 218 objset_t *mos = spa->spa_dsl_pool->dp_meta_objset; 219 spa_vdev_removal_t *svr = NULL; 220 uint64_t txg = dmu_tx_get_txg(tx); 221 222 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops); 223 svr = spa_vdev_removal_create(vd); 224 225 ASSERT(vd->vdev_removing); 226 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 227 228 spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 229 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 230 /* 231 * By activating the OBSOLETE_COUNTS feature, we prevent 232 * the pool from being downgraded and ensure that the 233 * refcounts are precise. 234 */ 235 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 236 uint64_t one = 1; 237 VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap, 238 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1, 239 &one, tx)); 240 ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0); 241 } 242 243 vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx); 244 vd->vdev_indirect_mapping = 245 vdev_indirect_mapping_open(mos, vic->vic_mapping_object); 246 vic->vic_births_object = vdev_indirect_births_alloc(mos, tx); 247 vd->vdev_indirect_births = 248 vdev_indirect_births_open(mos, vic->vic_births_object); 249 spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id; 250 spa->spa_removing_phys.sr_start_time = gethrestime_sec(); 251 spa->spa_removing_phys.sr_end_time = 0; 252 spa->spa_removing_phys.sr_state = DSS_SCANNING; 253 spa->spa_removing_phys.sr_to_copy = 0; 254 spa->spa_removing_phys.sr_copied = 0; 255 256 /* 257 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because 258 * there may be space in the defer tree, which is free, but still 259 * counted in vs_alloc. 260 */ 261 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) { 262 metaslab_t *ms = vd->vdev_ms[i]; 263 if (ms->ms_sm == NULL) 264 continue; 265 266 /* 267 * Sync tasks happen before metaslab_sync(), therefore 268 * smp_alloc and sm_alloc must be the same. 269 */ 270 ASSERT3U(space_map_allocated(ms->ms_sm), ==, 271 ms->ms_sm->sm_phys->smp_alloc); 272 273 spa->spa_removing_phys.sr_to_copy += 274 space_map_allocated(ms->ms_sm); 275 276 /* 277 * Space which we are freeing this txg does not need to 278 * be copied. 279 */ 280 spa->spa_removing_phys.sr_to_copy -= 281 range_tree_space(ms->ms_freeingtree); 282 283 ASSERT0(range_tree_space(ms->ms_freedtree)); 284 for (int t = 0; t < TXG_SIZE; t++) 285 ASSERT0(range_tree_space(ms->ms_alloctree[t])); 286 } 287 288 /* 289 * Sync tasks are called before metaslab_sync(), so there should 290 * be no already-synced metaslabs in the TXG_CLEAN list. 291 */ 292 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL); 293 294 spa_sync_removing_state(spa, tx); 295 296 /* 297 * All blocks that we need to read the most recent mapping must be 298 * stored on concrete vdevs. Therefore, we must dirty anything that 299 * is read before spa_remove_init(). Specifically, the 300 * spa_config_object. 
(Note that although we already modified the 301 * spa_config_object in spa_sync_removing_state, that may not have 302 * modified all blocks of the object.) 303 */ 304 dmu_object_info_t doi; 305 VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi)); 306 for (uint64_t offset = 0; offset < doi.doi_max_offset; ) { 307 dmu_buf_t *dbuf; 308 VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT, 309 offset, FTAG, &dbuf, 0)); 310 dmu_buf_will_dirty(dbuf, tx); 311 offset += dbuf->db_size; 312 dmu_buf_rele(dbuf, FTAG); 313 } 314 315 /* 316 * Now that we've allocated the im_object, dirty the vdev to ensure 317 * that the object gets written to the config on disk. 318 */ 319 vdev_config_dirty(vd); 320 321 zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu " 322 "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx), 323 vic->vic_mapping_object); 324 325 spa_history_log_internal(spa, "vdev remove started", tx, 326 "%s vdev %llu %s", spa_name(spa), vd->vdev_id, 327 (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 328 /* 329 * Setting spa_vdev_removal causes subsequent frees to call 330 * free_from_removing_vdev(). Note that we don't need any locking 331 * because we are the sync thread, and metaslab_free_impl() is only 332 * called from syncing context (potentially from a zio taskq thread, 333 * but in any case only when there are outstanding free i/os, which 334 * there are not). 335 */ 336 ASSERT3P(spa->spa_vdev_removal, ==, NULL); 337 spa->spa_vdev_removal = svr; 338 svr->svr_thread = thread_create(NULL, 0, 339 spa_vdev_remove_thread, vd, 0, &p0, TS_RUN, minclsyspri); 340 } 341 342 /* 343 * When we are opening a pool, we must read the mapping for each 344 * indirect vdev in order from most recently removed to least 345 * recently removed. We do this because the blocks for the mapping 346 * of older indirect vdevs may be stored on more recently removed vdevs. 347 * In order to read each indirect mapping object, we must have 348 * initialized all more recently removed vdevs. 349 */ 350 int 351 spa_remove_init(spa_t *spa) 352 { 353 int error; 354 355 error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset, 356 DMU_POOL_DIRECTORY_OBJECT, 357 DMU_POOL_REMOVING, sizeof (uint64_t), 358 sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 359 &spa->spa_removing_phys); 360 361 if (error == ENOENT) { 362 spa->spa_removing_phys.sr_state = DSS_NONE; 363 spa->spa_removing_phys.sr_removing_vdev = -1; 364 spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 365 return (0); 366 } else if (error != 0) { 367 return (error); 368 } 369 370 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) { 371 /* 372 * We are currently removing a vdev. Create and 373 * initialize a spa_vdev_removal_t from the bonus 374 * buffer of the removing vdevs vdev_im_object, and 375 * initialize its partial mapping. 
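		 * The removal thread itself is not started here; it is
		 * restarted later, from spa_restart_removal().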
376 */ 377 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 378 vdev_t *vd = vdev_lookup_top(spa, 379 spa->spa_removing_phys.sr_removing_vdev); 380 spa_config_exit(spa, SCL_STATE, FTAG); 381 382 if (vd == NULL) 383 return (EINVAL); 384 385 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 386 387 ASSERT(vdev_is_concrete(vd)); 388 spa_vdev_removal_t *svr = spa_vdev_removal_create(vd); 389 ASSERT(svr->svr_vdev->vdev_removing); 390 391 vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 392 spa->spa_meta_objset, vic->vic_mapping_object); 393 vd->vdev_indirect_births = vdev_indirect_births_open( 394 spa->spa_meta_objset, vic->vic_births_object); 395 396 spa->spa_vdev_removal = svr; 397 } 398 399 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 400 uint64_t indirect_vdev_id = 401 spa->spa_removing_phys.sr_prev_indirect_vdev; 402 while (indirect_vdev_id != UINT64_MAX) { 403 vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id); 404 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 405 406 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 407 vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 408 spa->spa_meta_objset, vic->vic_mapping_object); 409 vd->vdev_indirect_births = vdev_indirect_births_open( 410 spa->spa_meta_objset, vic->vic_births_object); 411 412 indirect_vdev_id = vic->vic_prev_indirect_vdev; 413 } 414 spa_config_exit(spa, SCL_STATE, FTAG); 415 416 /* 417 * Now that we've loaded all the indirect mappings, we can allow 418 * reads from other blocks (e.g. via predictive prefetch). 419 */ 420 spa->spa_indirect_vdevs_loaded = B_TRUE; 421 return (0); 422 } 423 424 void 425 spa_restart_removal(spa_t *spa) 426 { 427 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 428 429 if (svr == NULL) 430 return; 431 432 /* 433 * In general when this function is called there is no 434 * removal thread running. The only scenario where this 435 * is not true is during spa_import() where this function 436 * is called twice [once from spa_import_impl() and 437 * spa_async_resume()]. Thus, in the scenario where we 438 * import a pool that has an ongoing removal we don't 439 * want to spawn a second thread. 440 */ 441 if (svr->svr_thread != NULL) 442 return; 443 444 if (!spa_writeable(spa)) 445 return; 446 447 vdev_t *vd = svr->svr_vdev; 448 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 449 450 ASSERT3P(vd, !=, NULL); 451 ASSERT(vd->vdev_removing); 452 453 zfs_dbgmsg("restarting removal of %llu at count=%llu", 454 vd->vdev_id, vdev_indirect_mapping_num_entries(vim)); 455 svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, vd, 456 0, &p0, TS_RUN, minclsyspri); 457 } 458 459 /* 460 * Process freeing from a device which is in the middle of being removed. 461 * We must handle this carefully so that we attempt to copy freed data, 462 * and we correctly free already-copied data. 463 */ 464 void 465 free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size, 466 uint64_t txg) 467 { 468 spa_t *spa = vd->vdev_spa; 469 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 470 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 471 uint64_t max_offset_yet = 0; 472 473 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 474 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==, 475 vdev_indirect_mapping_object(vim)); 476 ASSERT3P(vd, ==, svr->svr_vdev); 477 ASSERT3U(spa_syncing_txg(spa), ==, txg); 478 479 mutex_enter(&svr->svr_lock); 480 481 /* 482 * Remove the segment from the removing vdev's spacemap. 
	 * This ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures
	 * that we know what is actually allocated on the new vdevs
	 * (needed if we cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread cannot load this metaslab and then
	 * visit this offset between the time that we metaslab_free_concrete()
	 * and when we check to see if it has been visited.
	 */
	metaslab_free_concrete(vd, offset, size, txg);

	uint64_t synced_size = 0;
	uint64_t synced_offset = 0;
	uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
	if (offset < max_offset_synced) {
		/*
		 * The mapping for this offset is already on disk.
		 * Free from the new location.
		 *
		 * Note that we compare against
		 * vdev_indirect_mapping_max_offset(), which only covers
		 * mapping entries that have already been synced out by
		 * vdev_mapping_sync().
		 *
		 * This block may be split between a synced entry and an
		 * in-flight or unvisited entry. Only process the synced
		 * portion of it here.
		 */
		synced_size = MIN(size, max_offset_synced - offset);
		synced_offset = offset;

		ASSERT3U(max_offset_yet, <=, max_offset_synced);
		max_offset_yet = max_offset_synced;

		DTRACE_PROBE3(remove__free__synced,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, synced_size);

		size -= synced_size;
		offset += synced_size;
	}

	/*
	 * Look at all in-flight txgs starting from the currently syncing one
	 * and see if a section of this free is being copied. By starting from
	 * this txg and iterating forward, we might find that this region
	 * was copied in two different txgs and handle it appropriately.
	 */
	for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
		int txgoff = (txg + i) & TXG_MASK;
		if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
			/*
			 * The mapping for this offset is in flight, and
			 * will be synced in txg+i.
			 */
			uint64_t inflight_size = MIN(size,
			    svr->svr_max_offset_to_sync[txgoff] - offset);

			DTRACE_PROBE4(remove__free__inflight,
			    spa_t *, spa,
			    uint64_t, offset,
			    uint64_t, inflight_size,
			    uint64_t, txg + i);

			/*
			 * We copy data in order of increasing offset.
			 * Therefore the max_offset_to_sync[] must increase
			 * (or be zero, indicating that nothing is being
			 * copied in that txg).
			 */
			if (svr->svr_max_offset_to_sync[txgoff] != 0) {
				ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
				    >=, max_offset_yet);
				max_offset_yet =
				    svr->svr_max_offset_to_sync[txgoff];
			}

			/*
			 * We've already committed to copying this segment:
			 * we have allocated space elsewhere in the pool for
			 * it and have an IO outstanding to copy the data. We
			 * cannot free the space before the copy has
			 * completed, or else the copy IO might overwrite any
			 * new data. To free that space, we record the
			 * segment in the appropriate svr_frees tree and free
			 * the mapped space later, in the txg where we have
			 * completed the copy and synced the mapping (see
			 * vdev_mapping_sync).
			 */
			range_tree_add(svr->svr_frees[txgoff],
			    offset, inflight_size);
			size -= inflight_size;
			offset += inflight_size;

			/*
			 * This space is already accounted for as being
			 * done, because it is being copied in txg+i.
581 * However, if i!=0, then it is being copied in 582 * a future txg. If we crash after this txg 583 * syncs but before txg+i syncs, then the space 584 * will be free. Therefore we must account 585 * for the space being done in *this* txg 586 * (when it is freed) rather than the future txg 587 * (when it will be copied). 588 */ 589 ASSERT3U(svr->svr_bytes_done[txgoff], >=, 590 inflight_size); 591 svr->svr_bytes_done[txgoff] -= inflight_size; 592 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size; 593 } 594 } 595 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]); 596 597 if (size > 0) { 598 /* 599 * The copy thread has not yet visited this offset. Ensure 600 * that it doesn't. 601 */ 602 603 DTRACE_PROBE3(remove__free__unvisited, 604 spa_t *, spa, 605 uint64_t, offset, 606 uint64_t, size); 607 608 if (svr->svr_allocd_segs != NULL) 609 range_tree_clear(svr->svr_allocd_segs, offset, size); 610 611 /* 612 * Since we now do not need to copy this data, for 613 * accounting purposes we have done our job and can count 614 * it as completed. 615 */ 616 svr->svr_bytes_done[txg & TXG_MASK] += size; 617 } 618 mutex_exit(&svr->svr_lock); 619 620 /* 621 * Now that we have dropped svr_lock, process the synced portion 622 * of this free. 623 */ 624 if (synced_size > 0) { 625 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size, 626 txg); 627 /* 628 * Note: this can only be called from syncing context, 629 * and the vdev_indirect_mapping is only changed from the 630 * sync thread, so we don't need svr_lock while doing 631 * metaslab_free_impl_cb. 632 */ 633 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size, 634 metaslab_free_impl_cb, &txg); 635 } 636 } 637 638 /* 639 * Stop an active removal and update the spa_removing phys. 640 */ 641 static void 642 spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx) 643 { 644 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 645 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa)); 646 647 /* Ensure the removal thread has completed before we free the svr. */ 648 spa_vdev_remove_suspend(spa); 649 650 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED); 651 652 if (state == DSS_FINISHED) { 653 spa_removing_phys_t *srp = &spa->spa_removing_phys; 654 vdev_t *vd = svr->svr_vdev; 655 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 656 657 if (srp->sr_prev_indirect_vdev != UINT64_MAX) { 658 vdev_t *pvd = vdev_lookup_top(spa, 659 srp->sr_prev_indirect_vdev); 660 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops); 661 } 662 663 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev; 664 srp->sr_prev_indirect_vdev = vd->vdev_id; 665 } 666 spa->spa_removing_phys.sr_state = state; 667 spa->spa_removing_phys.sr_end_time = gethrestime_sec(); 668 669 spa->spa_vdev_removal = NULL; 670 spa_vdev_removal_destroy(svr); 671 672 spa_sync_removing_state(spa, tx); 673 674 vdev_config_dirty(spa->spa_root_vdev); 675 } 676 677 static void 678 free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size) 679 { 680 vdev_t *vd = arg; 681 vdev_indirect_mark_obsolete(vd, offset, size, 682 vd->vdev_spa->spa_syncing_txg); 683 vdev_indirect_ops.vdev_op_remap(vd, offset, size, 684 metaslab_free_impl_cb, &vd->vdev_spa->spa_syncing_txg); 685 } 686 687 /* 688 * On behalf of the removal thread, syncs an incremental bit more of 689 * the indirect mapping to disk and updates the in-memory mapping. 690 * Called as a sync task in every txg that the removal thread makes progress. 
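 * It also frees, at their new locations, any segments recorded in
 * svr_frees for this txg by free_from_removing_vdev() while their
 * mapping entries were still in flight.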
691 */ 692 static void 693 vdev_mapping_sync(void *arg, dmu_tx_t *tx) 694 { 695 spa_vdev_removal_t *svr = arg; 696 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 697 vdev_t *vd = svr->svr_vdev; 698 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 699 uint64_t txg = dmu_tx_get_txg(tx); 700 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 701 702 ASSERT(vic->vic_mapping_object != 0); 703 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 704 705 vdev_indirect_mapping_add_entries(vim, 706 &svr->svr_new_segments[txg & TXG_MASK], tx); 707 vdev_indirect_births_add_entry(vd->vdev_indirect_births, 708 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx); 709 710 /* 711 * Free the copied data for anything that was freed while the 712 * mapping entries were in flight. 713 */ 714 mutex_enter(&svr->svr_lock); 715 range_tree_vacate(svr->svr_frees[txg & TXG_MASK], 716 free_mapped_segment_cb, vd); 717 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, 718 vdev_indirect_mapping_max_offset(vim)); 719 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0; 720 mutex_exit(&svr->svr_lock); 721 722 spa_sync_removing_state(spa, tx); 723 } 724 725 static void 726 spa_vdev_copy_segment_write_done(zio_t *zio) 727 { 728 vdev_copy_seg_arg_t *vcsa = zio->io_private; 729 vdev_copy_arg_t *vca = vcsa->vcsa_copy_arg; 730 spa_config_exit(zio->io_spa, SCL_STATE, FTAG); 731 abd_free(zio->io_abd); 732 733 mutex_enter(&vca->vca_lock); 734 vca->vca_outstanding_bytes -= zio->io_size; 735 cv_signal(&vca->vca_cv); 736 mutex_exit(&vca->vca_lock); 737 738 ASSERT0(zio->io_error); 739 kmem_free(vcsa->vcsa_dest_bp, sizeof (blkptr_t)); 740 kmem_free(vcsa, sizeof (vdev_copy_seg_arg_t)); 741 } 742 743 static void 744 spa_vdev_copy_segment_read_done(zio_t *zio) 745 { 746 vdev_copy_seg_arg_t *vcsa = zio->io_private; 747 dva_t *dest_dva = vcsa->vcsa_dest_dva; 748 uint64_t txg = vcsa->vcsa_txg; 749 spa_t *spa = zio->io_spa; 750 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(dest_dva)); 751 blkptr_t *bp = NULL; 752 dva_t *dva = NULL; 753 uint64_t size = zio->io_size; 754 755 ASSERT3P(dest_vd, !=, NULL); 756 ASSERT0(zio->io_error); 757 758 vcsa->vcsa_dest_bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP); 759 bp = vcsa->vcsa_dest_bp; 760 dva = bp->blk_dva; 761 762 BP_ZERO(bp); 763 764 /* initialize with dest_dva */ 765 bcopy(dest_dva, dva, sizeof (dva_t)); 766 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 767 768 BP_SET_LSIZE(bp, size); 769 BP_SET_PSIZE(bp, size); 770 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 771 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 772 BP_SET_TYPE(bp, DMU_OT_NONE); 773 BP_SET_LEVEL(bp, 0); 774 BP_SET_DEDUP(bp, 0); 775 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 776 777 zio_nowait(zio_rewrite(spa->spa_txg_zio[txg & TXG_MASK], spa, 778 txg, bp, zio->io_abd, size, 779 spa_vdev_copy_segment_write_done, vcsa, 780 ZIO_PRIORITY_REMOVAL, 0, NULL)); 781 } 782 783 static int 784 spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg, 785 vdev_copy_arg_t *vca, zio_alloc_list_t *zal) 786 { 787 metaslab_group_t *mg = vd->vdev_mg; 788 spa_t *spa = vd->vdev_spa; 789 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 790 vdev_indirect_mapping_entry_t *entry; 791 vdev_copy_seg_arg_t *private; 792 dva_t dst = { 0 }; 793 blkptr_t blk, *bp = &blk; 794 dva_t *dva = bp->blk_dva; 795 796 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); 797 798 int error = metaslab_alloc_dva(spa, mg->mg_class, size, 799 &dst, 0, NULL, txg, 0, zal); 800 if (error != 0) 801 return (error); 802 803 /* 804 * We can't have any padding of the allocated size, 
otherwise we will 805 * misunderstand what's allocated, and the size of the mapping. 806 * The caller ensures this will be true by passing in a size that is 807 * aligned to the worst (highest) ashift in the pool. 808 */ 809 ASSERT3U(DVA_GET_ASIZE(&dst), ==, size); 810 811 mutex_enter(&vca->vca_lock); 812 vca->vca_outstanding_bytes += size; 813 mutex_exit(&vca->vca_lock); 814 815 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP); 816 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start); 817 entry->vime_mapping.vimep_dst = dst; 818 819 private = kmem_alloc(sizeof (vdev_copy_seg_arg_t), KM_SLEEP); 820 private->vcsa_dest_dva = &entry->vime_mapping.vimep_dst; 821 private->vcsa_txg = txg; 822 private->vcsa_copy_arg = vca; 823 824 /* 825 * This lock is eventually released by the donefunc for the 826 * zio_write_phys that finishes copying the data. 827 */ 828 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 829 830 /* 831 * Do logical I/O, letting the redundancy vdevs (like mirror) 832 * handle their own I/O instead of duplicating that code here. 833 */ 834 BP_ZERO(bp); 835 836 DVA_SET_VDEV(&dva[0], vd->vdev_id); 837 DVA_SET_OFFSET(&dva[0], start); 838 DVA_SET_GANG(&dva[0], 0); 839 DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, size)); 840 841 BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL); 842 843 BP_SET_LSIZE(bp, size); 844 BP_SET_PSIZE(bp, size); 845 BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF); 846 BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF); 847 BP_SET_TYPE(bp, DMU_OT_NONE); 848 BP_SET_LEVEL(bp, 0); 849 BP_SET_DEDUP(bp, 0); 850 BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 851 852 zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, 853 bp, abd_alloc_for_io(size, B_FALSE), size, 854 spa_vdev_copy_segment_read_done, private, 855 ZIO_PRIORITY_REMOVAL, 0, NULL)); 856 857 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry); 858 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift); 859 vdev_dirty(vd, 0, NULL, txg); 860 861 return (0); 862 } 863 864 /* 865 * Complete the removal of a toplevel vdev. This is called as a 866 * synctask in the same txg that we will sync out the new config (to the 867 * MOS object) which indicates that this vdev is indirect. 
868 */ 869 static void 870 vdev_remove_complete_sync(void *arg, dmu_tx_t *tx) 871 { 872 spa_vdev_removal_t *svr = arg; 873 vdev_t *vd = svr->svr_vdev; 874 spa_t *spa = vd->vdev_spa; 875 876 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 877 878 for (int i = 0; i < TXG_SIZE; i++) { 879 ASSERT0(svr->svr_bytes_done[i]); 880 } 881 882 ASSERT3U(spa->spa_removing_phys.sr_copied, ==, 883 spa->spa_removing_phys.sr_to_copy); 884 885 vdev_destroy_spacemaps(vd, tx); 886 887 /* destroy leaf zaps, if any */ 888 ASSERT3P(svr->svr_zaplist, !=, NULL); 889 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL); 890 pair != NULL; 891 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) { 892 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx); 893 } 894 fnvlist_free(svr->svr_zaplist); 895 896 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx); 897 /* vd->vdev_path is not available here */ 898 spa_history_log_internal(spa, "vdev remove completed", tx, 899 "%s vdev %llu", spa_name(spa), vd->vdev_id); 900 } 901 902 static void 903 vdev_indirect_state_transfer(vdev_t *ivd, vdev_t *vd) 904 { 905 ivd->vdev_indirect_config = vd->vdev_indirect_config; 906 907 ASSERT3P(ivd->vdev_indirect_mapping, ==, NULL); 908 ASSERT(vd->vdev_indirect_mapping != NULL); 909 ivd->vdev_indirect_mapping = vd->vdev_indirect_mapping; 910 vd->vdev_indirect_mapping = NULL; 911 912 ASSERT3P(ivd->vdev_indirect_births, ==, NULL); 913 ASSERT(vd->vdev_indirect_births != NULL); 914 ivd->vdev_indirect_births = vd->vdev_indirect_births; 915 vd->vdev_indirect_births = NULL; 916 917 ASSERT0(range_tree_space(vd->vdev_obsolete_segments)); 918 ASSERT0(range_tree_space(ivd->vdev_obsolete_segments)); 919 920 if (vd->vdev_obsolete_sm != NULL) { 921 ASSERT3U(ivd->vdev_asize, ==, vd->vdev_asize); 922 923 /* 924 * We cannot use space_map_{open,close} because we hold all 925 * the config locks as writer. 926 */ 927 ASSERT3P(ivd->vdev_obsolete_sm, ==, NULL); 928 ivd->vdev_obsolete_sm = vd->vdev_obsolete_sm; 929 vd->vdev_obsolete_sm = NULL; 930 } 931 } 932 933 static void 934 vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist) 935 { 936 ASSERT3P(zlist, !=, NULL); 937 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops); 938 939 if (vd->vdev_leaf_zap != 0) { 940 char zkey[32]; 941 (void) snprintf(zkey, sizeof (zkey), "%s-%"PRIu64, 942 VDEV_REMOVAL_ZAP_OBJS, vd->vdev_leaf_zap); 943 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap); 944 } 945 946 for (uint64_t id = 0; id < vd->vdev_children; id++) { 947 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist); 948 } 949 } 950 951 static void 952 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) 953 { 954 vdev_t *ivd; 955 dmu_tx_t *tx; 956 spa_t *spa = vd->vdev_spa; 957 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 958 959 /* 960 * First, build a list of leaf zaps to be destroyed. 961 * This is passed to the sync context thread, 962 * which does the actual unlinking. 
963 */ 964 svr->svr_zaplist = fnvlist_alloc(); 965 vdev_remove_enlist_zaps(vd, svr->svr_zaplist); 966 967 ivd = vdev_add_parent(vd, &vdev_indirect_ops); 968 969 vd->vdev_leaf_zap = 0; 970 971 vdev_remove_child(ivd, vd); 972 vdev_compact_children(ivd); 973 974 vdev_indirect_state_transfer(ivd, vd); 975 976 svr->svr_vdev = ivd; 977 978 ASSERT(!ivd->vdev_removing); 979 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 980 981 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 982 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr, 983 0, ZFS_SPACE_CHECK_NONE, tx); 984 dmu_tx_commit(tx); 985 986 /* 987 * Indicate that this thread has exited. 988 * After this, we can not use svr. 989 */ 990 mutex_enter(&svr->svr_lock); 991 svr->svr_thread = NULL; 992 cv_broadcast(&svr->svr_cv); 993 mutex_exit(&svr->svr_lock); 994 } 995 996 /* 997 * Complete the removal of a toplevel vdev. This is called in open 998 * context by the removal thread after we have copied all vdev's data. 999 */ 1000 static void 1001 vdev_remove_complete(vdev_t *vd) 1002 { 1003 spa_t *spa = vd->vdev_spa; 1004 uint64_t txg; 1005 1006 /* 1007 * Wait for any deferred frees to be synced before we call 1008 * vdev_metaslab_fini() 1009 */ 1010 txg_wait_synced(spa->spa_dsl_pool, 0); 1011 1012 txg = spa_vdev_enter(spa); 1013 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu", 1014 vd->vdev_id, txg); 1015 1016 /* 1017 * Discard allocation state. 1018 */ 1019 if (vd->vdev_mg != NULL) { 1020 vdev_metaslab_fini(vd); 1021 metaslab_group_destroy(vd->vdev_mg); 1022 vd->vdev_mg = NULL; 1023 } 1024 ASSERT0(vd->vdev_stat.vs_space); 1025 ASSERT0(vd->vdev_stat.vs_dspace); 1026 1027 vdev_remove_replace_with_indirect(vd, txg); 1028 1029 /* 1030 * We now release the locks, allowing spa_sync to run and finish the 1031 * removal via vdev_remove_complete_sync in syncing context. 1032 */ 1033 (void) spa_vdev_exit(spa, NULL, txg, 0); 1034 1035 /* 1036 * Top ZAP should have been transferred to the indirect vdev in 1037 * vdev_remove_replace_with_indirect. 1038 */ 1039 ASSERT0(vd->vdev_top_zap); 1040 1041 /* 1042 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect. 1043 */ 1044 ASSERT0(vd->vdev_leaf_zap); 1045 1046 txg = spa_vdev_enter(spa); 1047 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1048 /* 1049 * Request to update the config and the config cachefile. 1050 */ 1051 vdev_config_dirty(spa->spa_root_vdev); 1052 (void) spa_vdev_exit(spa, vd, txg, 0); 1053 } 1054 1055 /* 1056 * Evacuates a segment of size at most max_alloc from the vdev 1057 * via repeated calls to spa_vdev_copy_segment. If an allocation 1058 * fails, the pool is probably too fragmented to handle such a 1059 * large size, so decrease max_alloc so that the caller will not try 1060 * this size again this txg. 
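 * (The caller resets max_alloc to zfs_remove_max_segment whenever it
 * moves on to a new txg; see spa_vdev_remove_thread().)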
1061 */ 1062 static void 1063 spa_vdev_copy_impl(spa_vdev_removal_t *svr, vdev_copy_arg_t *vca, 1064 uint64_t *max_alloc, dmu_tx_t *tx) 1065 { 1066 uint64_t txg = dmu_tx_get_txg(tx); 1067 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1068 1069 mutex_enter(&svr->svr_lock); 1070 1071 range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root); 1072 if (rs == NULL) { 1073 mutex_exit(&svr->svr_lock); 1074 return; 1075 } 1076 uint64_t offset = rs->rs_start; 1077 uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc); 1078 1079 range_tree_remove(svr->svr_allocd_segs, offset, length); 1080 1081 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) { 1082 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync, 1083 svr, 0, ZFS_SPACE_CHECK_NONE, tx); 1084 } 1085 1086 svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length; 1087 1088 /* 1089 * Note: this is the amount of *allocated* space 1090 * that we are taking care of each txg. 1091 */ 1092 svr->svr_bytes_done[txg & TXG_MASK] += length; 1093 1094 mutex_exit(&svr->svr_lock); 1095 1096 zio_alloc_list_t zal; 1097 metaslab_trace_init(&zal); 1098 uint64_t thismax = *max_alloc; 1099 while (length > 0) { 1100 uint64_t mylen = MIN(length, thismax); 1101 1102 int error = spa_vdev_copy_segment(svr->svr_vdev, 1103 offset, mylen, txg, vca, &zal); 1104 1105 if (error == ENOSPC) { 1106 /* 1107 * Cut our segment in half, and don't try this 1108 * segment size again this txg. Note that the 1109 * allocation size must be aligned to the highest 1110 * ashift in the pool, so that the allocation will 1111 * not be padded out to a multiple of the ashift, 1112 * which could cause us to think that this mapping 1113 * is larger than we intended. 1114 */ 1115 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT); 1116 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift); 1117 thismax = P2ROUNDUP(mylen / 2, 1118 1 << spa->spa_max_ashift); 1119 ASSERT3U(thismax, <, mylen); 1120 /* 1121 * The minimum-size allocation can not fail. 1122 */ 1123 ASSERT3U(mylen, >, 1 << spa->spa_max_ashift); 1124 *max_alloc = mylen - (1 << spa->spa_max_ashift); 1125 } else { 1126 ASSERT0(error); 1127 length -= mylen; 1128 offset += mylen; 1129 1130 /* 1131 * We've performed an allocation, so reset the 1132 * alloc trace list. 1133 */ 1134 metaslab_trace_fini(&zal); 1135 metaslab_trace_init(&zal); 1136 } 1137 } 1138 metaslab_trace_fini(&zal); 1139 } 1140 1141 /* 1142 * The removal thread operates in open context. It iterates over all 1143 * allocated space in the vdev, by loading each metaslab's spacemap. 1144 * For each contiguous segment of allocated space (capping the segment 1145 * size at SPA_MAXBLOCKSIZE), we: 1146 * - Allocate space for it on another vdev. 1147 * - Create a new mapping from the old location to the new location 1148 * (as a record in svr_new_segments). 1149 * - Initiate a logical read zio to get the data off the removing disk. 1150 * - In the read zio's done callback, initiate a logical write zio to 1151 * write it to the new vdev. 1152 * Note that all of this will take effect when a particular TXG syncs. 1153 * The sync thread ensures that all the phys reads and writes for the syncing 1154 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk 1155 * (see vdev_mapping_sync()). 
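 * The removal thread can be suspended (spa_vdev_remove_suspend()) and
 * later restarted (spa_restart_removal()); on restart it resumes from
 * the maximum offset already recorded in the indirect mapping.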
1156 */ 1157 static void 1158 spa_vdev_remove_thread(void *arg) 1159 { 1160 vdev_t *vd = arg; 1161 spa_t *spa = vd->vdev_spa; 1162 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1163 vdev_copy_arg_t vca; 1164 uint64_t max_alloc = zfs_remove_max_segment; 1165 uint64_t last_txg = 0; 1166 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1167 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim); 1168 1169 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops); 1170 ASSERT(vdev_is_concrete(vd)); 1171 ASSERT(vd->vdev_removing); 1172 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 1173 ASSERT3P(svr->svr_vdev, ==, vd); 1174 ASSERT(vim != NULL); 1175 1176 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL); 1177 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL); 1178 vca.vca_outstanding_bytes = 0; 1179 1180 mutex_enter(&svr->svr_lock); 1181 1182 /* 1183 * Start from vim_max_offset so we pick up where we left off 1184 * if we are restarting the removal after opening the pool. 1185 */ 1186 uint64_t msi; 1187 for (msi = start_offset >> vd->vdev_ms_shift; 1188 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) { 1189 metaslab_t *msp = vd->vdev_ms[msi]; 1190 ASSERT3U(msi, <=, vd->vdev_ms_count); 1191 1192 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1193 1194 mutex_enter(&msp->ms_sync_lock); 1195 mutex_enter(&msp->ms_lock); 1196 1197 /* 1198 * Assert nothing in flight -- ms_*tree is empty. 1199 */ 1200 for (int i = 0; i < TXG_SIZE; i++) { 1201 ASSERT0(range_tree_space(msp->ms_alloctree[i])); 1202 } 1203 1204 /* 1205 * If the metaslab has ever been allocated from (ms_sm!=NULL), 1206 * read the allocated segments from the space map object 1207 * into svr_allocd_segs. Since we do this while holding 1208 * svr_lock and ms_sync_lock, concurrent frees (which 1209 * would have modified the space map) will wait for us 1210 * to finish loading the spacemap, and then take the 1211 * appropriate action (see free_from_removing_vdev()). 1212 */ 1213 if (msp->ms_sm != NULL) { 1214 space_map_t *sm = NULL; 1215 1216 /* 1217 * We have to open a new space map here, because 1218 * ms_sm's sm_length and sm_alloc may not reflect 1219 * what's in the object contents, if we are in between 1220 * metaslab_sync() and metaslab_sync_done(). 1221 */ 1222 VERIFY0(space_map_open(&sm, 1223 spa->spa_dsl_pool->dp_meta_objset, 1224 msp->ms_sm->sm_object, msp->ms_sm->sm_start, 1225 msp->ms_sm->sm_size, msp->ms_sm->sm_shift)); 1226 space_map_update(sm); 1227 VERIFY0(space_map_load(sm, svr->svr_allocd_segs, 1228 SM_ALLOC)); 1229 space_map_close(sm); 1230 1231 range_tree_walk(msp->ms_freeingtree, 1232 range_tree_remove, svr->svr_allocd_segs); 1233 1234 /* 1235 * When we are resuming from a paused removal (i.e. 1236 * when importing a pool with a removal in progress), 1237 * discard any state that we have already processed. 
1238 */ 1239 range_tree_clear(svr->svr_allocd_segs, 0, start_offset); 1240 } 1241 mutex_exit(&msp->ms_lock); 1242 mutex_exit(&msp->ms_sync_lock); 1243 1244 vca.vca_msp = msp; 1245 zfs_dbgmsg("copying %llu segments for metaslab %llu", 1246 avl_numnodes(&svr->svr_allocd_segs->rt_root), 1247 msp->ms_id); 1248 1249 while (!svr->svr_thread_exit && 1250 range_tree_space(svr->svr_allocd_segs) != 0) { 1251 1252 mutex_exit(&svr->svr_lock); 1253 1254 mutex_enter(&vca.vca_lock); 1255 while (vca.vca_outstanding_bytes > 1256 zfs_remove_max_copy_bytes) { 1257 cv_wait(&vca.vca_cv, &vca.vca_lock); 1258 } 1259 mutex_exit(&vca.vca_lock); 1260 1261 dmu_tx_t *tx = 1262 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1263 1264 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1265 uint64_t txg = dmu_tx_get_txg(tx); 1266 1267 if (txg != last_txg) 1268 max_alloc = zfs_remove_max_segment; 1269 last_txg = txg; 1270 1271 spa_vdev_copy_impl(svr, &vca, &max_alloc, tx); 1272 1273 dmu_tx_commit(tx); 1274 mutex_enter(&svr->svr_lock); 1275 } 1276 } 1277 1278 mutex_exit(&svr->svr_lock); 1279 /* 1280 * Wait for all copies to finish before cleaning up the vca. 1281 */ 1282 txg_wait_synced(spa->spa_dsl_pool, 0); 1283 ASSERT0(vca.vca_outstanding_bytes); 1284 1285 mutex_destroy(&vca.vca_lock); 1286 cv_destroy(&vca.vca_cv); 1287 1288 if (svr->svr_thread_exit) { 1289 mutex_enter(&svr->svr_lock); 1290 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL); 1291 svr->svr_thread = NULL; 1292 cv_broadcast(&svr->svr_cv); 1293 mutex_exit(&svr->svr_lock); 1294 } else { 1295 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1296 vdev_remove_complete(vd); 1297 } 1298 } 1299 1300 void 1301 spa_vdev_remove_suspend(spa_t *spa) 1302 { 1303 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1304 1305 if (svr == NULL) 1306 return; 1307 1308 mutex_enter(&svr->svr_lock); 1309 svr->svr_thread_exit = B_TRUE; 1310 while (svr->svr_thread != NULL) 1311 cv_wait(&svr->svr_cv, &svr->svr_lock); 1312 svr->svr_thread_exit = B_FALSE; 1313 mutex_exit(&svr->svr_lock); 1314 } 1315 1316 /* ARGSUSED */ 1317 static int 1318 spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) 1319 { 1320 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1321 1322 if (spa->spa_vdev_removal == NULL) 1323 return (ENOTACTIVE); 1324 return (0); 1325 } 1326 1327 /* 1328 * Cancel a removal by freeing all entries from the partial mapping 1329 * and marking the vdev as no longer being removing. 
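 * The data that has already been copied remains valid at its original
 * location on the removing vdev, so the new copies (and the mapping and
 * births objects that described them) are simply freed.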
1330 */ 1331 /* ARGSUSED */ 1332 static void 1333 spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx) 1334 { 1335 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1336 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1337 vdev_t *vd = svr->svr_vdev; 1338 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1339 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1340 objset_t *mos = spa->spa_meta_objset; 1341 1342 ASSERT3P(svr->svr_thread, ==, NULL); 1343 1344 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 1345 if (vdev_obsolete_counts_are_precise(vd)) { 1346 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1347 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1348 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx)); 1349 } 1350 1351 if (vdev_obsolete_sm_object(vd) != 0) { 1352 ASSERT(vd->vdev_obsolete_sm != NULL); 1353 ASSERT3U(vdev_obsolete_sm_object(vd), ==, 1354 space_map_object(vd->vdev_obsolete_sm)); 1355 1356 space_map_free(vd->vdev_obsolete_sm, tx); 1357 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1358 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 1359 space_map_close(vd->vdev_obsolete_sm); 1360 vd->vdev_obsolete_sm = NULL; 1361 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1362 } 1363 for (int i = 0; i < TXG_SIZE; i++) { 1364 ASSERT(list_is_empty(&svr->svr_new_segments[i])); 1365 ASSERT3U(svr->svr_max_offset_to_sync[i], <=, 1366 vdev_indirect_mapping_max_offset(vim)); 1367 } 1368 1369 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 1370 metaslab_t *msp = vd->vdev_ms[msi]; 1371 1372 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 1373 break; 1374 1375 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1376 1377 mutex_enter(&msp->ms_lock); 1378 1379 /* 1380 * Assert nothing in flight -- ms_*tree is empty. 1381 */ 1382 for (int i = 0; i < TXG_SIZE; i++) 1383 ASSERT0(range_tree_space(msp->ms_alloctree[i])); 1384 for (int i = 0; i < TXG_DEFER_SIZE; i++) 1385 ASSERT0(range_tree_space(msp->ms_defertree[i])); 1386 ASSERT0(range_tree_space(msp->ms_freedtree)); 1387 1388 if (msp->ms_sm != NULL) { 1389 /* 1390 * Assert that the in-core spacemap has the same 1391 * length as the on-disk one, so we can use the 1392 * existing in-core spacemap to load it from disk. 1393 */ 1394 ASSERT3U(msp->ms_sm->sm_alloc, ==, 1395 msp->ms_sm->sm_phys->smp_alloc); 1396 ASSERT3U(msp->ms_sm->sm_length, ==, 1397 msp->ms_sm->sm_phys->smp_objsize); 1398 1399 mutex_enter(&svr->svr_lock); 1400 VERIFY0(space_map_load(msp->ms_sm, 1401 svr->svr_allocd_segs, SM_ALLOC)); 1402 range_tree_walk(msp->ms_freeingtree, 1403 range_tree_remove, svr->svr_allocd_segs); 1404 1405 /* 1406 * Clear everything past what has been synced, 1407 * because we have not allocated mappings for it yet. 1408 */ 1409 uint64_t syncd = vdev_indirect_mapping_max_offset(vim); 1410 range_tree_clear(svr->svr_allocd_segs, syncd, 1411 msp->ms_sm->sm_start + msp->ms_sm->sm_size - syncd); 1412 1413 mutex_exit(&svr->svr_lock); 1414 } 1415 mutex_exit(&msp->ms_lock); 1416 1417 mutex_enter(&svr->svr_lock); 1418 range_tree_vacate(svr->svr_allocd_segs, 1419 free_mapped_segment_cb, vd); 1420 mutex_exit(&svr->svr_lock); 1421 } 1422 1423 /* 1424 * Note: this must happen after we invoke free_mapped_segment_cb, 1425 * because it adds to the obsolete_segments. 
1426 */ 1427 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 1428 1429 ASSERT3U(vic->vic_mapping_object, ==, 1430 vdev_indirect_mapping_object(vd->vdev_indirect_mapping)); 1431 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 1432 vd->vdev_indirect_mapping = NULL; 1433 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 1434 vic->vic_mapping_object = 0; 1435 1436 ASSERT3U(vic->vic_births_object, ==, 1437 vdev_indirect_births_object(vd->vdev_indirect_births)); 1438 vdev_indirect_births_close(vd->vdev_indirect_births); 1439 vd->vdev_indirect_births = NULL; 1440 vdev_indirect_births_free(mos, vic->vic_births_object, tx); 1441 vic->vic_births_object = 0; 1442 1443 /* 1444 * We may have processed some frees from the removing vdev in this 1445 * txg, thus increasing svr_bytes_done; discard that here to 1446 * satisfy the assertions in spa_vdev_removal_destroy(). 1447 * Note that future txg's can not have any bytes_done, because 1448 * future TXG's are only modified from open context, and we have 1449 * already shut down the copying thread. 1450 */ 1451 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0; 1452 spa_finish_removal(spa, DSS_CANCELED, tx); 1453 1454 vd->vdev_removing = B_FALSE; 1455 vdev_config_dirty(vd); 1456 1457 zfs_dbgmsg("canceled device removal for vdev %llu in %llu", 1458 vd->vdev_id, dmu_tx_get_txg(tx)); 1459 spa_history_log_internal(spa, "vdev remove canceled", tx, 1460 "%s vdev %llu %s", spa_name(spa), 1461 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1462 } 1463 1464 int 1465 spa_vdev_remove_cancel(spa_t *spa) 1466 { 1467 spa_vdev_remove_suspend(spa); 1468 1469 if (spa->spa_vdev_removal == NULL) 1470 return (ENOTACTIVE); 1471 1472 uint64_t vdid = spa->spa_vdev_removal->svr_vdev->vdev_id; 1473 1474 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check, 1475 spa_vdev_remove_cancel_sync, NULL, 0, ZFS_SPACE_CHECK_NONE); 1476 1477 if (error == 0) { 1478 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER); 1479 vdev_t *vd = vdev_lookup_top(spa, vdid); 1480 metaslab_group_activate(vd->vdev_mg); 1481 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG); 1482 } 1483 1484 return (error); 1485 } 1486 1487 /* 1488 * Called every sync pass of every txg if there's a svr. 1489 */ 1490 void 1491 svr_sync(spa_t *spa, dmu_tx_t *tx) 1492 { 1493 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1494 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 1495 1496 /* 1497 * This check is necessary so that we do not dirty the 1498 * DIRECTORY_OBJECT via spa_sync_removing_state() when there 1499 * is nothing to do. Dirtying it every time would prevent us 1500 * from syncing-to-convergence. 1501 */ 1502 if (svr->svr_bytes_done[txgoff] == 0) 1503 return; 1504 1505 /* 1506 * Update progress accounting. 
1507 */ 1508 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff]; 1509 svr->svr_bytes_done[txgoff] = 0; 1510 1511 spa_sync_removing_state(spa, tx); 1512 } 1513 1514 static void 1515 vdev_remove_make_hole_and_free(vdev_t *vd) 1516 { 1517 uint64_t id = vd->vdev_id; 1518 spa_t *spa = vd->vdev_spa; 1519 vdev_t *rvd = spa->spa_root_vdev; 1520 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 1521 1522 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1523 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1524 1525 vdev_free(vd); 1526 1527 if (last_vdev) { 1528 vdev_compact_children(rvd); 1529 } else { 1530 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 1531 vdev_add_child(rvd, vd); 1532 } 1533 vdev_config_dirty(rvd); 1534 1535 /* 1536 * Reassess the health of our root vdev. 1537 */ 1538 vdev_reopen(rvd); 1539 } 1540 1541 /* 1542 * Remove a log device. The config lock is held for the specified TXG. 1543 */ 1544 static int 1545 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) 1546 { 1547 metaslab_group_t *mg = vd->vdev_mg; 1548 spa_t *spa = vd->vdev_spa; 1549 int error = 0; 1550 1551 ASSERT(vd->vdev_islog); 1552 ASSERT(vd == vd->vdev_top); 1553 1554 /* 1555 * Stop allocating from this vdev. 1556 */ 1557 metaslab_group_passivate(mg); 1558 1559 /* 1560 * Wait for the youngest allocations and frees to sync, 1561 * and then wait for the deferral of those frees to finish. 1562 */ 1563 spa_vdev_config_exit(spa, NULL, 1564 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 1565 1566 /* 1567 * Evacuate the device. We don't hold the config lock as writer 1568 * since we need to do I/O but we do keep the 1569 * spa_namespace_lock held. Once this completes the device 1570 * should no longer have any blocks allocated on it. 1571 */ 1572 if (vd->vdev_islog) { 1573 if (vd->vdev_stat.vs_alloc != 0) 1574 error = spa_reset_logs(spa); 1575 } 1576 1577 *txg = spa_vdev_config_enter(spa); 1578 1579 if (error != 0) { 1580 metaslab_group_activate(mg); 1581 return (error); 1582 } 1583 ASSERT0(vd->vdev_stat.vs_alloc); 1584 1585 /* 1586 * The evacuation succeeded. Remove any remaining MOS metadata 1587 * associated with this vdev, and wait for these changes to sync. 1588 */ 1589 vd->vdev_removing = B_TRUE; 1590 1591 vdev_dirty_leaves(vd, VDD_DTL, *txg); 1592 vdev_config_dirty(vd); 1593 1594 spa_history_log_internal(spa, "vdev remove", NULL, 1595 "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id, 1596 (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1597 1598 /* Make sure these changes are sync'ed */ 1599 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG); 1600 1601 *txg = spa_vdev_config_enter(spa); 1602 1603 sysevent_t *ev = spa_event_create(spa, vd, NULL, 1604 ESC_ZFS_VDEV_REMOVE_DEV); 1605 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1606 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1607 1608 /* The top ZAP should have been destroyed by vdev_remove_empty. */ 1609 ASSERT0(vd->vdev_top_zap); 1610 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */ 1611 ASSERT0(vd->vdev_leaf_zap); 1612 1613 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1614 1615 if (list_link_active(&vd->vdev_state_dirty_node)) 1616 vdev_state_clean(vd); 1617 if (list_link_active(&vd->vdev_config_dirty_node)) 1618 vdev_config_clean(vd); 1619 1620 /* 1621 * Clean up the vdev namespace. 
1622 */ 1623 vdev_remove_make_hole_and_free(vd); 1624 1625 if (ev != NULL) 1626 spa_event_post(ev); 1627 1628 return (0); 1629 } 1630 1631 static int 1632 spa_vdev_remove_top_check(vdev_t *vd) 1633 { 1634 spa_t *spa = vd->vdev_spa; 1635 1636 if (vd != vd->vdev_top) 1637 return (SET_ERROR(ENOTSUP)); 1638 1639 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL)) 1640 return (SET_ERROR(ENOTSUP)); 1641 1642 /* 1643 * There has to be enough free space to remove the 1644 * device and leave double the "slop" space (i.e. we 1645 * must leave at least 3% of the pool free, in addition to 1646 * the normal slop space). 1647 */ 1648 if (dsl_dir_space_available(spa->spa_dsl_pool->dp_root_dir, 1649 NULL, 0, B_TRUE) < 1650 vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) { 1651 return (SET_ERROR(ENOSPC)); 1652 } 1653 1654 /* 1655 * There can not be a removal in progress. 1656 */ 1657 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) 1658 return (SET_ERROR(EBUSY)); 1659 1660 /* 1661 * The device must have all its data. 1662 */ 1663 if (!vdev_dtl_empty(vd, DTL_MISSING) || 1664 !vdev_dtl_empty(vd, DTL_OUTAGE)) 1665 return (SET_ERROR(EBUSY)); 1666 1667 /* 1668 * The device must be healthy. 1669 */ 1670 if (!vdev_readable(vd)) 1671 return (SET_ERROR(EIO)); 1672 1673 /* 1674 * All vdevs in normal class must have the same ashift. 1675 */ 1676 if (spa->spa_max_ashift != spa->spa_min_ashift) { 1677 return (SET_ERROR(EINVAL)); 1678 } 1679 1680 /* 1681 * All vdevs in normal class must have the same ashift 1682 * and not be raidz. 1683 */ 1684 vdev_t *rvd = spa->spa_root_vdev; 1685 int num_indirect = 0; 1686 for (uint64_t id = 0; id < rvd->vdev_children; id++) { 1687 vdev_t *cvd = rvd->vdev_child[id]; 1688 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog) 1689 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift); 1690 if (cvd->vdev_ops == &vdev_indirect_ops) 1691 num_indirect++; 1692 if (!vdev_is_concrete(cvd)) 1693 continue; 1694 if (cvd->vdev_ops == &vdev_raidz_ops) 1695 return (SET_ERROR(EINVAL)); 1696 /* 1697 * Need the mirror to be mirror of leaf vdevs only 1698 */ 1699 if (cvd->vdev_ops == &vdev_mirror_ops) { 1700 for (uint64_t cid = 0; 1701 cid < cvd->vdev_children; cid++) { 1702 vdev_t *tmp = cvd->vdev_child[cid]; 1703 if (!tmp->vdev_ops->vdev_op_leaf) 1704 return (SET_ERROR(EINVAL)); 1705 } 1706 } 1707 } 1708 1709 return (0); 1710 } 1711 1712 /* 1713 * Initiate removal of a top-level vdev, reducing the total space in the pool. 1714 * The config lock is held for the specified TXG. Once initiated, 1715 * evacuation of all allocated space (copying it to other vdevs) happens 1716 * in the background (see spa_vdev_remove_thread()), and can be canceled 1717 * (see spa_vdev_remove_cancel()). If successful, the vdev will 1718 * be transformed to an indirect vdev (see spa_vdev_remove_complete()). 1719 */ 1720 static int 1721 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) 1722 { 1723 spa_t *spa = vd->vdev_spa; 1724 int error; 1725 1726 /* 1727 * Check for errors up-front, so that we don't waste time 1728 * passivating the metaslab group and clearing the ZIL if there 1729 * are errors. 1730 */ 1731 error = spa_vdev_remove_top_check(vd); 1732 if (error != 0) 1733 return (error); 1734 1735 /* 1736 * Stop allocating from this vdev. Note that we must check 1737 * that this is not the only device in the pool before 1738 * passivating, otherwise we will not be able to make 1739 * progress because we can't allocate from any vdevs. 1740 * The above check for sufficient free space serves this 1741 * purpose. 
1742 */ 1743 metaslab_group_t *mg = vd->vdev_mg; 1744 metaslab_group_passivate(mg); 1745 1746 /* 1747 * Wait for the youngest allocations and frees to sync, 1748 * and then wait for the deferral of those frees to finish. 1749 */ 1750 spa_vdev_config_exit(spa, NULL, 1751 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 1752 1753 /* 1754 * We must ensure that no "stubby" log blocks are allocated 1755 * on the device to be removed. These blocks could be 1756 * written at any time, including while we are in the middle 1757 * of copying them. 1758 */ 1759 error = spa_reset_logs(spa); 1760 1761 *txg = spa_vdev_config_enter(spa); 1762 1763 /* 1764 * Things might have changed while the config lock was dropped 1765 * (e.g. space usage). Check for errors again. 1766 */ 1767 if (error == 0) 1768 error = spa_vdev_remove_top_check(vd); 1769 1770 if (error != 0) { 1771 metaslab_group_activate(mg); 1772 return (error); 1773 } 1774 1775 vd->vdev_removing = B_TRUE; 1776 1777 vdev_dirty_leaves(vd, VDD_DTL, *txg); 1778 vdev_config_dirty(vd); 1779 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg); 1780 dsl_sync_task_nowait(spa->spa_dsl_pool, 1781 vdev_remove_initiate_sync, 1782 vd, 0, ZFS_SPACE_CHECK_NONE, tx); 1783 dmu_tx_commit(tx); 1784 1785 return (0); 1786 } 1787 1788 /* 1789 * Remove a device from the pool. 1790 * 1791 * Removing a device from the vdev namespace requires several steps 1792 * and can take a significant amount of time. As a result we use 1793 * the spa_vdev_config_[enter/exit] functions which allow us to 1794 * grab and release the spa_config_lock while still holding the namespace 1795 * lock. During each step the configuration is synced out. 1796 */ 1797 int 1798 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 1799 { 1800 vdev_t *vd; 1801 nvlist_t **spares, **l2cache, *nv; 1802 uint64_t txg = 0; 1803 uint_t nspares, nl2cache; 1804 int error = 0; 1805 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 1806 sysevent_t *ev = NULL; 1807 1808 ASSERT(spa_writeable(spa)); 1809 1810 if (!locked) 1811 txg = spa_vdev_enter(spa); 1812 1813 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 1814 1815 if (spa->spa_spares.sav_vdevs != NULL && 1816 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 1817 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 1818 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 1819 /* 1820 * Only remove the hot spare if it's not currently in use 1821 * in this pool. 
1822 */ 1823 if (vd == NULL || unspare) { 1824 char *nvstr = fnvlist_lookup_string(nv, 1825 ZPOOL_CONFIG_PATH); 1826 spa_history_log_internal(spa, "vdev remove", NULL, 1827 "%s vdev (%s) %s", spa_name(spa), 1828 VDEV_TYPE_SPARE, nvstr); 1829 if (vd == NULL) 1830 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 1831 ev = spa_event_create(spa, vd, NULL, 1832 ESC_ZFS_VDEV_REMOVE_AUX); 1833 spa_vdev_remove_aux(spa->spa_spares.sav_config, 1834 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 1835 spa_load_spares(spa); 1836 spa->spa_spares.sav_sync = B_TRUE; 1837 } else { 1838 error = SET_ERROR(EBUSY); 1839 } 1840 } else if (spa->spa_l2cache.sav_vdevs != NULL && 1841 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 1842 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 1843 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 1844 char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH); 1845 spa_history_log_internal(spa, "vdev remove", NULL, 1846 "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr); 1847 /* 1848 * Cache devices can always be removed. 1849 */ 1850 vd = spa_lookup_by_guid(spa, guid, B_TRUE); 1851 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX); 1852 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 1853 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 1854 spa_load_l2cache(spa); 1855 spa->spa_l2cache.sav_sync = B_TRUE; 1856 } else if (vd != NULL && vd->vdev_islog) { 1857 ASSERT(!locked); 1858 error = spa_vdev_remove_log(vd, &txg); 1859 } else if (vd != NULL) { 1860 ASSERT(!locked); 1861 error = spa_vdev_remove_top(vd, &txg); 1862 } else { 1863 /* 1864 * There is no vdev of any kind with the specified guid. 1865 */ 1866 error = SET_ERROR(ENOENT); 1867 } 1868 1869 if (!locked) 1870 error = spa_vdev_exit(spa, NULL, txg, error); 1871 1872 if (ev != NULL) { 1873 if (error != 0) { 1874 spa_event_discard(ev); 1875 } else { 1876 spa_event_post(ev); 1877 } 1878 } 1879 1880 return (error); 1881 } 1882 1883 int 1884 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs) 1885 { 1886 prs->prs_state = spa->spa_removing_phys.sr_state; 1887 1888 if (prs->prs_state == DSS_NONE) 1889 return (SET_ERROR(ENOENT)); 1890 1891 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev; 1892 prs->prs_start_time = spa->spa_removing_phys.sr_start_time; 1893 prs->prs_end_time = spa->spa_removing_phys.sr_end_time; 1894 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy; 1895 prs->prs_copied = spa->spa_removing_phys.sr_copied; 1896 1897 if (spa->spa_vdev_removal != NULL) { 1898 for (int i = 0; i < TXG_SIZE; i++) { 1899 prs->prs_copied += 1900 spa->spa_vdev_removal->svr_bytes_done[i]; 1901 } 1902 } 1903 1904 prs->prs_mapping_memory = 0; 1905 uint64_t indirect_vdev_id = 1906 spa->spa_removing_phys.sr_prev_indirect_vdev; 1907 while (indirect_vdev_id != -1) { 1908 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id]; 1909 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1910 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1911 1912 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 1913 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim); 1914 indirect_vdev_id = vic->vic_prev_indirect_vdev; 1915 } 1916 1917 return (0); 1918 } 1919