1eda14cbcSMatt Macy /* 2eda14cbcSMatt Macy * CDDL HEADER START 3eda14cbcSMatt Macy * 4eda14cbcSMatt Macy * The contents of this file are subject to the terms of the 5eda14cbcSMatt Macy * Common Development and Distribution License (the "License"). 6eda14cbcSMatt Macy * You may not use this file except in compliance with the License. 7eda14cbcSMatt Macy * 8eda14cbcSMatt Macy * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9271171e0SMartin Matuska * or https://opensource.org/licenses/CDDL-1.0. 10eda14cbcSMatt Macy * See the License for the specific language governing permissions 11eda14cbcSMatt Macy * and limitations under the License. 12eda14cbcSMatt Macy * 13eda14cbcSMatt Macy * When distributing Covered Code, include this CDDL HEADER in each 14eda14cbcSMatt Macy * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15eda14cbcSMatt Macy * If applicable, add the following below this CDDL HEADER, with the 16eda14cbcSMatt Macy * fields enclosed by brackets "[]" replaced with your own identifying 17eda14cbcSMatt Macy * information: Portions Copyright [yyyy] [name of copyright owner] 18eda14cbcSMatt Macy * 19eda14cbcSMatt Macy * CDDL HEADER END 20eda14cbcSMatt Macy */ 21eda14cbcSMatt Macy 22eda14cbcSMatt Macy /* 23eda14cbcSMatt Macy * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24180f8225SMatt Macy * Copyright (c) 2011, 2020 by Delphix. All rights reserved. 25eda14cbcSMatt Macy * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved. 26eda14cbcSMatt Macy */ 27eda14cbcSMatt Macy 28eda14cbcSMatt Macy #include <sys/zfs_context.h> 29eda14cbcSMatt Macy #include <sys/spa_impl.h> 30eda14cbcSMatt Macy #include <sys/dmu.h> 31eda14cbcSMatt Macy #include <sys/dmu_tx.h> 32eda14cbcSMatt Macy #include <sys/zap.h> 33eda14cbcSMatt Macy #include <sys/vdev_impl.h> 34eda14cbcSMatt Macy #include <sys/metaslab.h> 35eda14cbcSMatt Macy #include <sys/metaslab_impl.h> 36eda14cbcSMatt Macy #include <sys/uberblock_impl.h> 37eda14cbcSMatt Macy #include <sys/txg.h> 38eda14cbcSMatt Macy #include <sys/avl.h> 39eda14cbcSMatt Macy #include <sys/bpobj.h> 40eda14cbcSMatt Macy #include <sys/dsl_pool.h> 41eda14cbcSMatt Macy #include <sys/dsl_synctask.h> 42eda14cbcSMatt Macy #include <sys/dsl_dir.h> 43eda14cbcSMatt Macy #include <sys/arc.h> 44eda14cbcSMatt Macy #include <sys/zfeature.h> 45eda14cbcSMatt Macy #include <sys/vdev_indirect_births.h> 46eda14cbcSMatt Macy #include <sys/vdev_indirect_mapping.h> 47eda14cbcSMatt Macy #include <sys/abd.h> 48eda14cbcSMatt Macy #include <sys/vdev_initialize.h> 49eda14cbcSMatt Macy #include <sys/vdev_trim.h> 50eda14cbcSMatt Macy #include <sys/trace_zfs.h> 51eda14cbcSMatt Macy 52eda14cbcSMatt Macy /* 53eda14cbcSMatt Macy * This file contains the necessary logic to remove vdevs from a 54eda14cbcSMatt Macy * storage pool. Currently, the only devices that can be removed 55eda14cbcSMatt Macy * are log, cache, and spare devices; and top level vdevs from a pool 56eda14cbcSMatt Macy * w/o raidz or mirrors. (Note that members of a mirror can be removed 57eda14cbcSMatt Macy * by the detach operation.) 58eda14cbcSMatt Macy * 59eda14cbcSMatt Macy * Log vdevs are removed by evacuating them and then turning the vdev 60eda14cbcSMatt Macy * into a hole vdev while holding spa config locks. 61eda14cbcSMatt Macy * 62eda14cbcSMatt Macy * Top level vdevs are removed and converted into an indirect vdev via 63eda14cbcSMatt Macy * a multi-step process: 64eda14cbcSMatt Macy * 65eda14cbcSMatt Macy * - Disable allocations from this device (spa_vdev_remove_top). 66eda14cbcSMatt Macy * 67eda14cbcSMatt Macy * - From a new thread (spa_vdev_remove_thread), copy data from 68eda14cbcSMatt Macy * the removing vdev to a different vdev. The copy happens in open 69eda14cbcSMatt Macy * context (spa_vdev_copy_impl) and issues a sync task 70eda14cbcSMatt Macy * (vdev_mapping_sync) so the sync thread can update the partial 71eda14cbcSMatt Macy * indirect mappings in core and on disk. 72eda14cbcSMatt Macy * 73eda14cbcSMatt Macy * - If a free happens during a removal, it is freed from the 74eda14cbcSMatt Macy * removing vdev, and if it has already been copied, from the new 75eda14cbcSMatt Macy * location as well (free_from_removing_vdev). 76eda14cbcSMatt Macy * 77eda14cbcSMatt Macy * - After the removal is completed, the copy thread converts the vdev 78eda14cbcSMatt Macy * into an indirect vdev (vdev_remove_complete) before instructing 79eda14cbcSMatt Macy * the sync thread to destroy the space maps and finish the removal 80eda14cbcSMatt Macy * (spa_finish_removal). 81eda14cbcSMatt Macy */ 82eda14cbcSMatt Macy 83eda14cbcSMatt Macy typedef struct vdev_copy_arg { 84eda14cbcSMatt Macy metaslab_t *vca_msp; 85eda14cbcSMatt Macy uint64_t vca_outstanding_bytes; 86eda14cbcSMatt Macy uint64_t vca_read_error_bytes; 87eda14cbcSMatt Macy uint64_t vca_write_error_bytes; 88eda14cbcSMatt Macy kcondvar_t vca_cv; 89eda14cbcSMatt Macy kmutex_t vca_lock; 90eda14cbcSMatt Macy } vdev_copy_arg_t; 91eda14cbcSMatt Macy 92eda14cbcSMatt Macy /* 93eda14cbcSMatt Macy * The maximum amount of memory we can use for outstanding i/o while 94eda14cbcSMatt Macy * doing a device removal. This determines how much i/o we can have 95eda14cbcSMatt Macy * in flight concurrently. 96eda14cbcSMatt Macy */ 97be181ee2SMartin Matuska static const uint_t zfs_remove_max_copy_bytes = 64 * 1024 * 1024; 98eda14cbcSMatt Macy 99eda14cbcSMatt Macy /* 100eda14cbcSMatt Macy * The largest contiguous segment that we will attempt to allocate when 101eda14cbcSMatt Macy * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If 102eda14cbcSMatt Macy * there is a performance problem with attempting to allocate large blocks, 103eda14cbcSMatt Macy * consider decreasing this. 104eda14cbcSMatt Macy * 105eda14cbcSMatt Macy * See also the accessor function spa_remove_max_segment(). 106eda14cbcSMatt Macy */ 107be181ee2SMartin Matuska uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE; 108eda14cbcSMatt Macy 109eda14cbcSMatt Macy /* 110eda14cbcSMatt Macy * Ignore hard IO errors during device removal. When set if a device 111eda14cbcSMatt Macy * encounters hard IO error during the removal process the removal will 112eda14cbcSMatt Macy * not be cancelled. This can result in a normally recoverable block 113eda14cbcSMatt Macy * becoming permanently damaged and is not recommended. 114eda14cbcSMatt Macy */ 115e92ffd9bSMartin Matuska static int zfs_removal_ignore_errors = 0; 116eda14cbcSMatt Macy 117eda14cbcSMatt Macy /* 118eda14cbcSMatt Macy * Allow a remap segment to span free chunks of at most this size. The main 119eda14cbcSMatt Macy * impact of a larger span is that we will read and write larger, more 120eda14cbcSMatt Macy * contiguous chunks, with more "unnecessary" data -- trading off bandwidth 121eda14cbcSMatt Macy * for iops. The value here was chosen to align with 122eda14cbcSMatt Macy * zfs_vdev_read_gap_limit, which is a similar concept when doing regular 123eda14cbcSMatt Macy * reads (but there's no reason it has to be the same). 124eda14cbcSMatt Macy * 125eda14cbcSMatt Macy * Additionally, a higher span will have the following relatively minor 126eda14cbcSMatt Macy * effects: 127eda14cbcSMatt Macy * - the mapping will be smaller, since one entry can cover more allocated 128eda14cbcSMatt Macy * segments 129eda14cbcSMatt Macy * - more of the fragmentation in the removing device will be preserved 130eda14cbcSMatt Macy * - we'll do larger allocations, which may fail and fall back on smaller 131eda14cbcSMatt Macy * allocations 132eda14cbcSMatt Macy */ 133be181ee2SMartin Matuska uint_t vdev_removal_max_span = 32 * 1024; 134eda14cbcSMatt Macy 135eda14cbcSMatt Macy /* 136eda14cbcSMatt Macy * This is used by the test suite so that it can ensure that certain 137eda14cbcSMatt Macy * actions happen while in the middle of a removal. 138eda14cbcSMatt Macy */ 139eda14cbcSMatt Macy int zfs_removal_suspend_progress = 0; 140eda14cbcSMatt Macy 141eda14cbcSMatt Macy #define VDEV_REMOVAL_ZAP_OBJS "lzap" 142eda14cbcSMatt Macy 143da5137abSMartin Matuska static __attribute__((noreturn)) void spa_vdev_remove_thread(void *arg); 144eda14cbcSMatt Macy static int spa_vdev_remove_cancel_impl(spa_t *spa); 145eda14cbcSMatt Macy 146eda14cbcSMatt Macy static void 147eda14cbcSMatt Macy spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx) 148eda14cbcSMatt Macy { 149eda14cbcSMatt Macy VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset, 150eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, 151eda14cbcSMatt Macy DMU_POOL_REMOVING, sizeof (uint64_t), 152eda14cbcSMatt Macy sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 153eda14cbcSMatt Macy &spa->spa_removing_phys, tx)); 154eda14cbcSMatt Macy } 155eda14cbcSMatt Macy 156eda14cbcSMatt Macy static nvlist_t * 157eda14cbcSMatt Macy spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 158eda14cbcSMatt Macy { 159eda14cbcSMatt Macy for (int i = 0; i < count; i++) { 160eda14cbcSMatt Macy uint64_t guid = 161eda14cbcSMatt Macy fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID); 162eda14cbcSMatt Macy 163eda14cbcSMatt Macy if (guid == target_guid) 164eda14cbcSMatt Macy return (nvpp[i]); 165eda14cbcSMatt Macy } 166eda14cbcSMatt Macy 167eda14cbcSMatt Macy return (NULL); 168eda14cbcSMatt Macy } 169eda14cbcSMatt Macy 170eda14cbcSMatt Macy static void 171681ce946SMartin Matuska vdev_activate(vdev_t *vd) 172681ce946SMartin Matuska { 173681ce946SMartin Matuska metaslab_group_t *mg = vd->vdev_mg; 174681ce946SMartin Matuska spa_t *spa = vd->vdev_spa; 175681ce946SMartin Matuska uint64_t vdev_space = spa_deflate(spa) ? 176681ce946SMartin Matuska vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 177681ce946SMartin Matuska 178681ce946SMartin Matuska ASSERT(!vd->vdev_islog); 179681ce946SMartin Matuska ASSERT(vd->vdev_noalloc); 180681ce946SMartin Matuska 181681ce946SMartin Matuska metaslab_group_activate(mg); 182681ce946SMartin Matuska metaslab_group_activate(vd->vdev_log_mg); 183681ce946SMartin Matuska 184681ce946SMartin Matuska ASSERT3U(spa->spa_nonallocating_dspace, >=, vdev_space); 185681ce946SMartin Matuska 186681ce946SMartin Matuska spa->spa_nonallocating_dspace -= vdev_space; 187681ce946SMartin Matuska 188681ce946SMartin Matuska vd->vdev_noalloc = B_FALSE; 189681ce946SMartin Matuska } 190681ce946SMartin Matuska 191681ce946SMartin Matuska static int 192681ce946SMartin Matuska vdev_passivate(vdev_t *vd, uint64_t *txg) 193681ce946SMartin Matuska { 194681ce946SMartin Matuska spa_t *spa = vd->vdev_spa; 195681ce946SMartin Matuska int error; 196681ce946SMartin Matuska 197681ce946SMartin Matuska ASSERT(!vd->vdev_noalloc); 198681ce946SMartin Matuska 199681ce946SMartin Matuska vdev_t *rvd = spa->spa_root_vdev; 200681ce946SMartin Matuska metaslab_group_t *mg = vd->vdev_mg; 201681ce946SMartin Matuska metaslab_class_t *normal = spa_normal_class(spa); 202681ce946SMartin Matuska if (mg->mg_class == normal) { 203681ce946SMartin Matuska /* 204681ce946SMartin Matuska * We must check that this is not the only allocating device in 205681ce946SMartin Matuska * the pool before passivating, otherwise we will not be able 206681ce946SMartin Matuska * to make progress because we can't allocate from any vdevs. 207681ce946SMartin Matuska */ 208681ce946SMartin Matuska boolean_t last = B_TRUE; 209681ce946SMartin Matuska for (uint64_t id = 0; id < rvd->vdev_children; id++) { 210681ce946SMartin Matuska vdev_t *cvd = rvd->vdev_child[id]; 211681ce946SMartin Matuska 212681ce946SMartin Matuska if (cvd == vd || 213681ce946SMartin Matuska cvd->vdev_ops == &vdev_indirect_ops) 214681ce946SMartin Matuska continue; 215681ce946SMartin Matuska 216681ce946SMartin Matuska metaslab_class_t *mc = cvd->vdev_mg->mg_class; 217681ce946SMartin Matuska if (mc != normal) 218681ce946SMartin Matuska continue; 219681ce946SMartin Matuska 220681ce946SMartin Matuska if (!cvd->vdev_noalloc) { 221681ce946SMartin Matuska last = B_FALSE; 222681ce946SMartin Matuska break; 223681ce946SMartin Matuska } 224681ce946SMartin Matuska } 225681ce946SMartin Matuska if (last) 226681ce946SMartin Matuska return (SET_ERROR(EINVAL)); 227681ce946SMartin Matuska } 228681ce946SMartin Matuska 229681ce946SMartin Matuska metaslab_group_passivate(mg); 230681ce946SMartin Matuska ASSERT(!vd->vdev_islog); 231681ce946SMartin Matuska metaslab_group_passivate(vd->vdev_log_mg); 232681ce946SMartin Matuska 233681ce946SMartin Matuska /* 234681ce946SMartin Matuska * Wait for the youngest allocations and frees to sync, 235681ce946SMartin Matuska * and then wait for the deferral of those frees to finish. 236681ce946SMartin Matuska */ 237681ce946SMartin Matuska spa_vdev_config_exit(spa, NULL, 238681ce946SMartin Matuska *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 239681ce946SMartin Matuska 240681ce946SMartin Matuska /* 241681ce946SMartin Matuska * We must ensure that no "stubby" log blocks are allocated 242681ce946SMartin Matuska * on the device to be removed. These blocks could be 243681ce946SMartin Matuska * written at any time, including while we are in the middle 244681ce946SMartin Matuska * of copying them. 245681ce946SMartin Matuska */ 246681ce946SMartin Matuska error = spa_reset_logs(spa); 247681ce946SMartin Matuska 248681ce946SMartin Matuska *txg = spa_vdev_config_enter(spa); 249681ce946SMartin Matuska 250681ce946SMartin Matuska if (error != 0) { 251681ce946SMartin Matuska metaslab_group_activate(mg); 252681ce946SMartin Matuska ASSERT(!vd->vdev_islog); 253681ce946SMartin Matuska if (vd->vdev_log_mg != NULL) 254681ce946SMartin Matuska metaslab_group_activate(vd->vdev_log_mg); 255681ce946SMartin Matuska return (error); 256681ce946SMartin Matuska } 257681ce946SMartin Matuska 258681ce946SMartin Matuska spa->spa_nonallocating_dspace += spa_deflate(spa) ? 259681ce946SMartin Matuska vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 260681ce946SMartin Matuska vd->vdev_noalloc = B_TRUE; 261681ce946SMartin Matuska 262681ce946SMartin Matuska return (0); 263681ce946SMartin Matuska } 264681ce946SMartin Matuska 265681ce946SMartin Matuska /* 266681ce946SMartin Matuska * Turn off allocations for a top-level device from the pool. 267681ce946SMartin Matuska * 268681ce946SMartin Matuska * Turning off allocations for a top-level device can take a significant 269681ce946SMartin Matuska * amount of time. As a result we use the spa_vdev_config_[enter/exit] 270681ce946SMartin Matuska * functions which allow us to grab and release the spa_config_lock while 271681ce946SMartin Matuska * still holding the namespace lock. During each step the configuration 272681ce946SMartin Matuska * is synced out. 273681ce946SMartin Matuska */ 274681ce946SMartin Matuska int 275681ce946SMartin Matuska spa_vdev_noalloc(spa_t *spa, uint64_t guid) 276681ce946SMartin Matuska { 277681ce946SMartin Matuska vdev_t *vd; 278681ce946SMartin Matuska uint64_t txg; 279681ce946SMartin Matuska int error = 0; 280681ce946SMartin Matuska 281681ce946SMartin Matuska ASSERT(!MUTEX_HELD(&spa_namespace_lock)); 282681ce946SMartin Matuska ASSERT(spa_writeable(spa)); 283681ce946SMartin Matuska 284681ce946SMartin Matuska txg = spa_vdev_enter(spa); 285681ce946SMartin Matuska 286681ce946SMartin Matuska ASSERT(MUTEX_HELD(&spa_namespace_lock)); 287681ce946SMartin Matuska 288681ce946SMartin Matuska vd = spa_lookup_by_guid(spa, guid, B_FALSE); 289681ce946SMartin Matuska 290681ce946SMartin Matuska if (vd == NULL) 291681ce946SMartin Matuska error = SET_ERROR(ENOENT); 292681ce946SMartin Matuska else if (vd->vdev_mg == NULL) 293681ce946SMartin Matuska error = SET_ERROR(ZFS_ERR_VDEV_NOTSUP); 294681ce946SMartin Matuska else if (!vd->vdev_noalloc) 295681ce946SMartin Matuska error = vdev_passivate(vd, &txg); 296681ce946SMartin Matuska 297681ce946SMartin Matuska if (error == 0) { 298681ce946SMartin Matuska vdev_dirty_leaves(vd, VDD_DTL, txg); 299681ce946SMartin Matuska vdev_config_dirty(vd); 300681ce946SMartin Matuska } 301681ce946SMartin Matuska 302681ce946SMartin Matuska error = spa_vdev_exit(spa, NULL, txg, error); 303681ce946SMartin Matuska 304681ce946SMartin Matuska return (error); 305681ce946SMartin Matuska } 306681ce946SMartin Matuska 307681ce946SMartin Matuska int 308681ce946SMartin Matuska spa_vdev_alloc(spa_t *spa, uint64_t guid) 309681ce946SMartin Matuska { 310681ce946SMartin Matuska vdev_t *vd; 311681ce946SMartin Matuska uint64_t txg; 312681ce946SMartin Matuska int error = 0; 313681ce946SMartin Matuska 314681ce946SMartin Matuska ASSERT(!MUTEX_HELD(&spa_namespace_lock)); 315681ce946SMartin Matuska ASSERT(spa_writeable(spa)); 316681ce946SMartin Matuska 317681ce946SMartin Matuska txg = spa_vdev_enter(spa); 318681ce946SMartin Matuska 319681ce946SMartin Matuska ASSERT(MUTEX_HELD(&spa_namespace_lock)); 320681ce946SMartin Matuska 321681ce946SMartin Matuska vd = spa_lookup_by_guid(spa, guid, B_FALSE); 322681ce946SMartin Matuska 323681ce946SMartin Matuska if (vd == NULL) 324681ce946SMartin Matuska error = SET_ERROR(ENOENT); 325681ce946SMartin Matuska else if (vd->vdev_mg == NULL) 326681ce946SMartin Matuska error = SET_ERROR(ZFS_ERR_VDEV_NOTSUP); 327681ce946SMartin Matuska else if (!vd->vdev_removing) 328681ce946SMartin Matuska vdev_activate(vd); 329681ce946SMartin Matuska 330681ce946SMartin Matuska if (error == 0) { 331681ce946SMartin Matuska vdev_dirty_leaves(vd, VDD_DTL, txg); 332681ce946SMartin Matuska vdev_config_dirty(vd); 333681ce946SMartin Matuska } 334681ce946SMartin Matuska 335681ce946SMartin Matuska (void) spa_vdev_exit(spa, NULL, txg, error); 336681ce946SMartin Matuska 337681ce946SMartin Matuska return (error); 338681ce946SMartin Matuska } 339681ce946SMartin Matuska 340681ce946SMartin Matuska static void 341a0b956f5SMartin Matuska spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev, 342a0b956f5SMartin Matuska int count, nvlist_t *dev_to_remove) 343eda14cbcSMatt Macy { 344eda14cbcSMatt Macy nvlist_t **newdev = NULL; 345eda14cbcSMatt Macy 346eda14cbcSMatt Macy if (count > 1) 347eda14cbcSMatt Macy newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 348eda14cbcSMatt Macy 349eda14cbcSMatt Macy for (int i = 0, j = 0; i < count; i++) { 350eda14cbcSMatt Macy if (dev[i] == dev_to_remove) 351eda14cbcSMatt Macy continue; 352eda14cbcSMatt Macy VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 353eda14cbcSMatt Macy } 354eda14cbcSMatt Macy 355eda14cbcSMatt Macy VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 356681ce946SMartin Matuska fnvlist_add_nvlist_array(config, name, (const nvlist_t * const *)newdev, 357681ce946SMartin Matuska count - 1); 358eda14cbcSMatt Macy 359eda14cbcSMatt Macy for (int i = 0; i < count - 1; i++) 360eda14cbcSMatt Macy nvlist_free(newdev[i]); 361eda14cbcSMatt Macy 362eda14cbcSMatt Macy if (count > 1) 363eda14cbcSMatt Macy kmem_free(newdev, (count - 1) * sizeof (void *)); 364eda14cbcSMatt Macy } 365eda14cbcSMatt Macy 366eda14cbcSMatt Macy static spa_vdev_removal_t * 367eda14cbcSMatt Macy spa_vdev_removal_create(vdev_t *vd) 368eda14cbcSMatt Macy { 369eda14cbcSMatt Macy spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP); 370eda14cbcSMatt Macy mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL); 371eda14cbcSMatt Macy cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL); 372*b59a0cdeSMartin Matuska svr->svr_allocd_segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, 373*b59a0cdeSMartin Matuska NULL, 0, 0); 374eda14cbcSMatt Macy svr->svr_vdev_id = vd->vdev_id; 375eda14cbcSMatt Macy 376eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) { 377*b59a0cdeSMartin Matuska svr->svr_frees[i] = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, 378*b59a0cdeSMartin Matuska NULL, 0, 0); 379eda14cbcSMatt Macy list_create(&svr->svr_new_segments[i], 380eda14cbcSMatt Macy sizeof (vdev_indirect_mapping_entry_t), 381eda14cbcSMatt Macy offsetof(vdev_indirect_mapping_entry_t, vime_node)); 382eda14cbcSMatt Macy } 383eda14cbcSMatt Macy 384eda14cbcSMatt Macy return (svr); 385eda14cbcSMatt Macy } 386eda14cbcSMatt Macy 387eda14cbcSMatt Macy void 388eda14cbcSMatt Macy spa_vdev_removal_destroy(spa_vdev_removal_t *svr) 389eda14cbcSMatt Macy { 390eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) { 391eda14cbcSMatt Macy ASSERT0(svr->svr_bytes_done[i]); 392eda14cbcSMatt Macy ASSERT0(svr->svr_max_offset_to_sync[i]); 393*b59a0cdeSMartin Matuska zfs_range_tree_destroy(svr->svr_frees[i]); 394eda14cbcSMatt Macy list_destroy(&svr->svr_new_segments[i]); 395eda14cbcSMatt Macy } 396eda14cbcSMatt Macy 397*b59a0cdeSMartin Matuska zfs_range_tree_destroy(svr->svr_allocd_segs); 398eda14cbcSMatt Macy mutex_destroy(&svr->svr_lock); 399eda14cbcSMatt Macy cv_destroy(&svr->svr_cv); 400eda14cbcSMatt Macy kmem_free(svr, sizeof (*svr)); 401eda14cbcSMatt Macy } 402eda14cbcSMatt Macy 403eda14cbcSMatt Macy /* 404eda14cbcSMatt Macy * This is called as a synctask in the txg in which we will mark this vdev 405eda14cbcSMatt Macy * as removing (in the config stored in the MOS). 406eda14cbcSMatt Macy * 407eda14cbcSMatt Macy * It begins the evacuation of a toplevel vdev by: 408eda14cbcSMatt Macy * - initializing the spa_removing_phys which tracks this removal 409eda14cbcSMatt Macy * - computing the amount of space to remove for accounting purposes 410eda14cbcSMatt Macy * - dirtying all dbufs in the spa_config_object 411eda14cbcSMatt Macy * - creating the spa_vdev_removal 412eda14cbcSMatt Macy * - starting the spa_vdev_remove_thread 413eda14cbcSMatt Macy */ 414eda14cbcSMatt Macy static void 415eda14cbcSMatt Macy vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx) 416eda14cbcSMatt Macy { 417eda14cbcSMatt Macy int vdev_id = (uintptr_t)arg; 418eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 419eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, vdev_id); 420eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 421eda14cbcSMatt Macy objset_t *mos = spa->spa_dsl_pool->dp_meta_objset; 422eda14cbcSMatt Macy spa_vdev_removal_t *svr = NULL; 423eda14cbcSMatt Macy uint64_t txg __maybe_unused = dmu_tx_get_txg(tx); 424eda14cbcSMatt Macy 4257877fdebSMatt Macy ASSERT0(vdev_get_nparity(vd)); 426eda14cbcSMatt Macy svr = spa_vdev_removal_create(vd); 427eda14cbcSMatt Macy 428eda14cbcSMatt Macy ASSERT(vd->vdev_removing); 429eda14cbcSMatt Macy ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 430eda14cbcSMatt Macy 431eda14cbcSMatt Macy spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 432eda14cbcSMatt Macy if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 433eda14cbcSMatt Macy /* 434eda14cbcSMatt Macy * By activating the OBSOLETE_COUNTS feature, we prevent 435eda14cbcSMatt Macy * the pool from being downgraded and ensure that the 436eda14cbcSMatt Macy * refcounts are precise. 437eda14cbcSMatt Macy */ 438eda14cbcSMatt Macy spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 439eda14cbcSMatt Macy uint64_t one = 1; 440eda14cbcSMatt Macy VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap, 441eda14cbcSMatt Macy VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1, 442eda14cbcSMatt Macy &one, tx)); 443eda14cbcSMatt Macy boolean_t are_precise __maybe_unused; 444eda14cbcSMatt Macy ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise)); 445eda14cbcSMatt Macy ASSERT3B(are_precise, ==, B_TRUE); 446eda14cbcSMatt Macy } 447eda14cbcSMatt Macy 448eda14cbcSMatt Macy vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx); 449eda14cbcSMatt Macy vd->vdev_indirect_mapping = 450eda14cbcSMatt Macy vdev_indirect_mapping_open(mos, vic->vic_mapping_object); 451eda14cbcSMatt Macy vic->vic_births_object = vdev_indirect_births_alloc(mos, tx); 452eda14cbcSMatt Macy vd->vdev_indirect_births = 453eda14cbcSMatt Macy vdev_indirect_births_open(mos, vic->vic_births_object); 454eda14cbcSMatt Macy spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id; 455eda14cbcSMatt Macy spa->spa_removing_phys.sr_start_time = gethrestime_sec(); 456eda14cbcSMatt Macy spa->spa_removing_phys.sr_end_time = 0; 457eda14cbcSMatt Macy spa->spa_removing_phys.sr_state = DSS_SCANNING; 458eda14cbcSMatt Macy spa->spa_removing_phys.sr_to_copy = 0; 459eda14cbcSMatt Macy spa->spa_removing_phys.sr_copied = 0; 460eda14cbcSMatt Macy 461eda14cbcSMatt Macy /* 462eda14cbcSMatt Macy * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because 463eda14cbcSMatt Macy * there may be space in the defer tree, which is free, but still 464eda14cbcSMatt Macy * counted in vs_alloc. 465eda14cbcSMatt Macy */ 466eda14cbcSMatt Macy for (uint64_t i = 0; i < vd->vdev_ms_count; i++) { 467eda14cbcSMatt Macy metaslab_t *ms = vd->vdev_ms[i]; 468eda14cbcSMatt Macy if (ms->ms_sm == NULL) 469eda14cbcSMatt Macy continue; 470eda14cbcSMatt Macy 471eda14cbcSMatt Macy spa->spa_removing_phys.sr_to_copy += 472eda14cbcSMatt Macy metaslab_allocated_space(ms); 473eda14cbcSMatt Macy 474eda14cbcSMatt Macy /* 475eda14cbcSMatt Macy * Space which we are freeing this txg does not need to 476eda14cbcSMatt Macy * be copied. 477eda14cbcSMatt Macy */ 478eda14cbcSMatt Macy spa->spa_removing_phys.sr_to_copy -= 479*b59a0cdeSMartin Matuska zfs_range_tree_space(ms->ms_freeing); 480eda14cbcSMatt Macy 481*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(ms->ms_freed)); 482eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) 483*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(ms->ms_allocating[t])); 484eda14cbcSMatt Macy } 485eda14cbcSMatt Macy 486eda14cbcSMatt Macy /* 487eda14cbcSMatt Macy * Sync tasks are called before metaslab_sync(), so there should 488eda14cbcSMatt Macy * be no already-synced metaslabs in the TXG_CLEAN list. 489eda14cbcSMatt Macy */ 490eda14cbcSMatt Macy ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL); 491eda14cbcSMatt Macy 492eda14cbcSMatt Macy spa_sync_removing_state(spa, tx); 493eda14cbcSMatt Macy 494eda14cbcSMatt Macy /* 495eda14cbcSMatt Macy * All blocks that we need to read the most recent mapping must be 496eda14cbcSMatt Macy * stored on concrete vdevs. Therefore, we must dirty anything that 497eda14cbcSMatt Macy * is read before spa_remove_init(). Specifically, the 498eda14cbcSMatt Macy * spa_config_object. (Note that although we already modified the 499eda14cbcSMatt Macy * spa_config_object in spa_sync_removing_state, that may not have 500eda14cbcSMatt Macy * modified all blocks of the object.) 501eda14cbcSMatt Macy */ 502eda14cbcSMatt Macy dmu_object_info_t doi; 503eda14cbcSMatt Macy VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi)); 504eda14cbcSMatt Macy for (uint64_t offset = 0; offset < doi.doi_max_offset; ) { 505eda14cbcSMatt Macy dmu_buf_t *dbuf; 506eda14cbcSMatt Macy VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT, 507eda14cbcSMatt Macy offset, FTAG, &dbuf, 0)); 508eda14cbcSMatt Macy dmu_buf_will_dirty(dbuf, tx); 509eda14cbcSMatt Macy offset += dbuf->db_size; 510eda14cbcSMatt Macy dmu_buf_rele(dbuf, FTAG); 511eda14cbcSMatt Macy } 512eda14cbcSMatt Macy 513eda14cbcSMatt Macy /* 514eda14cbcSMatt Macy * Now that we've allocated the im_object, dirty the vdev to ensure 515eda14cbcSMatt Macy * that the object gets written to the config on disk. 516eda14cbcSMatt Macy */ 517eda14cbcSMatt Macy vdev_config_dirty(vd); 518eda14cbcSMatt Macy 519eda14cbcSMatt Macy zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu " 52033b8c039SMartin Matuska "im_obj=%llu", (u_longlong_t)vd->vdev_id, vd, 52133b8c039SMartin Matuska (u_longlong_t)dmu_tx_get_txg(tx), 52233b8c039SMartin Matuska (u_longlong_t)vic->vic_mapping_object); 523eda14cbcSMatt Macy 524eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev remove started", tx, 525eda14cbcSMatt Macy "%s vdev %llu %s", spa_name(spa), (u_longlong_t)vd->vdev_id, 526eda14cbcSMatt Macy (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 527eda14cbcSMatt Macy /* 528eda14cbcSMatt Macy * Setting spa_vdev_removal causes subsequent frees to call 529eda14cbcSMatt Macy * free_from_removing_vdev(). Note that we don't need any locking 530eda14cbcSMatt Macy * because we are the sync thread, and metaslab_free_impl() is only 531eda14cbcSMatt Macy * called from syncing context (potentially from a zio taskq thread, 532eda14cbcSMatt Macy * but in any case only when there are outstanding free i/os, which 533eda14cbcSMatt Macy * there are not). 534eda14cbcSMatt Macy */ 535eda14cbcSMatt Macy ASSERT3P(spa->spa_vdev_removal, ==, NULL); 536eda14cbcSMatt Macy spa->spa_vdev_removal = svr; 537eda14cbcSMatt Macy svr->svr_thread = thread_create(NULL, 0, 538eda14cbcSMatt Macy spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri); 539eda14cbcSMatt Macy } 540eda14cbcSMatt Macy 541eda14cbcSMatt Macy /* 542eda14cbcSMatt Macy * When we are opening a pool, we must read the mapping for each 543eda14cbcSMatt Macy * indirect vdev in order from most recently removed to least 544eda14cbcSMatt Macy * recently removed. We do this because the blocks for the mapping 545eda14cbcSMatt Macy * of older indirect vdevs may be stored on more recently removed vdevs. 546eda14cbcSMatt Macy * In order to read each indirect mapping object, we must have 547eda14cbcSMatt Macy * initialized all more recently removed vdevs. 548eda14cbcSMatt Macy */ 549eda14cbcSMatt Macy int 550eda14cbcSMatt Macy spa_remove_init(spa_t *spa) 551eda14cbcSMatt Macy { 552eda14cbcSMatt Macy int error; 553eda14cbcSMatt Macy 554eda14cbcSMatt Macy error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset, 555eda14cbcSMatt Macy DMU_POOL_DIRECTORY_OBJECT, 556eda14cbcSMatt Macy DMU_POOL_REMOVING, sizeof (uint64_t), 557eda14cbcSMatt Macy sizeof (spa->spa_removing_phys) / sizeof (uint64_t), 558eda14cbcSMatt Macy &spa->spa_removing_phys); 559eda14cbcSMatt Macy 560eda14cbcSMatt Macy if (error == ENOENT) { 561eda14cbcSMatt Macy spa->spa_removing_phys.sr_state = DSS_NONE; 562eda14cbcSMatt Macy spa->spa_removing_phys.sr_removing_vdev = -1; 563eda14cbcSMatt Macy spa->spa_removing_phys.sr_prev_indirect_vdev = -1; 564eda14cbcSMatt Macy spa->spa_indirect_vdevs_loaded = B_TRUE; 565eda14cbcSMatt Macy return (0); 566eda14cbcSMatt Macy } else if (error != 0) { 567eda14cbcSMatt Macy return (error); 568eda14cbcSMatt Macy } 569eda14cbcSMatt Macy 570eda14cbcSMatt Macy if (spa->spa_removing_phys.sr_state == DSS_SCANNING) { 571eda14cbcSMatt Macy /* 572eda14cbcSMatt Macy * We are currently removing a vdev. Create and 573eda14cbcSMatt Macy * initialize a spa_vdev_removal_t from the bonus 574eda14cbcSMatt Macy * buffer of the removing vdevs vdev_im_object, and 575eda14cbcSMatt Macy * initialize its partial mapping. 576eda14cbcSMatt Macy */ 577eda14cbcSMatt Macy spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 578eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, 579eda14cbcSMatt Macy spa->spa_removing_phys.sr_removing_vdev); 580eda14cbcSMatt Macy 581eda14cbcSMatt Macy if (vd == NULL) { 582eda14cbcSMatt Macy spa_config_exit(spa, SCL_STATE, FTAG); 583eda14cbcSMatt Macy return (EINVAL); 584eda14cbcSMatt Macy } 585eda14cbcSMatt Macy 586eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 587eda14cbcSMatt Macy 588eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 589eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa_vdev_removal_create(vd); 590eda14cbcSMatt Macy ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id); 591eda14cbcSMatt Macy ASSERT(vd->vdev_removing); 592eda14cbcSMatt Macy 593eda14cbcSMatt Macy vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 594eda14cbcSMatt Macy spa->spa_meta_objset, vic->vic_mapping_object); 595eda14cbcSMatt Macy vd->vdev_indirect_births = vdev_indirect_births_open( 596eda14cbcSMatt Macy spa->spa_meta_objset, vic->vic_births_object); 597eda14cbcSMatt Macy spa_config_exit(spa, SCL_STATE, FTAG); 598eda14cbcSMatt Macy 599eda14cbcSMatt Macy spa->spa_vdev_removal = svr; 600eda14cbcSMatt Macy } 601eda14cbcSMatt Macy 602eda14cbcSMatt Macy spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 603eda14cbcSMatt Macy uint64_t indirect_vdev_id = 604eda14cbcSMatt Macy spa->spa_removing_phys.sr_prev_indirect_vdev; 605eda14cbcSMatt Macy while (indirect_vdev_id != UINT64_MAX) { 606eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id); 607eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 608eda14cbcSMatt Macy 609eda14cbcSMatt Macy ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 610eda14cbcSMatt Macy vd->vdev_indirect_mapping = vdev_indirect_mapping_open( 611eda14cbcSMatt Macy spa->spa_meta_objset, vic->vic_mapping_object); 612eda14cbcSMatt Macy vd->vdev_indirect_births = vdev_indirect_births_open( 613eda14cbcSMatt Macy spa->spa_meta_objset, vic->vic_births_object); 614eda14cbcSMatt Macy 615eda14cbcSMatt Macy indirect_vdev_id = vic->vic_prev_indirect_vdev; 616eda14cbcSMatt Macy } 617eda14cbcSMatt Macy spa_config_exit(spa, SCL_STATE, FTAG); 618eda14cbcSMatt Macy 619eda14cbcSMatt Macy /* 620eda14cbcSMatt Macy * Now that we've loaded all the indirect mappings, we can allow 621eda14cbcSMatt Macy * reads from other blocks (e.g. via predictive prefetch). 622eda14cbcSMatt Macy */ 623eda14cbcSMatt Macy spa->spa_indirect_vdevs_loaded = B_TRUE; 624eda14cbcSMatt Macy return (0); 625eda14cbcSMatt Macy } 626eda14cbcSMatt Macy 627eda14cbcSMatt Macy void 628eda14cbcSMatt Macy spa_restart_removal(spa_t *spa) 629eda14cbcSMatt Macy { 630eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 631eda14cbcSMatt Macy 632eda14cbcSMatt Macy if (svr == NULL) 633eda14cbcSMatt Macy return; 634eda14cbcSMatt Macy 635eda14cbcSMatt Macy /* 636eda14cbcSMatt Macy * In general when this function is called there is no 637eda14cbcSMatt Macy * removal thread running. The only scenario where this 638eda14cbcSMatt Macy * is not true is during spa_import() where this function 639eda14cbcSMatt Macy * is called twice [once from spa_import_impl() and 640eda14cbcSMatt Macy * spa_async_resume()]. Thus, in the scenario where we 641eda14cbcSMatt Macy * import a pool that has an ongoing removal we don't 642eda14cbcSMatt Macy * want to spawn a second thread. 643eda14cbcSMatt Macy */ 644eda14cbcSMatt Macy if (svr->svr_thread != NULL) 645eda14cbcSMatt Macy return; 646eda14cbcSMatt Macy 647eda14cbcSMatt Macy if (!spa_writeable(spa)) 648eda14cbcSMatt Macy return; 649eda14cbcSMatt Macy 65033b8c039SMartin Matuska zfs_dbgmsg("restarting removal of %llu", 65133b8c039SMartin Matuska (u_longlong_t)svr->svr_vdev_id); 652eda14cbcSMatt Macy svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa, 653eda14cbcSMatt Macy 0, &p0, TS_RUN, minclsyspri); 654eda14cbcSMatt Macy } 655eda14cbcSMatt Macy 656eda14cbcSMatt Macy /* 657eda14cbcSMatt Macy * Process freeing from a device which is in the middle of being removed. 658eda14cbcSMatt Macy * We must handle this carefully so that we attempt to copy freed data, 659eda14cbcSMatt Macy * and we correctly free already-copied data. 660eda14cbcSMatt Macy */ 661eda14cbcSMatt Macy void 662eda14cbcSMatt Macy free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size) 663eda14cbcSMatt Macy { 664eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 665eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 666eda14cbcSMatt Macy vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 667eda14cbcSMatt Macy uint64_t txg = spa_syncing_txg(spa); 668eda14cbcSMatt Macy uint64_t max_offset_yet = 0; 669eda14cbcSMatt Macy 670eda14cbcSMatt Macy ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 671eda14cbcSMatt Macy ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==, 672eda14cbcSMatt Macy vdev_indirect_mapping_object(vim)); 673eda14cbcSMatt Macy ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id); 674eda14cbcSMatt Macy 675eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 676eda14cbcSMatt Macy 677eda14cbcSMatt Macy /* 678eda14cbcSMatt Macy * Remove the segment from the removing vdev's spacemap. This 679eda14cbcSMatt Macy * ensures that we will not attempt to copy this space (if the 680eda14cbcSMatt Macy * removal thread has not yet visited it), and also ensures 681eda14cbcSMatt Macy * that we know what is actually allocated on the new vdevs 682eda14cbcSMatt Macy * (needed if we cancel the removal). 683eda14cbcSMatt Macy * 684eda14cbcSMatt Macy * Note: we must do the metaslab_free_concrete() with the svr_lock 685eda14cbcSMatt Macy * held, so that the remove_thread can not load this metaslab and then 686eda14cbcSMatt Macy * visit this offset between the time that we metaslab_free_concrete() 687eda14cbcSMatt Macy * and when we check to see if it has been visited. 688eda14cbcSMatt Macy * 689eda14cbcSMatt Macy * Note: The checkpoint flag is set to false as having/taking 690eda14cbcSMatt Macy * a checkpoint and removing a device can't happen at the same 691eda14cbcSMatt Macy * time. 692eda14cbcSMatt Macy */ 693eda14cbcSMatt Macy ASSERT(!spa_has_checkpoint(spa)); 694eda14cbcSMatt Macy metaslab_free_concrete(vd, offset, size, B_FALSE); 695eda14cbcSMatt Macy 696eda14cbcSMatt Macy uint64_t synced_size = 0; 697eda14cbcSMatt Macy uint64_t synced_offset = 0; 698eda14cbcSMatt Macy uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim); 699eda14cbcSMatt Macy if (offset < max_offset_synced) { 700eda14cbcSMatt Macy /* 701eda14cbcSMatt Macy * The mapping for this offset is already on disk. 702eda14cbcSMatt Macy * Free from the new location. 703eda14cbcSMatt Macy * 704eda14cbcSMatt Macy * Note that we use svr_max_synced_offset because it is 705eda14cbcSMatt Macy * updated atomically with respect to the in-core mapping. 706eda14cbcSMatt Macy * By contrast, vim_max_offset is not. 707eda14cbcSMatt Macy * 708eda14cbcSMatt Macy * This block may be split between a synced entry and an 709eda14cbcSMatt Macy * in-flight or unvisited entry. Only process the synced 710eda14cbcSMatt Macy * portion of it here. 711eda14cbcSMatt Macy */ 712eda14cbcSMatt Macy synced_size = MIN(size, max_offset_synced - offset); 713eda14cbcSMatt Macy synced_offset = offset; 714eda14cbcSMatt Macy 715eda14cbcSMatt Macy ASSERT3U(max_offset_yet, <=, max_offset_synced); 716eda14cbcSMatt Macy max_offset_yet = max_offset_synced; 717eda14cbcSMatt Macy 718eda14cbcSMatt Macy DTRACE_PROBE3(remove__free__synced, 719eda14cbcSMatt Macy spa_t *, spa, 720eda14cbcSMatt Macy uint64_t, offset, 721eda14cbcSMatt Macy uint64_t, synced_size); 722eda14cbcSMatt Macy 723eda14cbcSMatt Macy size -= synced_size; 724eda14cbcSMatt Macy offset += synced_size; 725eda14cbcSMatt Macy } 726eda14cbcSMatt Macy 727eda14cbcSMatt Macy /* 728eda14cbcSMatt Macy * Look at all in-flight txgs starting from the currently syncing one 729eda14cbcSMatt Macy * and see if a section of this free is being copied. By starting from 730eda14cbcSMatt Macy * this txg and iterating forward, we might find that this region 731eda14cbcSMatt Macy * was copied in two different txgs and handle it appropriately. 732eda14cbcSMatt Macy */ 733eda14cbcSMatt Macy for (int i = 0; i < TXG_CONCURRENT_STATES; i++) { 734eda14cbcSMatt Macy int txgoff = (txg + i) & TXG_MASK; 735eda14cbcSMatt Macy if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) { 736eda14cbcSMatt Macy /* 737eda14cbcSMatt Macy * The mapping for this offset is in flight, and 738eda14cbcSMatt Macy * will be synced in txg+i. 739eda14cbcSMatt Macy */ 740eda14cbcSMatt Macy uint64_t inflight_size = MIN(size, 741eda14cbcSMatt Macy svr->svr_max_offset_to_sync[txgoff] - offset); 742eda14cbcSMatt Macy 743eda14cbcSMatt Macy DTRACE_PROBE4(remove__free__inflight, 744eda14cbcSMatt Macy spa_t *, spa, 745eda14cbcSMatt Macy uint64_t, offset, 746eda14cbcSMatt Macy uint64_t, inflight_size, 747eda14cbcSMatt Macy uint64_t, txg + i); 748eda14cbcSMatt Macy 749eda14cbcSMatt Macy /* 750eda14cbcSMatt Macy * We copy data in order of increasing offset. 751eda14cbcSMatt Macy * Therefore the max_offset_to_sync[] must increase 752eda14cbcSMatt Macy * (or be zero, indicating that nothing is being 753eda14cbcSMatt Macy * copied in that txg). 754eda14cbcSMatt Macy */ 755eda14cbcSMatt Macy if (svr->svr_max_offset_to_sync[txgoff] != 0) { 756eda14cbcSMatt Macy ASSERT3U(svr->svr_max_offset_to_sync[txgoff], 757eda14cbcSMatt Macy >=, max_offset_yet); 758eda14cbcSMatt Macy max_offset_yet = 759eda14cbcSMatt Macy svr->svr_max_offset_to_sync[txgoff]; 760eda14cbcSMatt Macy } 761eda14cbcSMatt Macy 762eda14cbcSMatt Macy /* 763eda14cbcSMatt Macy * We've already committed to copying this segment: 764eda14cbcSMatt Macy * we have allocated space elsewhere in the pool for 765eda14cbcSMatt Macy * it and have an IO outstanding to copy the data. We 766eda14cbcSMatt Macy * cannot free the space before the copy has 767eda14cbcSMatt Macy * completed, or else the copy IO might overwrite any 768eda14cbcSMatt Macy * new data. To free that space, we record the 769eda14cbcSMatt Macy * segment in the appropriate svr_frees tree and free 770eda14cbcSMatt Macy * the mapped space later, in the txg where we have 771eda14cbcSMatt Macy * completed the copy and synced the mapping (see 772eda14cbcSMatt Macy * vdev_mapping_sync). 773eda14cbcSMatt Macy */ 774*b59a0cdeSMartin Matuska zfs_range_tree_add(svr->svr_frees[txgoff], 775eda14cbcSMatt Macy offset, inflight_size); 776eda14cbcSMatt Macy size -= inflight_size; 777eda14cbcSMatt Macy offset += inflight_size; 778eda14cbcSMatt Macy 779eda14cbcSMatt Macy /* 780eda14cbcSMatt Macy * This space is already accounted for as being 781eda14cbcSMatt Macy * done, because it is being copied in txg+i. 782eda14cbcSMatt Macy * However, if i!=0, then it is being copied in 783eda14cbcSMatt Macy * a future txg. If we crash after this txg 784eda14cbcSMatt Macy * syncs but before txg+i syncs, then the space 785eda14cbcSMatt Macy * will be free. Therefore we must account 786eda14cbcSMatt Macy * for the space being done in *this* txg 787eda14cbcSMatt Macy * (when it is freed) rather than the future txg 788eda14cbcSMatt Macy * (when it will be copied). 789eda14cbcSMatt Macy */ 790eda14cbcSMatt Macy ASSERT3U(svr->svr_bytes_done[txgoff], >=, 791eda14cbcSMatt Macy inflight_size); 792eda14cbcSMatt Macy svr->svr_bytes_done[txgoff] -= inflight_size; 793eda14cbcSMatt Macy svr->svr_bytes_done[txg & TXG_MASK] += inflight_size; 794eda14cbcSMatt Macy } 795eda14cbcSMatt Macy } 796eda14cbcSMatt Macy ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]); 797eda14cbcSMatt Macy 798eda14cbcSMatt Macy if (size > 0) { 799eda14cbcSMatt Macy /* 800eda14cbcSMatt Macy * The copy thread has not yet visited this offset. Ensure 801eda14cbcSMatt Macy * that it doesn't. 802eda14cbcSMatt Macy */ 803eda14cbcSMatt Macy 804eda14cbcSMatt Macy DTRACE_PROBE3(remove__free__unvisited, 805eda14cbcSMatt Macy spa_t *, spa, 806eda14cbcSMatt Macy uint64_t, offset, 807eda14cbcSMatt Macy uint64_t, size); 808eda14cbcSMatt Macy 809eda14cbcSMatt Macy if (svr->svr_allocd_segs != NULL) 810*b59a0cdeSMartin Matuska zfs_range_tree_clear(svr->svr_allocd_segs, offset, 811*b59a0cdeSMartin Matuska size); 812eda14cbcSMatt Macy 813eda14cbcSMatt Macy /* 814eda14cbcSMatt Macy * Since we now do not need to copy this data, for 815eda14cbcSMatt Macy * accounting purposes we have done our job and can count 816eda14cbcSMatt Macy * it as completed. 817eda14cbcSMatt Macy */ 818eda14cbcSMatt Macy svr->svr_bytes_done[txg & TXG_MASK] += size; 819eda14cbcSMatt Macy } 820eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 821eda14cbcSMatt Macy 822eda14cbcSMatt Macy /* 823eda14cbcSMatt Macy * Now that we have dropped svr_lock, process the synced portion 824eda14cbcSMatt Macy * of this free. 825eda14cbcSMatt Macy */ 826eda14cbcSMatt Macy if (synced_size > 0) { 827eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, synced_offset, synced_size); 828eda14cbcSMatt Macy 829eda14cbcSMatt Macy /* 830eda14cbcSMatt Macy * Note: this can only be called from syncing context, 831eda14cbcSMatt Macy * and the vdev_indirect_mapping is only changed from the 832eda14cbcSMatt Macy * sync thread, so we don't need svr_lock while doing 833eda14cbcSMatt Macy * metaslab_free_impl_cb. 834eda14cbcSMatt Macy */ 835eda14cbcSMatt Macy boolean_t checkpoint = B_FALSE; 836eda14cbcSMatt Macy vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size, 837eda14cbcSMatt Macy metaslab_free_impl_cb, &checkpoint); 838eda14cbcSMatt Macy } 839eda14cbcSMatt Macy } 840eda14cbcSMatt Macy 841eda14cbcSMatt Macy /* 842eda14cbcSMatt Macy * Stop an active removal and update the spa_removing phys. 843eda14cbcSMatt Macy */ 844eda14cbcSMatt Macy static void 845eda14cbcSMatt Macy spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx) 846eda14cbcSMatt Macy { 847eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 848eda14cbcSMatt Macy ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa)); 849eda14cbcSMatt Macy 850eda14cbcSMatt Macy /* Ensure the removal thread has completed before we free the svr. */ 851eda14cbcSMatt Macy spa_vdev_remove_suspend(spa); 852eda14cbcSMatt Macy 853eda14cbcSMatt Macy ASSERT(state == DSS_FINISHED || state == DSS_CANCELED); 854eda14cbcSMatt Macy 855eda14cbcSMatt Macy if (state == DSS_FINISHED) { 856eda14cbcSMatt Macy spa_removing_phys_t *srp = &spa->spa_removing_phys; 857eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 858eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 859eda14cbcSMatt Macy 860eda14cbcSMatt Macy if (srp->sr_prev_indirect_vdev != -1) { 861eda14cbcSMatt Macy vdev_t *pvd; 862eda14cbcSMatt Macy pvd = vdev_lookup_top(spa, 863eda14cbcSMatt Macy srp->sr_prev_indirect_vdev); 864eda14cbcSMatt Macy ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops); 865eda14cbcSMatt Macy } 866eda14cbcSMatt Macy 867eda14cbcSMatt Macy vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev; 868eda14cbcSMatt Macy srp->sr_prev_indirect_vdev = vd->vdev_id; 869eda14cbcSMatt Macy } 870eda14cbcSMatt Macy spa->spa_removing_phys.sr_state = state; 871eda14cbcSMatt Macy spa->spa_removing_phys.sr_end_time = gethrestime_sec(); 872eda14cbcSMatt Macy 873eda14cbcSMatt Macy spa->spa_vdev_removal = NULL; 874eda14cbcSMatt Macy spa_vdev_removal_destroy(svr); 875eda14cbcSMatt Macy 876eda14cbcSMatt Macy spa_sync_removing_state(spa, tx); 877eda14cbcSMatt Macy spa_notify_waiters(spa); 878eda14cbcSMatt Macy 879eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 880eda14cbcSMatt Macy } 881eda14cbcSMatt Macy 882eda14cbcSMatt Macy static void 883eda14cbcSMatt Macy free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size) 884eda14cbcSMatt Macy { 885eda14cbcSMatt Macy vdev_t *vd = arg; 886eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 887eda14cbcSMatt Macy boolean_t checkpoint = B_FALSE; 888eda14cbcSMatt Macy vdev_indirect_ops.vdev_op_remap(vd, offset, size, 889eda14cbcSMatt Macy metaslab_free_impl_cb, &checkpoint); 890eda14cbcSMatt Macy } 891eda14cbcSMatt Macy 892eda14cbcSMatt Macy /* 893eda14cbcSMatt Macy * On behalf of the removal thread, syncs an incremental bit more of 894eda14cbcSMatt Macy * the indirect mapping to disk and updates the in-memory mapping. 895eda14cbcSMatt Macy * Called as a sync task in every txg that the removal thread makes progress. 896eda14cbcSMatt Macy */ 897eda14cbcSMatt Macy static void 898eda14cbcSMatt Macy vdev_mapping_sync(void *arg, dmu_tx_t *tx) 899eda14cbcSMatt Macy { 900eda14cbcSMatt Macy spa_vdev_removal_t *svr = arg; 901eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 902eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 903eda14cbcSMatt Macy vdev_indirect_config_t *vic __maybe_unused = &vd->vdev_indirect_config; 904eda14cbcSMatt Macy uint64_t txg = dmu_tx_get_txg(tx); 905eda14cbcSMatt Macy vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 906eda14cbcSMatt Macy 907eda14cbcSMatt Macy ASSERT(vic->vic_mapping_object != 0); 908eda14cbcSMatt Macy ASSERT3U(txg, ==, spa_syncing_txg(spa)); 909eda14cbcSMatt Macy 910eda14cbcSMatt Macy vdev_indirect_mapping_add_entries(vim, 911eda14cbcSMatt Macy &svr->svr_new_segments[txg & TXG_MASK], tx); 912eda14cbcSMatt Macy vdev_indirect_births_add_entry(vd->vdev_indirect_births, 913eda14cbcSMatt Macy vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx); 914eda14cbcSMatt Macy 915eda14cbcSMatt Macy /* 916eda14cbcSMatt Macy * Free the copied data for anything that was freed while the 917eda14cbcSMatt Macy * mapping entries were in flight. 918eda14cbcSMatt Macy */ 919eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 920*b59a0cdeSMartin Matuska zfs_range_tree_vacate(svr->svr_frees[txg & TXG_MASK], 921eda14cbcSMatt Macy free_mapped_segment_cb, vd); 922eda14cbcSMatt Macy ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, 923eda14cbcSMatt Macy vdev_indirect_mapping_max_offset(vim)); 924eda14cbcSMatt Macy svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0; 925eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 926eda14cbcSMatt Macy 927eda14cbcSMatt Macy spa_sync_removing_state(spa, tx); 928eda14cbcSMatt Macy } 929eda14cbcSMatt Macy 930eda14cbcSMatt Macy typedef struct vdev_copy_segment_arg { 931eda14cbcSMatt Macy spa_t *vcsa_spa; 932eda14cbcSMatt Macy dva_t *vcsa_dest_dva; 933eda14cbcSMatt Macy uint64_t vcsa_txg; 934*b59a0cdeSMartin Matuska zfs_range_tree_t *vcsa_obsolete_segs; 935eda14cbcSMatt Macy } vdev_copy_segment_arg_t; 936eda14cbcSMatt Macy 937eda14cbcSMatt Macy static void 938eda14cbcSMatt Macy unalloc_seg(void *arg, uint64_t start, uint64_t size) 939eda14cbcSMatt Macy { 940eda14cbcSMatt Macy vdev_copy_segment_arg_t *vcsa = arg; 941eda14cbcSMatt Macy spa_t *spa = vcsa->vcsa_spa; 942eda14cbcSMatt Macy blkptr_t bp = { { { {0} } } }; 943eda14cbcSMatt Macy 944eda14cbcSMatt Macy BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL); 945eda14cbcSMatt Macy BP_SET_LSIZE(&bp, size); 946eda14cbcSMatt Macy BP_SET_PSIZE(&bp, size); 947eda14cbcSMatt Macy BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF); 948eda14cbcSMatt Macy BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF); 949eda14cbcSMatt Macy BP_SET_TYPE(&bp, DMU_OT_NONE); 950eda14cbcSMatt Macy BP_SET_LEVEL(&bp, 0); 951eda14cbcSMatt Macy BP_SET_DEDUP(&bp, 0); 952eda14cbcSMatt Macy BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER); 953eda14cbcSMatt Macy 954eda14cbcSMatt Macy DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva)); 955eda14cbcSMatt Macy DVA_SET_OFFSET(&bp.blk_dva[0], 956eda14cbcSMatt Macy DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start); 957eda14cbcSMatt Macy DVA_SET_ASIZE(&bp.blk_dva[0], size); 958eda14cbcSMatt Macy 959eda14cbcSMatt Macy zio_free(spa, vcsa->vcsa_txg, &bp); 960eda14cbcSMatt Macy } 961eda14cbcSMatt Macy 962eda14cbcSMatt Macy /* 963eda14cbcSMatt Macy * All reads and writes associated with a call to spa_vdev_copy_segment() 964eda14cbcSMatt Macy * are done. 965eda14cbcSMatt Macy */ 966eda14cbcSMatt Macy static void 967eda14cbcSMatt Macy spa_vdev_copy_segment_done(zio_t *zio) 968eda14cbcSMatt Macy { 969eda14cbcSMatt Macy vdev_copy_segment_arg_t *vcsa = zio->io_private; 970eda14cbcSMatt Macy 971*b59a0cdeSMartin Matuska zfs_range_tree_vacate(vcsa->vcsa_obsolete_segs, 972eda14cbcSMatt Macy unalloc_seg, vcsa); 973*b59a0cdeSMartin Matuska zfs_range_tree_destroy(vcsa->vcsa_obsolete_segs); 974eda14cbcSMatt Macy kmem_free(vcsa, sizeof (*vcsa)); 975eda14cbcSMatt Macy 976eda14cbcSMatt Macy spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa); 977eda14cbcSMatt Macy } 978eda14cbcSMatt Macy 979eda14cbcSMatt Macy /* 980eda14cbcSMatt Macy * The write of the new location is done. 981eda14cbcSMatt Macy */ 982eda14cbcSMatt Macy static void 983eda14cbcSMatt Macy spa_vdev_copy_segment_write_done(zio_t *zio) 984eda14cbcSMatt Macy { 985eda14cbcSMatt Macy vdev_copy_arg_t *vca = zio->io_private; 986eda14cbcSMatt Macy 987eda14cbcSMatt Macy abd_free(zio->io_abd); 988eda14cbcSMatt Macy 989eda14cbcSMatt Macy mutex_enter(&vca->vca_lock); 990eda14cbcSMatt Macy vca->vca_outstanding_bytes -= zio->io_size; 991eda14cbcSMatt Macy 992eda14cbcSMatt Macy if (zio->io_error != 0) 993eda14cbcSMatt Macy vca->vca_write_error_bytes += zio->io_size; 994eda14cbcSMatt Macy 995eda14cbcSMatt Macy cv_signal(&vca->vca_cv); 996eda14cbcSMatt Macy mutex_exit(&vca->vca_lock); 997eda14cbcSMatt Macy } 998eda14cbcSMatt Macy 999eda14cbcSMatt Macy /* 1000eda14cbcSMatt Macy * The read of the old location is done. The parent zio is the write to 1001eda14cbcSMatt Macy * the new location. Allow it to start. 1002eda14cbcSMatt Macy */ 1003eda14cbcSMatt Macy static void 1004eda14cbcSMatt Macy spa_vdev_copy_segment_read_done(zio_t *zio) 1005eda14cbcSMatt Macy { 1006eda14cbcSMatt Macy vdev_copy_arg_t *vca = zio->io_private; 1007eda14cbcSMatt Macy 1008eda14cbcSMatt Macy if (zio->io_error != 0) { 1009eda14cbcSMatt Macy mutex_enter(&vca->vca_lock); 1010eda14cbcSMatt Macy vca->vca_read_error_bytes += zio->io_size; 1011eda14cbcSMatt Macy mutex_exit(&vca->vca_lock); 1012eda14cbcSMatt Macy } 1013eda14cbcSMatt Macy 1014eda14cbcSMatt Macy zio_nowait(zio_unique_parent(zio)); 1015eda14cbcSMatt Macy } 1016eda14cbcSMatt Macy 1017eda14cbcSMatt Macy /* 1018eda14cbcSMatt Macy * If the old and new vdevs are mirrors, we will read both sides of the old 1019eda14cbcSMatt Macy * mirror, and write each copy to the corresponding side of the new mirror. 1020eda14cbcSMatt Macy * If the old and new vdevs have a different number of children, we will do 1021eda14cbcSMatt Macy * this as best as possible. Since we aren't verifying checksums, this 1022eda14cbcSMatt Macy * ensures that as long as there's a good copy of the data, we'll have a 1023eda14cbcSMatt Macy * good copy after the removal, even if there's silent damage to one side 1024eda14cbcSMatt Macy * of the mirror. If we're removing a mirror that has some silent damage, 1025eda14cbcSMatt Macy * we'll have exactly the same damage in the new location (assuming that 1026eda14cbcSMatt Macy * the new location is also a mirror). 1027eda14cbcSMatt Macy * 1028eda14cbcSMatt Macy * We accomplish this by creating a tree of zio_t's, with as many writes as 1029eda14cbcSMatt Macy * there are "children" of the new vdev (a non-redundant vdev counts as one 1030eda14cbcSMatt Macy * child, a 2-way mirror has 2 children, etc). Each write has an associated 1031eda14cbcSMatt Macy * read from a child of the old vdev. Typically there will be the same 1032eda14cbcSMatt Macy * number of children of the old and new vdevs. However, if there are more 1033eda14cbcSMatt Macy * children of the new vdev, some child(ren) of the old vdev will be issued 1034eda14cbcSMatt Macy * multiple reads. If there are more children of the old vdev, some copies 1035eda14cbcSMatt Macy * will be dropped. 1036eda14cbcSMatt Macy * 1037eda14cbcSMatt Macy * For example, the tree of zio_t's for a 2-way mirror is: 1038eda14cbcSMatt Macy * 1039eda14cbcSMatt Macy * null 1040eda14cbcSMatt Macy * / \ 1041eda14cbcSMatt Macy * write(new vdev, child 0) write(new vdev, child 1) 1042eda14cbcSMatt Macy * | | 1043eda14cbcSMatt Macy * read(old vdev, child 0) read(old vdev, child 1) 1044eda14cbcSMatt Macy * 1045eda14cbcSMatt Macy * Child zio's complete before their parents complete. However, zio's 1046eda14cbcSMatt Macy * created with zio_vdev_child_io() may be issued before their children 1047eda14cbcSMatt Macy * complete. In this case we need to make sure that the children (reads) 1048eda14cbcSMatt Macy * complete before the parents (writes) are *issued*. We do this by not 1049eda14cbcSMatt Macy * calling zio_nowait() on each write until its corresponding read has 1050eda14cbcSMatt Macy * completed. 1051eda14cbcSMatt Macy * 1052eda14cbcSMatt Macy * The spa_config_lock must be held while zio's created by 1053eda14cbcSMatt Macy * zio_vdev_child_io() are in progress, to ensure that the vdev tree does 1054eda14cbcSMatt Macy * not change (e.g. due to a concurrent "zpool attach/detach"). The "null" 1055eda14cbcSMatt Macy * zio is needed to release the spa_config_lock after all the reads and 1056eda14cbcSMatt Macy * writes complete. (Note that we can't grab the config lock for each read, 1057eda14cbcSMatt Macy * because it is not reentrant - we could deadlock with a thread waiting 1058eda14cbcSMatt Macy * for a write lock.) 1059eda14cbcSMatt Macy */ 1060eda14cbcSMatt Macy static void 1061eda14cbcSMatt Macy spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio, 1062eda14cbcSMatt Macy vdev_t *source_vd, uint64_t source_offset, 1063eda14cbcSMatt Macy vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size) 1064eda14cbcSMatt Macy { 1065eda14cbcSMatt Macy ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0); 1066eda14cbcSMatt Macy 1067eda14cbcSMatt Macy /* 1068eda14cbcSMatt Macy * If the destination child in unwritable then there is no point 1069eda14cbcSMatt Macy * in issuing the source reads which cannot be written. 1070eda14cbcSMatt Macy */ 1071eda14cbcSMatt Macy if (!vdev_writeable(dest_child_vd)) 1072eda14cbcSMatt Macy return; 1073eda14cbcSMatt Macy 1074eda14cbcSMatt Macy mutex_enter(&vca->vca_lock); 1075eda14cbcSMatt Macy vca->vca_outstanding_bytes += size; 1076eda14cbcSMatt Macy mutex_exit(&vca->vca_lock); 1077eda14cbcSMatt Macy 1078eda14cbcSMatt Macy abd_t *abd = abd_alloc_for_io(size, B_FALSE); 1079eda14cbcSMatt Macy 1080eda14cbcSMatt Macy vdev_t *source_child_vd = NULL; 1081eda14cbcSMatt Macy if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) { 1082eda14cbcSMatt Macy /* 1083eda14cbcSMatt Macy * Source and dest are both mirrors. Copy from the same 1084eda14cbcSMatt Macy * child id as we are copying to (wrapping around if there 1085eda14cbcSMatt Macy * are more dest children than source children). If the 1086eda14cbcSMatt Macy * preferred source child is unreadable select another. 1087eda14cbcSMatt Macy */ 1088eda14cbcSMatt Macy for (int i = 0; i < source_vd->vdev_children; i++) { 1089eda14cbcSMatt Macy source_child_vd = source_vd->vdev_child[ 1090eda14cbcSMatt Macy (dest_id + i) % source_vd->vdev_children]; 1091eda14cbcSMatt Macy if (vdev_readable(source_child_vd)) 1092eda14cbcSMatt Macy break; 1093eda14cbcSMatt Macy } 1094eda14cbcSMatt Macy } else { 1095eda14cbcSMatt Macy source_child_vd = source_vd; 1096eda14cbcSMatt Macy } 1097eda14cbcSMatt Macy 1098eda14cbcSMatt Macy /* 1099eda14cbcSMatt Macy * There should always be at least one readable source child or 1100eda14cbcSMatt Macy * the pool would be in a suspended state. Somehow selecting an 1101eda14cbcSMatt Macy * unreadable child would result in IO errors, the removal process 1102eda14cbcSMatt Macy * being cancelled, and the pool reverting to its pre-removal state. 1103eda14cbcSMatt Macy */ 1104eda14cbcSMatt Macy ASSERT3P(source_child_vd, !=, NULL); 1105eda14cbcSMatt Macy 1106eda14cbcSMatt Macy zio_t *write_zio = zio_vdev_child_io(nzio, NULL, 1107eda14cbcSMatt Macy dest_child_vd, dest_offset, abd, size, 1108eda14cbcSMatt Macy ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL, 1109eda14cbcSMatt Macy ZIO_FLAG_CANFAIL, 1110eda14cbcSMatt Macy spa_vdev_copy_segment_write_done, vca); 1111eda14cbcSMatt Macy 1112eda14cbcSMatt Macy zio_nowait(zio_vdev_child_io(write_zio, NULL, 1113eda14cbcSMatt Macy source_child_vd, source_offset, abd, size, 1114eda14cbcSMatt Macy ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL, 1115eda14cbcSMatt Macy ZIO_FLAG_CANFAIL, 1116eda14cbcSMatt Macy spa_vdev_copy_segment_read_done, vca)); 1117eda14cbcSMatt Macy } 1118eda14cbcSMatt Macy 1119eda14cbcSMatt Macy /* 1120eda14cbcSMatt Macy * Allocate a new location for this segment, and create the zio_t's to 1121eda14cbcSMatt Macy * read from the old location and write to the new location. 1122eda14cbcSMatt Macy */ 1123eda14cbcSMatt Macy static int 1124*b59a0cdeSMartin Matuska spa_vdev_copy_segment(vdev_t *vd, zfs_range_tree_t *segs, 1125eda14cbcSMatt Macy uint64_t maxalloc, uint64_t txg, 1126eda14cbcSMatt Macy vdev_copy_arg_t *vca, zio_alloc_list_t *zal) 1127eda14cbcSMatt Macy { 1128eda14cbcSMatt Macy metaslab_group_t *mg = vd->vdev_mg; 1129eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 1130eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1131eda14cbcSMatt Macy vdev_indirect_mapping_entry_t *entry; 1132eda14cbcSMatt Macy dva_t dst = {{ 0 }}; 1133*b59a0cdeSMartin Matuska uint64_t start = zfs_range_tree_min(segs); 1134eda14cbcSMatt Macy ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift)); 1135eda14cbcSMatt Macy 1136eda14cbcSMatt Macy ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE); 1137eda14cbcSMatt Macy ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift)); 1138eda14cbcSMatt Macy 1139*b59a0cdeSMartin Matuska uint64_t size = zfs_range_tree_span(segs); 1140*b59a0cdeSMartin Matuska if (zfs_range_tree_span(segs) > maxalloc) { 1141eda14cbcSMatt Macy /* 1142eda14cbcSMatt Macy * We can't allocate all the segments. Prefer to end 1143eda14cbcSMatt Macy * the allocation at the end of a segment, thus avoiding 1144eda14cbcSMatt Macy * additional split blocks. 1145eda14cbcSMatt Macy */ 1146*b59a0cdeSMartin Matuska zfs_range_seg_max_t search; 1147eda14cbcSMatt Macy zfs_btree_index_t where; 1148*b59a0cdeSMartin Matuska zfs_rs_set_start(&search, segs, start + maxalloc); 1149*b59a0cdeSMartin Matuska zfs_rs_set_end(&search, segs, start + maxalloc); 1150eda14cbcSMatt Macy (void) zfs_btree_find(&segs->rt_root, &search, &where); 1151*b59a0cdeSMartin Matuska zfs_range_seg_t *rs = zfs_btree_prev(&segs->rt_root, &where, 1152eda14cbcSMatt Macy &where); 1153eda14cbcSMatt Macy if (rs != NULL) { 1154*b59a0cdeSMartin Matuska size = zfs_rs_get_end(rs, segs) - start; 1155eda14cbcSMatt Macy } else { 1156eda14cbcSMatt Macy /* 1157eda14cbcSMatt Macy * There are no segments that end before maxalloc. 1158eda14cbcSMatt Macy * I.e. the first segment is larger than maxalloc, 1159eda14cbcSMatt Macy * so we must split it. 1160eda14cbcSMatt Macy */ 1161eda14cbcSMatt Macy size = maxalloc; 1162eda14cbcSMatt Macy } 1163eda14cbcSMatt Macy } 1164eda14cbcSMatt Macy ASSERT3U(size, <=, maxalloc); 1165eda14cbcSMatt Macy ASSERT0(P2PHASE(size, 1 << spa->spa_min_ashift)); 1166eda14cbcSMatt Macy 1167eda14cbcSMatt Macy /* 1168eda14cbcSMatt Macy * An allocation class might not have any remaining vdevs or space 1169eda14cbcSMatt Macy */ 1170eda14cbcSMatt Macy metaslab_class_t *mc = mg->mg_class; 11717877fdebSMatt Macy if (mc->mc_groups == 0) 1172eda14cbcSMatt Macy mc = spa_normal_class(spa); 117315f0b8c3SMartin Matuska int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 117415f0b8c3SMartin Matuska METASLAB_DONT_THROTTLE, zal, 0); 1175eda14cbcSMatt Macy if (error == ENOSPC && mc != spa_normal_class(spa)) { 1176eda14cbcSMatt Macy error = metaslab_alloc_dva(spa, spa_normal_class(spa), size, 117715f0b8c3SMartin Matuska &dst, 0, NULL, txg, METASLAB_DONT_THROTTLE, zal, 0); 1178eda14cbcSMatt Macy } 1179eda14cbcSMatt Macy if (error != 0) 1180eda14cbcSMatt Macy return (error); 1181eda14cbcSMatt Macy 1182eda14cbcSMatt Macy /* 1183eda14cbcSMatt Macy * Determine the ranges that are not actually needed. Offsets are 1184eda14cbcSMatt Macy * relative to the start of the range to be copied (i.e. relative to the 1185eda14cbcSMatt Macy * local variable "start"). 1186eda14cbcSMatt Macy */ 1187*b59a0cdeSMartin Matuska zfs_range_tree_t *obsolete_segs = zfs_range_tree_create(NULL, 1188*b59a0cdeSMartin Matuska ZFS_RANGE_SEG64, NULL, 0, 0); 1189eda14cbcSMatt Macy 1190eda14cbcSMatt Macy zfs_btree_index_t where; 1191*b59a0cdeSMartin Matuska zfs_range_seg_t *rs = zfs_btree_first(&segs->rt_root, &where); 1192*b59a0cdeSMartin Matuska ASSERT3U(zfs_rs_get_start(rs, segs), ==, start); 1193*b59a0cdeSMartin Matuska uint64_t prev_seg_end = zfs_rs_get_end(rs, segs); 1194eda14cbcSMatt Macy while ((rs = zfs_btree_next(&segs->rt_root, &where, &where)) != NULL) { 1195*b59a0cdeSMartin Matuska if (zfs_rs_get_start(rs, segs) >= start + size) { 1196eda14cbcSMatt Macy break; 1197eda14cbcSMatt Macy } else { 1198*b59a0cdeSMartin Matuska zfs_range_tree_add(obsolete_segs, 1199eda14cbcSMatt Macy prev_seg_end - start, 1200*b59a0cdeSMartin Matuska zfs_rs_get_start(rs, segs) - prev_seg_end); 1201eda14cbcSMatt Macy } 1202*b59a0cdeSMartin Matuska prev_seg_end = zfs_rs_get_end(rs, segs); 1203eda14cbcSMatt Macy } 1204eda14cbcSMatt Macy /* We don't end in the middle of an obsolete range */ 1205eda14cbcSMatt Macy ASSERT3U(start + size, <=, prev_seg_end); 1206eda14cbcSMatt Macy 1207*b59a0cdeSMartin Matuska zfs_range_tree_clear(segs, start, size); 1208eda14cbcSMatt Macy 1209eda14cbcSMatt Macy /* 1210eda14cbcSMatt Macy * We can't have any padding of the allocated size, otherwise we will 1211eda14cbcSMatt Macy * misunderstand what's allocated, and the size of the mapping. We 1212eda14cbcSMatt Macy * prevent padding by ensuring that all devices in the pool have the 1213eda14cbcSMatt Macy * same ashift, and the allocation size is a multiple of the ashift. 1214eda14cbcSMatt Macy */ 1215eda14cbcSMatt Macy VERIFY3U(DVA_GET_ASIZE(&dst), ==, size); 1216eda14cbcSMatt Macy 1217eda14cbcSMatt Macy entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP); 1218eda14cbcSMatt Macy DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start); 1219eda14cbcSMatt Macy entry->vime_mapping.vimep_dst = dst; 1220eda14cbcSMatt Macy if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) { 1221*b59a0cdeSMartin Matuska entry->vime_obsolete_count = 1222*b59a0cdeSMartin Matuska zfs_range_tree_space(obsolete_segs); 1223eda14cbcSMatt Macy } 1224eda14cbcSMatt Macy 1225eda14cbcSMatt Macy vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP); 1226eda14cbcSMatt Macy vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst; 1227eda14cbcSMatt Macy vcsa->vcsa_obsolete_segs = obsolete_segs; 1228eda14cbcSMatt Macy vcsa->vcsa_spa = spa; 1229eda14cbcSMatt Macy vcsa->vcsa_txg = txg; 1230eda14cbcSMatt Macy 1231eda14cbcSMatt Macy /* 1232eda14cbcSMatt Macy * See comment before spa_vdev_copy_one_child(). 1233eda14cbcSMatt Macy */ 1234eda14cbcSMatt Macy spa_config_enter(spa, SCL_STATE, spa, RW_READER); 1235eda14cbcSMatt Macy zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL, 1236eda14cbcSMatt Macy spa_vdev_copy_segment_done, vcsa, 0); 1237eda14cbcSMatt Macy vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst)); 1238eda14cbcSMatt Macy if (dest_vd->vdev_ops == &vdev_mirror_ops) { 1239eda14cbcSMatt Macy for (int i = 0; i < dest_vd->vdev_children; i++) { 1240eda14cbcSMatt Macy vdev_t *child = dest_vd->vdev_child[i]; 1241eda14cbcSMatt Macy spa_vdev_copy_one_child(vca, nzio, vd, start, 1242eda14cbcSMatt Macy child, DVA_GET_OFFSET(&dst), i, size); 1243eda14cbcSMatt Macy } 1244eda14cbcSMatt Macy } else { 1245eda14cbcSMatt Macy spa_vdev_copy_one_child(vca, nzio, vd, start, 1246eda14cbcSMatt Macy dest_vd, DVA_GET_OFFSET(&dst), -1, size); 1247eda14cbcSMatt Macy } 1248eda14cbcSMatt Macy zio_nowait(nzio); 1249eda14cbcSMatt Macy 1250eda14cbcSMatt Macy list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry); 1251eda14cbcSMatt Macy ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift); 1252eda14cbcSMatt Macy vdev_dirty(vd, 0, NULL, txg); 1253eda14cbcSMatt Macy 1254eda14cbcSMatt Macy return (0); 1255eda14cbcSMatt Macy } 1256eda14cbcSMatt Macy 1257eda14cbcSMatt Macy /* 1258eda14cbcSMatt Macy * Complete the removal of a toplevel vdev. This is called as a 1259eda14cbcSMatt Macy * synctask in the same txg that we will sync out the new config (to the 1260eda14cbcSMatt Macy * MOS object) which indicates that this vdev is indirect. 1261eda14cbcSMatt Macy */ 1262eda14cbcSMatt Macy static void 1263eda14cbcSMatt Macy vdev_remove_complete_sync(void *arg, dmu_tx_t *tx) 1264eda14cbcSMatt Macy { 1265eda14cbcSMatt Macy spa_vdev_removal_t *svr = arg; 1266eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1267eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1268eda14cbcSMatt Macy 1269eda14cbcSMatt Macy ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 1270eda14cbcSMatt Macy 1271eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) { 1272eda14cbcSMatt Macy ASSERT0(svr->svr_bytes_done[i]); 1273eda14cbcSMatt Macy } 1274eda14cbcSMatt Macy 1275eda14cbcSMatt Macy ASSERT3U(spa->spa_removing_phys.sr_copied, ==, 1276eda14cbcSMatt Macy spa->spa_removing_phys.sr_to_copy); 1277eda14cbcSMatt Macy 1278eda14cbcSMatt Macy vdev_destroy_spacemaps(vd, tx); 1279eda14cbcSMatt Macy 1280eda14cbcSMatt Macy /* destroy leaf zaps, if any */ 1281eda14cbcSMatt Macy ASSERT3P(svr->svr_zaplist, !=, NULL); 1282eda14cbcSMatt Macy for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL); 1283eda14cbcSMatt Macy pair != NULL; 1284eda14cbcSMatt Macy pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) { 1285eda14cbcSMatt Macy vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx); 1286eda14cbcSMatt Macy } 1287eda14cbcSMatt Macy fnvlist_free(svr->svr_zaplist); 1288eda14cbcSMatt Macy 1289eda14cbcSMatt Macy spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx); 1290eda14cbcSMatt Macy /* vd->vdev_path is not available here */ 1291eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev remove completed", tx, 1292eda14cbcSMatt Macy "%s vdev %llu", spa_name(spa), (u_longlong_t)vd->vdev_id); 1293eda14cbcSMatt Macy } 1294eda14cbcSMatt Macy 1295eda14cbcSMatt Macy static void 1296eda14cbcSMatt Macy vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist) 1297eda14cbcSMatt Macy { 1298eda14cbcSMatt Macy ASSERT3P(zlist, !=, NULL); 12997877fdebSMatt Macy ASSERT0(vdev_get_nparity(vd)); 1300eda14cbcSMatt Macy 1301eda14cbcSMatt Macy if (vd->vdev_leaf_zap != 0) { 1302eda14cbcSMatt Macy char zkey[32]; 1303eda14cbcSMatt Macy (void) snprintf(zkey, sizeof (zkey), "%s-%llu", 1304eda14cbcSMatt Macy VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap); 1305eda14cbcSMatt Macy fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap); 1306eda14cbcSMatt Macy } 1307eda14cbcSMatt Macy 1308eda14cbcSMatt Macy for (uint64_t id = 0; id < vd->vdev_children; id++) { 1309eda14cbcSMatt Macy vdev_remove_enlist_zaps(vd->vdev_child[id], zlist); 1310eda14cbcSMatt Macy } 1311eda14cbcSMatt Macy } 1312eda14cbcSMatt Macy 1313eda14cbcSMatt Macy static void 1314eda14cbcSMatt Macy vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) 1315eda14cbcSMatt Macy { 1316eda14cbcSMatt Macy vdev_t *ivd; 1317eda14cbcSMatt Macy dmu_tx_t *tx; 1318eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 1319eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1320eda14cbcSMatt Macy 1321eda14cbcSMatt Macy /* 1322eda14cbcSMatt Macy * First, build a list of leaf zaps to be destroyed. 1323eda14cbcSMatt Macy * This is passed to the sync context thread, 1324eda14cbcSMatt Macy * which does the actual unlinking. 1325eda14cbcSMatt Macy */ 1326eda14cbcSMatt Macy svr->svr_zaplist = fnvlist_alloc(); 1327eda14cbcSMatt Macy vdev_remove_enlist_zaps(vd, svr->svr_zaplist); 1328eda14cbcSMatt Macy 1329eda14cbcSMatt Macy ivd = vdev_add_parent(vd, &vdev_indirect_ops); 1330eda14cbcSMatt Macy ivd->vdev_removing = 0; 1331eda14cbcSMatt Macy 1332eda14cbcSMatt Macy vd->vdev_leaf_zap = 0; 1333eda14cbcSMatt Macy 1334eda14cbcSMatt Macy vdev_remove_child(ivd, vd); 1335eda14cbcSMatt Macy vdev_compact_children(ivd); 1336eda14cbcSMatt Macy 1337eda14cbcSMatt Macy ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 1338eda14cbcSMatt Macy 1339eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1340eda14cbcSMatt Macy svr->svr_thread = NULL; 1341eda14cbcSMatt Macy cv_broadcast(&svr->svr_cv); 1342eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1343eda14cbcSMatt Macy 1344eda14cbcSMatt Macy /* After this, we can not use svr. */ 1345eda14cbcSMatt Macy tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 13462c48331dSMatt Macy dsl_sync_task_nowait(spa->spa_dsl_pool, 13472c48331dSMatt Macy vdev_remove_complete_sync, svr, tx); 1348eda14cbcSMatt Macy dmu_tx_commit(tx); 1349eda14cbcSMatt Macy } 1350eda14cbcSMatt Macy 1351eda14cbcSMatt Macy /* 1352eda14cbcSMatt Macy * Complete the removal of a toplevel vdev. This is called in open 1353eda14cbcSMatt Macy * context by the removal thread after we have copied all vdev's data. 1354eda14cbcSMatt Macy */ 1355eda14cbcSMatt Macy static void 1356eda14cbcSMatt Macy vdev_remove_complete(spa_t *spa) 1357eda14cbcSMatt Macy { 1358eda14cbcSMatt Macy uint64_t txg; 1359eda14cbcSMatt Macy 1360eda14cbcSMatt Macy /* 1361eda14cbcSMatt Macy * Wait for any deferred frees to be synced before we call 1362eda14cbcSMatt Macy * vdev_metaslab_fini() 1363eda14cbcSMatt Macy */ 1364eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 1365eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 1366eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1367eda14cbcSMatt Macy ASSERT3P(vd->vdev_initialize_thread, ==, NULL); 1368eda14cbcSMatt Macy ASSERT3P(vd->vdev_trim_thread, ==, NULL); 1369eda14cbcSMatt Macy ASSERT3P(vd->vdev_autotrim_thread, ==, NULL); 1370e3aa18adSMartin Matuska vdev_rebuild_stop_wait(vd); 1371e3aa18adSMartin Matuska ASSERT3P(vd->vdev_rebuild_thread, ==, NULL); 1372681ce946SMartin Matuska uint64_t vdev_space = spa_deflate(spa) ? 1373681ce946SMartin Matuska vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 1374eda14cbcSMatt Macy 1375eda14cbcSMatt Macy sysevent_t *ev = spa_event_create(spa, vd, NULL, 1376eda14cbcSMatt Macy ESC_ZFS_VDEV_REMOVE_DEV); 1377eda14cbcSMatt Macy 1378eda14cbcSMatt Macy zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu", 137933b8c039SMartin Matuska (u_longlong_t)vd->vdev_id, (u_longlong_t)txg); 1380eda14cbcSMatt Macy 1381681ce946SMartin Matuska ASSERT3U(0, !=, vdev_space); 1382681ce946SMartin Matuska ASSERT3U(spa->spa_nonallocating_dspace, >=, vdev_space); 1383681ce946SMartin Matuska 1384681ce946SMartin Matuska /* the vdev is no longer part of the dspace */ 1385681ce946SMartin Matuska spa->spa_nonallocating_dspace -= vdev_space; 1386681ce946SMartin Matuska 1387eda14cbcSMatt Macy /* 1388eda14cbcSMatt Macy * Discard allocation state. 1389eda14cbcSMatt Macy */ 1390eda14cbcSMatt Macy if (vd->vdev_mg != NULL) { 1391eda14cbcSMatt Macy vdev_metaslab_fini(vd); 1392eda14cbcSMatt Macy metaslab_group_destroy(vd->vdev_mg); 1393eda14cbcSMatt Macy vd->vdev_mg = NULL; 1394eda14cbcSMatt Macy } 1395184c1b94SMartin Matuska if (vd->vdev_log_mg != NULL) { 1396184c1b94SMartin Matuska ASSERT0(vd->vdev_ms_count); 1397184c1b94SMartin Matuska metaslab_group_destroy(vd->vdev_log_mg); 1398184c1b94SMartin Matuska vd->vdev_log_mg = NULL; 1399184c1b94SMartin Matuska } 1400eda14cbcSMatt Macy ASSERT0(vd->vdev_stat.vs_space); 1401eda14cbcSMatt Macy ASSERT0(vd->vdev_stat.vs_dspace); 1402eda14cbcSMatt Macy 1403eda14cbcSMatt Macy vdev_remove_replace_with_indirect(vd, txg); 1404eda14cbcSMatt Macy 1405eda14cbcSMatt Macy /* 1406eda14cbcSMatt Macy * We now release the locks, allowing spa_sync to run and finish the 1407eda14cbcSMatt Macy * removal via vdev_remove_complete_sync in syncing context. 1408eda14cbcSMatt Macy * 1409eda14cbcSMatt Macy * Note that we hold on to the vdev_t that has been replaced. Since 1410eda14cbcSMatt Macy * it isn't part of the vdev tree any longer, it can't be concurrently 1411eda14cbcSMatt Macy * manipulated, even while we don't have the config lock. 1412eda14cbcSMatt Macy */ 1413eda14cbcSMatt Macy (void) spa_vdev_exit(spa, NULL, txg, 0); 1414eda14cbcSMatt Macy 1415eda14cbcSMatt Macy /* 1416eda14cbcSMatt Macy * Top ZAP should have been transferred to the indirect vdev in 1417eda14cbcSMatt Macy * vdev_remove_replace_with_indirect. 1418eda14cbcSMatt Macy */ 1419eda14cbcSMatt Macy ASSERT0(vd->vdev_top_zap); 1420eda14cbcSMatt Macy 1421eda14cbcSMatt Macy /* 1422eda14cbcSMatt Macy * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect. 1423eda14cbcSMatt Macy */ 1424eda14cbcSMatt Macy ASSERT0(vd->vdev_leaf_zap); 1425eda14cbcSMatt Macy 1426eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 1427eda14cbcSMatt Macy (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1428eda14cbcSMatt Macy /* 1429eda14cbcSMatt Macy * Request to update the config and the config cachefile. 1430eda14cbcSMatt Macy */ 1431eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 1432eda14cbcSMatt Macy (void) spa_vdev_exit(spa, vd, txg, 0); 1433eda14cbcSMatt Macy 1434eda14cbcSMatt Macy if (ev != NULL) 1435eda14cbcSMatt Macy spa_event_post(ev); 1436eda14cbcSMatt Macy } 1437eda14cbcSMatt Macy 1438eda14cbcSMatt Macy /* 1439eda14cbcSMatt Macy * Evacuates a segment of size at most max_alloc from the vdev 1440eda14cbcSMatt Macy * via repeated calls to spa_vdev_copy_segment. If an allocation 1441eda14cbcSMatt Macy * fails, the pool is probably too fragmented to handle such a 1442eda14cbcSMatt Macy * large size, so decrease max_alloc so that the caller will not try 1443eda14cbcSMatt Macy * this size again this txg. 1444eda14cbcSMatt Macy */ 1445eda14cbcSMatt Macy static void 1446eda14cbcSMatt Macy spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca, 1447eda14cbcSMatt Macy uint64_t *max_alloc, dmu_tx_t *tx) 1448eda14cbcSMatt Macy { 1449eda14cbcSMatt Macy uint64_t txg = dmu_tx_get_txg(tx); 1450eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1451eda14cbcSMatt Macy 1452eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1453eda14cbcSMatt Macy 1454eda14cbcSMatt Macy /* 1455eda14cbcSMatt Macy * Determine how big of a chunk to copy. We can allocate up 1456eda14cbcSMatt Macy * to max_alloc bytes, and we can span up to vdev_removal_max_span 1457eda14cbcSMatt Macy * bytes of unallocated space at a time. "segs" will track the 1458eda14cbcSMatt Macy * allocated segments that we are copying. We may also be copying 1459eda14cbcSMatt Macy * free segments (of up to vdev_removal_max_span bytes). 1460eda14cbcSMatt Macy */ 1461*b59a0cdeSMartin Matuska zfs_range_tree_t *segs = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, 1462*b59a0cdeSMartin Matuska NULL, 0, 0); 1463eda14cbcSMatt Macy for (;;) { 1464*b59a0cdeSMartin Matuska zfs_range_tree_t *rt = svr->svr_allocd_segs; 1465*b59a0cdeSMartin Matuska zfs_range_seg_t *rs = zfs_range_tree_first(rt); 1466eda14cbcSMatt Macy 1467eda14cbcSMatt Macy if (rs == NULL) 1468eda14cbcSMatt Macy break; 1469eda14cbcSMatt Macy 1470eda14cbcSMatt Macy uint64_t seg_length; 1471eda14cbcSMatt Macy 1472*b59a0cdeSMartin Matuska if (zfs_range_tree_is_empty(segs)) { 1473eda14cbcSMatt Macy /* need to truncate the first seg based on max_alloc */ 1474*b59a0cdeSMartin Matuska seg_length = MIN(zfs_rs_get_end(rs, rt) - 1475*b59a0cdeSMartin Matuska zfs_rs_get_start(rs, rt), *max_alloc); 1476eda14cbcSMatt Macy } else { 1477*b59a0cdeSMartin Matuska if (zfs_rs_get_start(rs, rt) - zfs_range_tree_max(segs) 1478*b59a0cdeSMartin Matuska > vdev_removal_max_span) { 1479eda14cbcSMatt Macy /* 1480eda14cbcSMatt Macy * Including this segment would cause us to 1481eda14cbcSMatt Macy * copy a larger unneeded chunk than is allowed. 1482eda14cbcSMatt Macy */ 1483eda14cbcSMatt Macy break; 1484*b59a0cdeSMartin Matuska } else if (zfs_rs_get_end(rs, rt) - 1485*b59a0cdeSMartin Matuska zfs_range_tree_min(segs) > *max_alloc) { 1486eda14cbcSMatt Macy /* 1487eda14cbcSMatt Macy * This additional segment would extend past 1488eda14cbcSMatt Macy * max_alloc. Rather than splitting this 1489eda14cbcSMatt Macy * segment, leave it for the next mapping. 1490eda14cbcSMatt Macy */ 1491eda14cbcSMatt Macy break; 1492eda14cbcSMatt Macy } else { 1493*b59a0cdeSMartin Matuska seg_length = zfs_rs_get_end(rs, rt) - 1494*b59a0cdeSMartin Matuska zfs_rs_get_start(rs, rt); 1495eda14cbcSMatt Macy } 1496eda14cbcSMatt Macy } 1497eda14cbcSMatt Macy 1498*b59a0cdeSMartin Matuska zfs_range_tree_add(segs, zfs_rs_get_start(rs, rt), seg_length); 1499*b59a0cdeSMartin Matuska zfs_range_tree_remove(svr->svr_allocd_segs, 1500*b59a0cdeSMartin Matuska zfs_rs_get_start(rs, rt), seg_length); 1501eda14cbcSMatt Macy } 1502eda14cbcSMatt Macy 1503*b59a0cdeSMartin Matuska if (zfs_range_tree_is_empty(segs)) { 1504eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1505*b59a0cdeSMartin Matuska zfs_range_tree_destroy(segs); 1506eda14cbcSMatt Macy return; 1507eda14cbcSMatt Macy } 1508eda14cbcSMatt Macy 1509eda14cbcSMatt Macy if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) { 1510eda14cbcSMatt Macy dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync, 15112c48331dSMatt Macy svr, tx); 1512eda14cbcSMatt Macy } 1513eda14cbcSMatt Macy 1514*b59a0cdeSMartin Matuska svr->svr_max_offset_to_sync[txg & TXG_MASK] = zfs_range_tree_max(segs); 1515eda14cbcSMatt Macy 1516eda14cbcSMatt Macy /* 1517eda14cbcSMatt Macy * Note: this is the amount of *allocated* space 1518eda14cbcSMatt Macy * that we are taking care of each txg. 1519eda14cbcSMatt Macy */ 1520*b59a0cdeSMartin Matuska svr->svr_bytes_done[txg & TXG_MASK] += zfs_range_tree_space(segs); 1521eda14cbcSMatt Macy 1522eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1523eda14cbcSMatt Macy 1524eda14cbcSMatt Macy zio_alloc_list_t zal; 1525eda14cbcSMatt Macy metaslab_trace_init(&zal); 1526eda14cbcSMatt Macy uint64_t thismax = SPA_MAXBLOCKSIZE; 1527*b59a0cdeSMartin Matuska while (!zfs_range_tree_is_empty(segs)) { 1528eda14cbcSMatt Macy int error = spa_vdev_copy_segment(vd, 1529eda14cbcSMatt Macy segs, thismax, txg, vca, &zal); 1530eda14cbcSMatt Macy 1531eda14cbcSMatt Macy if (error == ENOSPC) { 1532eda14cbcSMatt Macy /* 1533eda14cbcSMatt Macy * Cut our segment in half, and don't try this 1534eda14cbcSMatt Macy * segment size again this txg. Note that the 1535eda14cbcSMatt Macy * allocation size must be aligned to the highest 1536eda14cbcSMatt Macy * ashift in the pool, so that the allocation will 1537eda14cbcSMatt Macy * not be padded out to a multiple of the ashift, 1538eda14cbcSMatt Macy * which could cause us to think that this mapping 1539eda14cbcSMatt Macy * is larger than we intended. 1540eda14cbcSMatt Macy */ 1541eda14cbcSMatt Macy ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT); 1542eda14cbcSMatt Macy ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift); 1543eda14cbcSMatt Macy uint64_t attempted = 1544*b59a0cdeSMartin Matuska MIN(zfs_range_tree_span(segs), thismax); 1545eda14cbcSMatt Macy thismax = P2ROUNDUP(attempted / 2, 1546eda14cbcSMatt Macy 1 << spa->spa_max_ashift); 1547eda14cbcSMatt Macy /* 1548eda14cbcSMatt Macy * The minimum-size allocation can not fail. 1549eda14cbcSMatt Macy */ 1550eda14cbcSMatt Macy ASSERT3U(attempted, >, 1 << spa->spa_max_ashift); 1551eda14cbcSMatt Macy *max_alloc = attempted - (1 << spa->spa_max_ashift); 1552eda14cbcSMatt Macy } else { 1553eda14cbcSMatt Macy ASSERT0(error); 1554eda14cbcSMatt Macy 1555eda14cbcSMatt Macy /* 1556eda14cbcSMatt Macy * We've performed an allocation, so reset the 1557eda14cbcSMatt Macy * alloc trace list. 1558eda14cbcSMatt Macy */ 1559eda14cbcSMatt Macy metaslab_trace_fini(&zal); 1560eda14cbcSMatt Macy metaslab_trace_init(&zal); 1561eda14cbcSMatt Macy } 1562eda14cbcSMatt Macy } 1563eda14cbcSMatt Macy metaslab_trace_fini(&zal); 1564*b59a0cdeSMartin Matuska zfs_range_tree_destroy(segs); 1565eda14cbcSMatt Macy } 1566eda14cbcSMatt Macy 1567eda14cbcSMatt Macy /* 1568eda14cbcSMatt Macy * The size of each removal mapping is limited by the tunable 1569eda14cbcSMatt Macy * zfs_remove_max_segment, but we must adjust this to be a multiple of the 1570eda14cbcSMatt Macy * pool's ashift, so that we don't try to split individual sectors regardless 1571eda14cbcSMatt Macy * of the tunable value. (Note that device removal requires that all devices 1572eda14cbcSMatt Macy * have the same ashift, so there's no difference between spa_min_ashift and 1573eda14cbcSMatt Macy * spa_max_ashift.) The raw tunable should not be used elsewhere. 1574eda14cbcSMatt Macy */ 1575eda14cbcSMatt Macy uint64_t 1576eda14cbcSMatt Macy spa_remove_max_segment(spa_t *spa) 1577eda14cbcSMatt Macy { 1578eda14cbcSMatt Macy return (P2ROUNDUP(zfs_remove_max_segment, 1 << spa->spa_max_ashift)); 1579eda14cbcSMatt Macy } 1580eda14cbcSMatt Macy 1581eda14cbcSMatt Macy /* 1582eda14cbcSMatt Macy * The removal thread operates in open context. It iterates over all 1583eda14cbcSMatt Macy * allocated space in the vdev, by loading each metaslab's spacemap. 1584eda14cbcSMatt Macy * For each contiguous segment of allocated space (capping the segment 1585eda14cbcSMatt Macy * size at SPA_MAXBLOCKSIZE), we: 1586eda14cbcSMatt Macy * - Allocate space for it on another vdev. 1587eda14cbcSMatt Macy * - Create a new mapping from the old location to the new location 1588eda14cbcSMatt Macy * (as a record in svr_new_segments). 1589eda14cbcSMatt Macy * - Initiate a physical read zio to get the data off the removing disk. 1590eda14cbcSMatt Macy * - In the read zio's done callback, initiate a physical write zio to 1591eda14cbcSMatt Macy * write it to the new vdev. 1592eda14cbcSMatt Macy * Note that all of this will take effect when a particular TXG syncs. 1593eda14cbcSMatt Macy * The sync thread ensures that all the phys reads and writes for the syncing 1594eda14cbcSMatt Macy * TXG have completed (see spa_txg_zio) and writes the new mappings to disk 1595eda14cbcSMatt Macy * (see vdev_mapping_sync()). 1596eda14cbcSMatt Macy */ 1597da5137abSMartin Matuska static __attribute__((noreturn)) void 1598eda14cbcSMatt Macy spa_vdev_remove_thread(void *arg) 1599eda14cbcSMatt Macy { 1600eda14cbcSMatt Macy spa_t *spa = arg; 1601eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1602eda14cbcSMatt Macy vdev_copy_arg_t vca; 1603eda14cbcSMatt Macy uint64_t max_alloc = spa_remove_max_segment(spa); 1604eda14cbcSMatt Macy uint64_t last_txg = 0; 1605eda14cbcSMatt Macy 1606eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1607eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1608eda14cbcSMatt Macy vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1609eda14cbcSMatt Macy uint64_t start_offset = vdev_indirect_mapping_max_offset(vim); 1610eda14cbcSMatt Macy 1611eda14cbcSMatt Macy ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops); 1612eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 1613eda14cbcSMatt Macy ASSERT(vd->vdev_removing); 1614eda14cbcSMatt Macy ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 1615eda14cbcSMatt Macy ASSERT(vim != NULL); 1616eda14cbcSMatt Macy 1617eda14cbcSMatt Macy mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL); 1618eda14cbcSMatt Macy cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL); 1619eda14cbcSMatt Macy vca.vca_outstanding_bytes = 0; 1620eda14cbcSMatt Macy vca.vca_read_error_bytes = 0; 1621eda14cbcSMatt Macy vca.vca_write_error_bytes = 0; 1622eda14cbcSMatt Macy 1623eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1624eda14cbcSMatt Macy 1625eda14cbcSMatt Macy /* 1626eda14cbcSMatt Macy * Start from vim_max_offset so we pick up where we left off 1627eda14cbcSMatt Macy * if we are restarting the removal after opening the pool. 1628eda14cbcSMatt Macy */ 1629eda14cbcSMatt Macy uint64_t msi; 1630eda14cbcSMatt Macy for (msi = start_offset >> vd->vdev_ms_shift; 1631eda14cbcSMatt Macy msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) { 1632eda14cbcSMatt Macy metaslab_t *msp = vd->vdev_ms[msi]; 1633eda14cbcSMatt Macy ASSERT3U(msi, <=, vd->vdev_ms_count); 1634eda14cbcSMatt Macy 1635*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs)); 1636eda14cbcSMatt Macy 1637eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 1638eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1639eda14cbcSMatt Macy 1640eda14cbcSMatt Macy /* 1641eda14cbcSMatt Macy * Assert nothing in flight -- ms_*tree is empty. 1642eda14cbcSMatt Macy */ 1643eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) { 1644*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(msp->ms_allocating[i])); 1645eda14cbcSMatt Macy } 1646eda14cbcSMatt Macy 1647eda14cbcSMatt Macy /* 1648eda14cbcSMatt Macy * If the metaslab has ever been allocated from (ms_sm!=NULL), 1649eda14cbcSMatt Macy * read the allocated segments from the space map object 1650eda14cbcSMatt Macy * into svr_allocd_segs. Since we do this while holding 1651eda14cbcSMatt Macy * svr_lock and ms_sync_lock, concurrent frees (which 1652eda14cbcSMatt Macy * would have modified the space map) will wait for us 1653eda14cbcSMatt Macy * to finish loading the spacemap, and then take the 1654eda14cbcSMatt Macy * appropriate action (see free_from_removing_vdev()). 1655eda14cbcSMatt Macy */ 1656eda14cbcSMatt Macy if (msp->ms_sm != NULL) { 1657eda14cbcSMatt Macy VERIFY0(space_map_load(msp->ms_sm, 1658eda14cbcSMatt Macy svr->svr_allocd_segs, SM_ALLOC)); 1659eda14cbcSMatt Macy 1660*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_unflushed_allocs, 1661*b59a0cdeSMartin Matuska zfs_range_tree_add, svr->svr_allocd_segs); 1662*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_unflushed_frees, 1663*b59a0cdeSMartin Matuska zfs_range_tree_remove, svr->svr_allocd_segs); 1664*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_freeing, 1665*b59a0cdeSMartin Matuska zfs_range_tree_remove, svr->svr_allocd_segs); 1666eda14cbcSMatt Macy 1667eda14cbcSMatt Macy /* 1668eda14cbcSMatt Macy * When we are resuming from a paused removal (i.e. 1669eda14cbcSMatt Macy * when importing a pool with a removal in progress), 1670eda14cbcSMatt Macy * discard any state that we have already processed. 1671eda14cbcSMatt Macy */ 1672*b59a0cdeSMartin Matuska zfs_range_tree_clear(svr->svr_allocd_segs, 0, 1673*b59a0cdeSMartin Matuska start_offset); 1674eda14cbcSMatt Macy } 1675eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1676eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 1677eda14cbcSMatt Macy 1678eda14cbcSMatt Macy vca.vca_msp = msp; 1679eda14cbcSMatt Macy zfs_dbgmsg("copying %llu segments for metaslab %llu", 168033b8c039SMartin Matuska (u_longlong_t)zfs_btree_numnodes( 168133b8c039SMartin Matuska &svr->svr_allocd_segs->rt_root), 168233b8c039SMartin Matuska (u_longlong_t)msp->ms_id); 1683eda14cbcSMatt Macy 1684eda14cbcSMatt Macy while (!svr->svr_thread_exit && 1685*b59a0cdeSMartin Matuska !zfs_range_tree_is_empty(svr->svr_allocd_segs)) { 1686eda14cbcSMatt Macy 1687eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1688eda14cbcSMatt Macy 1689eda14cbcSMatt Macy /* 1690eda14cbcSMatt Macy * We need to periodically drop the config lock so that 1691eda14cbcSMatt Macy * writers can get in. Additionally, we can't wait 1692eda14cbcSMatt Macy * for a txg to sync while holding a config lock 1693eda14cbcSMatt Macy * (since a waiting writer could cause a 3-way deadlock 1694eda14cbcSMatt Macy * with the sync thread, which also gets a config 1695eda14cbcSMatt Macy * lock for reader). So we can't hold the config lock 1696eda14cbcSMatt Macy * while calling dmu_tx_assign(). 1697eda14cbcSMatt Macy */ 1698eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 1699eda14cbcSMatt Macy 1700eda14cbcSMatt Macy /* 1701eda14cbcSMatt Macy * This delay will pause the removal around the point 1702eda14cbcSMatt Macy * specified by zfs_removal_suspend_progress. We do this 1703eda14cbcSMatt Macy * solely from the test suite or during debugging. 1704eda14cbcSMatt Macy */ 1705eda14cbcSMatt Macy while (zfs_removal_suspend_progress && 1706eda14cbcSMatt Macy !svr->svr_thread_exit) 1707eda14cbcSMatt Macy delay(hz); 1708eda14cbcSMatt Macy 1709eda14cbcSMatt Macy mutex_enter(&vca.vca_lock); 1710eda14cbcSMatt Macy while (vca.vca_outstanding_bytes > 1711eda14cbcSMatt Macy zfs_remove_max_copy_bytes) { 1712eda14cbcSMatt Macy cv_wait(&vca.vca_cv, &vca.vca_lock); 1713eda14cbcSMatt Macy } 1714eda14cbcSMatt Macy mutex_exit(&vca.vca_lock); 1715eda14cbcSMatt Macy 1716eda14cbcSMatt Macy dmu_tx_t *tx = 1717eda14cbcSMatt Macy dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1718eda14cbcSMatt Macy 1719eda14cbcSMatt Macy VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1720eda14cbcSMatt Macy uint64_t txg = dmu_tx_get_txg(tx); 1721eda14cbcSMatt Macy 1722eda14cbcSMatt Macy /* 1723eda14cbcSMatt Macy * Reacquire the vdev_config lock. The vdev_t 1724eda14cbcSMatt Macy * that we're removing may have changed, e.g. due 1725eda14cbcSMatt Macy * to a vdev_attach or vdev_detach. 1726eda14cbcSMatt Macy */ 1727eda14cbcSMatt Macy spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1728eda14cbcSMatt Macy vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1729eda14cbcSMatt Macy 1730eda14cbcSMatt Macy if (txg != last_txg) 1731eda14cbcSMatt Macy max_alloc = spa_remove_max_segment(spa); 1732eda14cbcSMatt Macy last_txg = txg; 1733eda14cbcSMatt Macy 1734eda14cbcSMatt Macy spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx); 1735eda14cbcSMatt Macy 1736eda14cbcSMatt Macy dmu_tx_commit(tx); 1737eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1738eda14cbcSMatt Macy } 1739eda14cbcSMatt Macy 1740eda14cbcSMatt Macy mutex_enter(&vca.vca_lock); 1741eda14cbcSMatt Macy if (zfs_removal_ignore_errors == 0 && 1742eda14cbcSMatt Macy (vca.vca_read_error_bytes > 0 || 1743eda14cbcSMatt Macy vca.vca_write_error_bytes > 0)) { 1744eda14cbcSMatt Macy svr->svr_thread_exit = B_TRUE; 1745eda14cbcSMatt Macy } 1746eda14cbcSMatt Macy mutex_exit(&vca.vca_lock); 1747eda14cbcSMatt Macy } 1748eda14cbcSMatt Macy 1749eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1750eda14cbcSMatt Macy 1751eda14cbcSMatt Macy spa_config_exit(spa, SCL_CONFIG, FTAG); 1752eda14cbcSMatt Macy 1753eda14cbcSMatt Macy /* 1754eda14cbcSMatt Macy * Wait for all copies to finish before cleaning up the vca. 1755eda14cbcSMatt Macy */ 1756eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 1757eda14cbcSMatt Macy ASSERT0(vca.vca_outstanding_bytes); 1758eda14cbcSMatt Macy 1759eda14cbcSMatt Macy mutex_destroy(&vca.vca_lock); 1760eda14cbcSMatt Macy cv_destroy(&vca.vca_cv); 1761eda14cbcSMatt Macy 1762eda14cbcSMatt Macy if (svr->svr_thread_exit) { 1763eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1764*b59a0cdeSMartin Matuska zfs_range_tree_vacate(svr->svr_allocd_segs, NULL, NULL); 1765eda14cbcSMatt Macy svr->svr_thread = NULL; 1766eda14cbcSMatt Macy cv_broadcast(&svr->svr_cv); 1767eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1768eda14cbcSMatt Macy 1769eda14cbcSMatt Macy /* 1770eda14cbcSMatt Macy * During the removal process an unrecoverable read or write 1771eda14cbcSMatt Macy * error was encountered. The removal process must be 1772eda14cbcSMatt Macy * cancelled or this damage may become permanent. 1773eda14cbcSMatt Macy */ 1774eda14cbcSMatt Macy if (zfs_removal_ignore_errors == 0 && 1775eda14cbcSMatt Macy (vca.vca_read_error_bytes > 0 || 1776eda14cbcSMatt Macy vca.vca_write_error_bytes > 0)) { 1777eda14cbcSMatt Macy zfs_dbgmsg("canceling removal due to IO errors: " 1778eda14cbcSMatt Macy "[read_error_bytes=%llu] [write_error_bytes=%llu]", 177933b8c039SMartin Matuska (u_longlong_t)vca.vca_read_error_bytes, 178033b8c039SMartin Matuska (u_longlong_t)vca.vca_write_error_bytes); 1781eda14cbcSMatt Macy spa_vdev_remove_cancel_impl(spa); 1782eda14cbcSMatt Macy } 1783eda14cbcSMatt Macy } else { 1784*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs)); 1785eda14cbcSMatt Macy vdev_remove_complete(spa); 1786eda14cbcSMatt Macy } 1787eda14cbcSMatt Macy 1788eda14cbcSMatt Macy thread_exit(); 1789eda14cbcSMatt Macy } 1790eda14cbcSMatt Macy 1791eda14cbcSMatt Macy void 1792eda14cbcSMatt Macy spa_vdev_remove_suspend(spa_t *spa) 1793eda14cbcSMatt Macy { 1794eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1795eda14cbcSMatt Macy 1796eda14cbcSMatt Macy if (svr == NULL) 1797eda14cbcSMatt Macy return; 1798eda14cbcSMatt Macy 1799eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1800eda14cbcSMatt Macy svr->svr_thread_exit = B_TRUE; 1801eda14cbcSMatt Macy while (svr->svr_thread != NULL) 1802eda14cbcSMatt Macy cv_wait(&svr->svr_cv, &svr->svr_lock); 1803eda14cbcSMatt Macy svr->svr_thread_exit = B_FALSE; 1804eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1805eda14cbcSMatt Macy } 1806eda14cbcSMatt Macy 1807681ce946SMartin Matuska /* 1808681ce946SMartin Matuska * Return true if the "allocating" property has been set to "off" 1809681ce946SMartin Matuska */ 1810681ce946SMartin Matuska static boolean_t 1811681ce946SMartin Matuska vdev_prop_allocating_off(vdev_t *vd) 1812681ce946SMartin Matuska { 1813681ce946SMartin Matuska uint64_t objid = vd->vdev_top_zap; 1814681ce946SMartin Matuska uint64_t allocating = 1; 1815681ce946SMartin Matuska 1816681ce946SMartin Matuska /* no vdev property object => no props */ 1817681ce946SMartin Matuska if (objid != 0) { 1818681ce946SMartin Matuska spa_t *spa = vd->vdev_spa; 1819681ce946SMartin Matuska objset_t *mos = spa->spa_meta_objset; 1820681ce946SMartin Matuska 1821681ce946SMartin Matuska mutex_enter(&spa->spa_props_lock); 1822681ce946SMartin Matuska (void) zap_lookup(mos, objid, "allocating", sizeof (uint64_t), 1823681ce946SMartin Matuska 1, &allocating); 1824681ce946SMartin Matuska mutex_exit(&spa->spa_props_lock); 1825681ce946SMartin Matuska } 1826681ce946SMartin Matuska return (allocating == 0); 1827681ce946SMartin Matuska } 1828681ce946SMartin Matuska 1829eda14cbcSMatt Macy static int 1830eda14cbcSMatt Macy spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) 1831eda14cbcSMatt Macy { 1832e92ffd9bSMartin Matuska (void) arg; 1833eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1834eda14cbcSMatt Macy 1835eda14cbcSMatt Macy if (spa->spa_vdev_removal == NULL) 1836eda14cbcSMatt Macy return (ENOTACTIVE); 1837eda14cbcSMatt Macy return (0); 1838eda14cbcSMatt Macy } 1839eda14cbcSMatt Macy 1840eda14cbcSMatt Macy /* 1841eda14cbcSMatt Macy * Cancel a removal by freeing all entries from the partial mapping 1842eda14cbcSMatt Macy * and marking the vdev as no longer being removing. 1843eda14cbcSMatt Macy */ 1844eda14cbcSMatt Macy static void 1845eda14cbcSMatt Macy spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx) 1846eda14cbcSMatt Macy { 1847e92ffd9bSMartin Matuska (void) arg; 1848eda14cbcSMatt Macy spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1849eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1850eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1851eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1852eda14cbcSMatt Macy vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1853eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 1854eda14cbcSMatt Macy 1855eda14cbcSMatt Macy ASSERT3P(svr->svr_thread, ==, NULL); 1856eda14cbcSMatt Macy 1857eda14cbcSMatt Macy spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 1858eda14cbcSMatt Macy 1859eda14cbcSMatt Macy boolean_t are_precise; 1860eda14cbcSMatt Macy VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise)); 1861eda14cbcSMatt Macy if (are_precise) { 1862eda14cbcSMatt Macy spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1863eda14cbcSMatt Macy VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1864eda14cbcSMatt Macy VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx)); 1865eda14cbcSMatt Macy } 1866eda14cbcSMatt Macy 1867eda14cbcSMatt Macy uint64_t obsolete_sm_object; 1868eda14cbcSMatt Macy VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object)); 1869eda14cbcSMatt Macy if (obsolete_sm_object != 0) { 1870eda14cbcSMatt Macy ASSERT(vd->vdev_obsolete_sm != NULL); 1871eda14cbcSMatt Macy ASSERT3U(obsolete_sm_object, ==, 1872eda14cbcSMatt Macy space_map_object(vd->vdev_obsolete_sm)); 1873eda14cbcSMatt Macy 1874eda14cbcSMatt Macy space_map_free(vd->vdev_obsolete_sm, tx); 1875eda14cbcSMatt Macy VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1876eda14cbcSMatt Macy VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 1877eda14cbcSMatt Macy space_map_close(vd->vdev_obsolete_sm); 1878eda14cbcSMatt Macy vd->vdev_obsolete_sm = NULL; 1879eda14cbcSMatt Macy spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1880eda14cbcSMatt Macy } 1881eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) { 1882eda14cbcSMatt Macy ASSERT(list_is_empty(&svr->svr_new_segments[i])); 1883eda14cbcSMatt Macy ASSERT3U(svr->svr_max_offset_to_sync[i], <=, 1884eda14cbcSMatt Macy vdev_indirect_mapping_max_offset(vim)); 1885eda14cbcSMatt Macy } 1886eda14cbcSMatt Macy 1887eda14cbcSMatt Macy for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 1888eda14cbcSMatt Macy metaslab_t *msp = vd->vdev_ms[msi]; 1889eda14cbcSMatt Macy 1890eda14cbcSMatt Macy if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 1891eda14cbcSMatt Macy break; 1892eda14cbcSMatt Macy 1893*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(svr->svr_allocd_segs)); 1894eda14cbcSMatt Macy 1895eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1896eda14cbcSMatt Macy 1897eda14cbcSMatt Macy /* 1898eda14cbcSMatt Macy * Assert nothing in flight -- ms_*tree is empty. 1899eda14cbcSMatt Macy */ 1900eda14cbcSMatt Macy for (int i = 0; i < TXG_SIZE; i++) 1901*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(msp->ms_allocating[i])); 1902eda14cbcSMatt Macy for (int i = 0; i < TXG_DEFER_SIZE; i++) 1903*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(msp->ms_defer[i])); 1904*b59a0cdeSMartin Matuska ASSERT0(zfs_range_tree_space(msp->ms_freed)); 1905eda14cbcSMatt Macy 1906eda14cbcSMatt Macy if (msp->ms_sm != NULL) { 1907eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1908eda14cbcSMatt Macy VERIFY0(space_map_load(msp->ms_sm, 1909eda14cbcSMatt Macy svr->svr_allocd_segs, SM_ALLOC)); 1910eda14cbcSMatt Macy 1911*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_unflushed_allocs, 1912*b59a0cdeSMartin Matuska zfs_range_tree_add, svr->svr_allocd_segs); 1913*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_unflushed_frees, 1914*b59a0cdeSMartin Matuska zfs_range_tree_remove, svr->svr_allocd_segs); 1915*b59a0cdeSMartin Matuska zfs_range_tree_walk(msp->ms_freeing, 1916*b59a0cdeSMartin Matuska zfs_range_tree_remove, svr->svr_allocd_segs); 1917eda14cbcSMatt Macy 1918eda14cbcSMatt Macy /* 1919eda14cbcSMatt Macy * Clear everything past what has been synced, 1920eda14cbcSMatt Macy * because we have not allocated mappings for it yet. 1921eda14cbcSMatt Macy */ 1922eda14cbcSMatt Macy uint64_t syncd = vdev_indirect_mapping_max_offset(vim); 1923eda14cbcSMatt Macy uint64_t sm_end = msp->ms_sm->sm_start + 1924eda14cbcSMatt Macy msp->ms_sm->sm_size; 1925eda14cbcSMatt Macy if (sm_end > syncd) 1926*b59a0cdeSMartin Matuska zfs_range_tree_clear(svr->svr_allocd_segs, 1927eda14cbcSMatt Macy syncd, sm_end - syncd); 1928eda14cbcSMatt Macy 1929eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1930eda14cbcSMatt Macy } 1931eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1932eda14cbcSMatt Macy 1933eda14cbcSMatt Macy mutex_enter(&svr->svr_lock); 1934*b59a0cdeSMartin Matuska zfs_range_tree_vacate(svr->svr_allocd_segs, 1935eda14cbcSMatt Macy free_mapped_segment_cb, vd); 1936eda14cbcSMatt Macy mutex_exit(&svr->svr_lock); 1937eda14cbcSMatt Macy } 1938eda14cbcSMatt Macy 1939eda14cbcSMatt Macy /* 1940eda14cbcSMatt Macy * Note: this must happen after we invoke free_mapped_segment_cb, 1941eda14cbcSMatt Macy * because it adds to the obsolete_segments. 1942eda14cbcSMatt Macy */ 1943*b59a0cdeSMartin Matuska zfs_range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 1944eda14cbcSMatt Macy 1945eda14cbcSMatt Macy ASSERT3U(vic->vic_mapping_object, ==, 1946eda14cbcSMatt Macy vdev_indirect_mapping_object(vd->vdev_indirect_mapping)); 1947eda14cbcSMatt Macy vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 1948eda14cbcSMatt Macy vd->vdev_indirect_mapping = NULL; 1949eda14cbcSMatt Macy vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 1950eda14cbcSMatt Macy vic->vic_mapping_object = 0; 1951eda14cbcSMatt Macy 1952eda14cbcSMatt Macy ASSERT3U(vic->vic_births_object, ==, 1953eda14cbcSMatt Macy vdev_indirect_births_object(vd->vdev_indirect_births)); 1954eda14cbcSMatt Macy vdev_indirect_births_close(vd->vdev_indirect_births); 1955eda14cbcSMatt Macy vd->vdev_indirect_births = NULL; 1956eda14cbcSMatt Macy vdev_indirect_births_free(mos, vic->vic_births_object, tx); 1957eda14cbcSMatt Macy vic->vic_births_object = 0; 1958eda14cbcSMatt Macy 1959eda14cbcSMatt Macy /* 1960eda14cbcSMatt Macy * We may have processed some frees from the removing vdev in this 1961eda14cbcSMatt Macy * txg, thus increasing svr_bytes_done; discard that here to 1962eda14cbcSMatt Macy * satisfy the assertions in spa_vdev_removal_destroy(). 1963eda14cbcSMatt Macy * Note that future txg's can not have any bytes_done, because 1964eda14cbcSMatt Macy * future TXG's are only modified from open context, and we have 1965eda14cbcSMatt Macy * already shut down the copying thread. 1966eda14cbcSMatt Macy */ 1967eda14cbcSMatt Macy svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0; 1968eda14cbcSMatt Macy spa_finish_removal(spa, DSS_CANCELED, tx); 1969eda14cbcSMatt Macy 1970eda14cbcSMatt Macy vd->vdev_removing = B_FALSE; 1971681ce946SMartin Matuska 1972681ce946SMartin Matuska if (!vdev_prop_allocating_off(vd)) { 1973681ce946SMartin Matuska spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER); 1974681ce946SMartin Matuska vdev_activate(vd); 1975681ce946SMartin Matuska spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG); 1976681ce946SMartin Matuska } 1977681ce946SMartin Matuska 1978eda14cbcSMatt Macy vdev_config_dirty(vd); 1979eda14cbcSMatt Macy 1980eda14cbcSMatt Macy zfs_dbgmsg("canceled device removal for vdev %llu in %llu", 198133b8c039SMartin Matuska (u_longlong_t)vd->vdev_id, (u_longlong_t)dmu_tx_get_txg(tx)); 1982eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev remove canceled", tx, 1983eda14cbcSMatt Macy "%s vdev %llu %s", spa_name(spa), 1984eda14cbcSMatt Macy (u_longlong_t)vd->vdev_id, 1985eda14cbcSMatt Macy (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1986eda14cbcSMatt Macy } 1987eda14cbcSMatt Macy 1988eda14cbcSMatt Macy static int 1989eda14cbcSMatt Macy spa_vdev_remove_cancel_impl(spa_t *spa) 1990eda14cbcSMatt Macy { 1991eda14cbcSMatt Macy int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check, 1992eda14cbcSMatt Macy spa_vdev_remove_cancel_sync, NULL, 0, 1993eda14cbcSMatt Macy ZFS_SPACE_CHECK_EXTRA_RESERVED); 1994eda14cbcSMatt Macy return (error); 1995eda14cbcSMatt Macy } 1996eda14cbcSMatt Macy 1997eda14cbcSMatt Macy int 1998eda14cbcSMatt Macy spa_vdev_remove_cancel(spa_t *spa) 1999eda14cbcSMatt Macy { 2000eda14cbcSMatt Macy spa_vdev_remove_suspend(spa); 2001eda14cbcSMatt Macy 2002eda14cbcSMatt Macy if (spa->spa_vdev_removal == NULL) 2003eda14cbcSMatt Macy return (ENOTACTIVE); 2004eda14cbcSMatt Macy 2005eda14cbcSMatt Macy return (spa_vdev_remove_cancel_impl(spa)); 2006eda14cbcSMatt Macy } 2007eda14cbcSMatt Macy 2008eda14cbcSMatt Macy void 2009eda14cbcSMatt Macy svr_sync(spa_t *spa, dmu_tx_t *tx) 2010eda14cbcSMatt Macy { 2011eda14cbcSMatt Macy spa_vdev_removal_t *svr = spa->spa_vdev_removal; 2012eda14cbcSMatt Macy int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 2013eda14cbcSMatt Macy 2014eda14cbcSMatt Macy if (svr == NULL) 2015eda14cbcSMatt Macy return; 2016eda14cbcSMatt Macy 2017eda14cbcSMatt Macy /* 2018eda14cbcSMatt Macy * This check is necessary so that we do not dirty the 2019eda14cbcSMatt Macy * DIRECTORY_OBJECT via spa_sync_removing_state() when there 2020eda14cbcSMatt Macy * is nothing to do. Dirtying it every time would prevent us 2021eda14cbcSMatt Macy * from syncing-to-convergence. 2022eda14cbcSMatt Macy */ 2023eda14cbcSMatt Macy if (svr->svr_bytes_done[txgoff] == 0) 2024eda14cbcSMatt Macy return; 2025eda14cbcSMatt Macy 2026eda14cbcSMatt Macy /* 2027eda14cbcSMatt Macy * Update progress accounting. 2028eda14cbcSMatt Macy */ 2029eda14cbcSMatt Macy spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff]; 2030eda14cbcSMatt Macy svr->svr_bytes_done[txgoff] = 0; 2031eda14cbcSMatt Macy 2032eda14cbcSMatt Macy spa_sync_removing_state(spa, tx); 2033eda14cbcSMatt Macy } 2034eda14cbcSMatt Macy 2035eda14cbcSMatt Macy static void 2036eda14cbcSMatt Macy vdev_remove_make_hole_and_free(vdev_t *vd) 2037eda14cbcSMatt Macy { 2038eda14cbcSMatt Macy uint64_t id = vd->vdev_id; 2039eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2040eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2041eda14cbcSMatt Macy 2042eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2043eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2044eda14cbcSMatt Macy 2045eda14cbcSMatt Macy vdev_free(vd); 2046eda14cbcSMatt Macy 2047eda14cbcSMatt Macy vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 2048eda14cbcSMatt Macy vdev_add_child(rvd, vd); 2049eda14cbcSMatt Macy vdev_config_dirty(rvd); 2050eda14cbcSMatt Macy 2051eda14cbcSMatt Macy /* 2052eda14cbcSMatt Macy * Reassess the health of our root vdev. 2053eda14cbcSMatt Macy */ 2054eda14cbcSMatt Macy vdev_reopen(rvd); 2055eda14cbcSMatt Macy } 2056eda14cbcSMatt Macy 2057eda14cbcSMatt Macy /* 2058eda14cbcSMatt Macy * Remove a log device. The config lock is held for the specified TXG. 2059eda14cbcSMatt Macy */ 2060eda14cbcSMatt Macy static int 2061eda14cbcSMatt Macy spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) 2062eda14cbcSMatt Macy { 2063eda14cbcSMatt Macy metaslab_group_t *mg = vd->vdev_mg; 2064eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2065eda14cbcSMatt Macy int error = 0; 2066eda14cbcSMatt Macy 2067eda14cbcSMatt Macy ASSERT(vd->vdev_islog); 2068eda14cbcSMatt Macy ASSERT(vd == vd->vdev_top); 2069184c1b94SMartin Matuska ASSERT3P(vd->vdev_log_mg, ==, NULL); 2070eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2071eda14cbcSMatt Macy 2072eda14cbcSMatt Macy /* 2073eda14cbcSMatt Macy * Stop allocating from this vdev. 2074eda14cbcSMatt Macy */ 2075eda14cbcSMatt Macy metaslab_group_passivate(mg); 2076eda14cbcSMatt Macy 2077eda14cbcSMatt Macy /* 2078eda14cbcSMatt Macy * Wait for the youngest allocations and frees to sync, 2079eda14cbcSMatt Macy * and then wait for the deferral of those frees to finish. 2080eda14cbcSMatt Macy */ 2081eda14cbcSMatt Macy spa_vdev_config_exit(spa, NULL, 2082eda14cbcSMatt Macy *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 2083eda14cbcSMatt Macy 2084eda14cbcSMatt Macy /* 2085eda14cbcSMatt Macy * Cancel any initialize or TRIM which was in progress. 2086eda14cbcSMatt Macy */ 2087eda14cbcSMatt Macy vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED); 2088eda14cbcSMatt Macy vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED); 2089eda14cbcSMatt Macy vdev_autotrim_stop_wait(vd); 2090eda14cbcSMatt Macy 2091eda14cbcSMatt Macy /* 2092eda14cbcSMatt Macy * Evacuate the device. We don't hold the config lock as 2093eda14cbcSMatt Macy * writer since we need to do I/O but we do keep the 2094eda14cbcSMatt Macy * spa_namespace_lock held. Once this completes the device 2095eda14cbcSMatt Macy * should no longer have any blocks allocated on it. 2096eda14cbcSMatt Macy */ 2097eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2098eda14cbcSMatt Macy if (vd->vdev_stat.vs_alloc != 0) 2099eda14cbcSMatt Macy error = spa_reset_logs(spa); 2100eda14cbcSMatt Macy 2101eda14cbcSMatt Macy *txg = spa_vdev_config_enter(spa); 2102eda14cbcSMatt Macy 2103eda14cbcSMatt Macy if (error != 0) { 2104eda14cbcSMatt Macy metaslab_group_activate(mg); 2105184c1b94SMartin Matuska ASSERT3P(vd->vdev_log_mg, ==, NULL); 2106eda14cbcSMatt Macy return (error); 2107eda14cbcSMatt Macy } 2108eda14cbcSMatt Macy ASSERT0(vd->vdev_stat.vs_alloc); 2109eda14cbcSMatt Macy 2110eda14cbcSMatt Macy /* 2111eda14cbcSMatt Macy * The evacuation succeeded. Remove any remaining MOS metadata 2112eda14cbcSMatt Macy * associated with this vdev, and wait for these changes to sync. 2113eda14cbcSMatt Macy */ 2114eda14cbcSMatt Macy vd->vdev_removing = B_TRUE; 2115eda14cbcSMatt Macy 2116eda14cbcSMatt Macy vdev_dirty_leaves(vd, VDD_DTL, *txg); 2117eda14cbcSMatt Macy vdev_config_dirty(vd); 2118eda14cbcSMatt Macy 2119eda14cbcSMatt Macy /* 2120eda14cbcSMatt Macy * When the log space map feature is enabled we look at 2121eda14cbcSMatt Macy * the vdev's top_zap to find the on-disk flush data of 2122eda14cbcSMatt Macy * the metaslab we just flushed. Thus, while removing a 2123eda14cbcSMatt Macy * log vdev we make sure to call vdev_metaslab_fini() 2124eda14cbcSMatt Macy * first, which removes all metaslabs of this vdev from 2125eda14cbcSMatt Macy * spa_metaslabs_by_flushed before vdev_remove_empty() 2126eda14cbcSMatt Macy * destroys the top_zap of this log vdev. 2127eda14cbcSMatt Macy * 2128eda14cbcSMatt Macy * This avoids the scenario where we flush a metaslab 2129eda14cbcSMatt Macy * from the log vdev being removed that doesn't have a 2130eda14cbcSMatt Macy * top_zap and end up failing to lookup its on-disk flush 2131eda14cbcSMatt Macy * data. 2132eda14cbcSMatt Macy * 2133eda14cbcSMatt Macy * We don't call metaslab_group_destroy() right away 2134eda14cbcSMatt Macy * though (it will be called in vdev_free() later) as 2135eda14cbcSMatt Macy * during metaslab_sync() of metaslabs from other vdevs 2136eda14cbcSMatt Macy * we may touch the metaslab group of this vdev through 2137eda14cbcSMatt Macy * metaslab_class_histogram_verify() 2138eda14cbcSMatt Macy */ 2139eda14cbcSMatt Macy vdev_metaslab_fini(vd); 2140eda14cbcSMatt Macy 2141eda14cbcSMatt Macy spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG); 2142eda14cbcSMatt Macy *txg = spa_vdev_config_enter(spa); 2143eda14cbcSMatt Macy 2144eda14cbcSMatt Macy sysevent_t *ev = spa_event_create(spa, vd, NULL, 2145eda14cbcSMatt Macy ESC_ZFS_VDEV_REMOVE_DEV); 2146eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2147eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 2148eda14cbcSMatt Macy 2149eda14cbcSMatt Macy /* The top ZAP should have been destroyed by vdev_remove_empty. */ 2150eda14cbcSMatt Macy ASSERT0(vd->vdev_top_zap); 2151eda14cbcSMatt Macy /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */ 2152eda14cbcSMatt Macy ASSERT0(vd->vdev_leaf_zap); 2153eda14cbcSMatt Macy 2154eda14cbcSMatt Macy (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 2155eda14cbcSMatt Macy 2156eda14cbcSMatt Macy if (list_link_active(&vd->vdev_state_dirty_node)) 2157eda14cbcSMatt Macy vdev_state_clean(vd); 2158eda14cbcSMatt Macy if (list_link_active(&vd->vdev_config_dirty_node)) 2159eda14cbcSMatt Macy vdev_config_clean(vd); 2160eda14cbcSMatt Macy 2161eda14cbcSMatt Macy ASSERT0(vd->vdev_stat.vs_alloc); 2162eda14cbcSMatt Macy 2163eda14cbcSMatt Macy /* 2164eda14cbcSMatt Macy * Clean up the vdev namespace. 2165eda14cbcSMatt Macy */ 2166eda14cbcSMatt Macy vdev_remove_make_hole_and_free(vd); 2167eda14cbcSMatt Macy 2168eda14cbcSMatt Macy if (ev != NULL) 2169eda14cbcSMatt Macy spa_event_post(ev); 2170eda14cbcSMatt Macy 2171eda14cbcSMatt Macy return (0); 2172eda14cbcSMatt Macy } 2173eda14cbcSMatt Macy 2174eda14cbcSMatt Macy static int 2175eda14cbcSMatt Macy spa_vdev_remove_top_check(vdev_t *vd) 2176eda14cbcSMatt Macy { 2177eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2178eda14cbcSMatt Macy 2179eda14cbcSMatt Macy if (vd != vd->vdev_top) 2180eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 2181eda14cbcSMatt Macy 2182eda14cbcSMatt Macy if (!vdev_is_concrete(vd)) 2183eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 2184eda14cbcSMatt Macy 2185eda14cbcSMatt Macy if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL)) 2186eda14cbcSMatt Macy return (SET_ERROR(ENOTSUP)); 2187eda14cbcSMatt Macy 2188681ce946SMartin Matuska /* 2189681ce946SMartin Matuska * This device is already being removed 2190681ce946SMartin Matuska */ 2191681ce946SMartin Matuska if (vd->vdev_removing) 2192681ce946SMartin Matuska return (SET_ERROR(EALREADY)); 21937877fdebSMatt Macy 21947877fdebSMatt Macy metaslab_class_t *mc = vd->vdev_mg->mg_class; 21957877fdebSMatt Macy metaslab_class_t *normal = spa_normal_class(spa); 21967877fdebSMatt Macy if (mc != normal) { 21977877fdebSMatt Macy /* 21987877fdebSMatt Macy * Space allocated from the special (or dedup) class is 21997877fdebSMatt Macy * included in the DMU's space usage, but it's not included 22007877fdebSMatt Macy * in spa_dspace (or dsl_pool_adjustedsize()). Therefore 22017877fdebSMatt Macy * there is always at least as much free space in the normal 22027877fdebSMatt Macy * class, as is allocated from the special (and dedup) class. 22037877fdebSMatt Macy * As a backup check, we will return ENOSPC if this is 22047877fdebSMatt Macy * violated. See also spa_update_dspace(). 22057877fdebSMatt Macy */ 22067877fdebSMatt Macy uint64_t available = metaslab_class_get_space(normal) - 22077877fdebSMatt Macy metaslab_class_get_alloc(normal); 22087877fdebSMatt Macy ASSERT3U(available, >=, vd->vdev_stat.vs_alloc); 22097877fdebSMatt Macy if (available < vd->vdev_stat.vs_alloc) 22107877fdebSMatt Macy return (SET_ERROR(ENOSPC)); 2211681ce946SMartin Matuska } else if (!vd->vdev_noalloc) { 2212eda14cbcSMatt Macy /* available space in the pool's normal class */ 2213eda14cbcSMatt Macy uint64_t available = dsl_dir_space_available( 2214eda14cbcSMatt Macy spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE); 2215681ce946SMartin Matuska if (available < vd->vdev_stat.vs_dspace) 2216eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 2217eda14cbcSMatt Macy } 2218eda14cbcSMatt Macy 2219eda14cbcSMatt Macy /* 2220eda14cbcSMatt Macy * There can not be a removal in progress. 2221eda14cbcSMatt Macy */ 2222eda14cbcSMatt Macy if (spa->spa_removing_phys.sr_state == DSS_SCANNING) 2223eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 2224eda14cbcSMatt Macy 2225eda14cbcSMatt Macy /* 2226eda14cbcSMatt Macy * The device must have all its data. 2227eda14cbcSMatt Macy */ 2228eda14cbcSMatt Macy if (!vdev_dtl_empty(vd, DTL_MISSING) || 2229eda14cbcSMatt Macy !vdev_dtl_empty(vd, DTL_OUTAGE)) 2230eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 2231eda14cbcSMatt Macy 2232eda14cbcSMatt Macy /* 2233eda14cbcSMatt Macy * The device must be healthy. 2234eda14cbcSMatt Macy */ 2235eda14cbcSMatt Macy if (!vdev_readable(vd)) 2236eda14cbcSMatt Macy return (SET_ERROR(EIO)); 2237eda14cbcSMatt Macy 2238eda14cbcSMatt Macy /* 2239eda14cbcSMatt Macy * All vdevs in normal class must have the same ashift. 2240eda14cbcSMatt Macy */ 2241eda14cbcSMatt Macy if (spa->spa_max_ashift != spa->spa_min_ashift) { 2242eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 2243eda14cbcSMatt Macy } 2244eda14cbcSMatt Macy 2245eda14cbcSMatt Macy /* 2246180f8225SMatt Macy * A removed special/dedup vdev must have same ashift as normal class. 2247180f8225SMatt Macy */ 2248180f8225SMatt Macy ASSERT(!vd->vdev_islog); 2249180f8225SMatt Macy if (vd->vdev_alloc_bias != VDEV_BIAS_NONE && 2250180f8225SMatt Macy vd->vdev_ashift != spa->spa_max_ashift) { 2251180f8225SMatt Macy return (SET_ERROR(EINVAL)); 2252180f8225SMatt Macy } 2253180f8225SMatt Macy 2254180f8225SMatt Macy /* 2255eda14cbcSMatt Macy * All vdevs in normal class must have the same ashift 22567877fdebSMatt Macy * and not be raidz or draid. 2257eda14cbcSMatt Macy */ 2258eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2259eda14cbcSMatt Macy for (uint64_t id = 0; id < rvd->vdev_children; id++) { 2260eda14cbcSMatt Macy vdev_t *cvd = rvd->vdev_child[id]; 2261180f8225SMatt Macy 2262180f8225SMatt Macy /* 2263180f8225SMatt Macy * A removed special/dedup vdev must have the same ashift 2264180f8225SMatt Macy * across all vdevs in its class. 2265180f8225SMatt Macy */ 2266180f8225SMatt Macy if (vd->vdev_alloc_bias != VDEV_BIAS_NONE && 2267180f8225SMatt Macy cvd->vdev_alloc_bias == vd->vdev_alloc_bias && 2268180f8225SMatt Macy cvd->vdev_ashift != vd->vdev_ashift) { 2269180f8225SMatt Macy return (SET_ERROR(EINVAL)); 2270180f8225SMatt Macy } 2271180f8225SMatt Macy if (cvd->vdev_ashift != 0 && 2272180f8225SMatt Macy cvd->vdev_alloc_bias == VDEV_BIAS_NONE) 2273eda14cbcSMatt Macy ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift); 2274eda14cbcSMatt Macy if (!vdev_is_concrete(cvd)) 2275eda14cbcSMatt Macy continue; 22767877fdebSMatt Macy if (vdev_get_nparity(cvd) != 0) 2277eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 2278eda14cbcSMatt Macy /* 2279eda14cbcSMatt Macy * Need the mirror to be mirror of leaf vdevs only 2280eda14cbcSMatt Macy */ 2281eda14cbcSMatt Macy if (cvd->vdev_ops == &vdev_mirror_ops) { 2282eda14cbcSMatt Macy for (uint64_t cid = 0; 2283eda14cbcSMatt Macy cid < cvd->vdev_children; cid++) { 2284eda14cbcSMatt Macy if (!cvd->vdev_child[cid]->vdev_ops-> 2285eda14cbcSMatt Macy vdev_op_leaf) 2286eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 2287eda14cbcSMatt Macy } 2288eda14cbcSMatt Macy } 2289eda14cbcSMatt Macy } 2290eda14cbcSMatt Macy 2291eda14cbcSMatt Macy return (0); 2292eda14cbcSMatt Macy } 2293eda14cbcSMatt Macy 2294eda14cbcSMatt Macy /* 2295eda14cbcSMatt Macy * Initiate removal of a top-level vdev, reducing the total space in the pool. 2296eda14cbcSMatt Macy * The config lock is held for the specified TXG. Once initiated, 2297eda14cbcSMatt Macy * evacuation of all allocated space (copying it to other vdevs) happens 2298eda14cbcSMatt Macy * in the background (see spa_vdev_remove_thread()), and can be canceled 2299eda14cbcSMatt Macy * (see spa_vdev_remove_cancel()). If successful, the vdev will 2300eda14cbcSMatt Macy * be transformed to an indirect vdev (see spa_vdev_remove_complete()). 2301eda14cbcSMatt Macy */ 2302eda14cbcSMatt Macy static int 2303eda14cbcSMatt Macy spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) 2304eda14cbcSMatt Macy { 2305eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2306681ce946SMartin Matuska boolean_t set_noalloc = B_FALSE; 2307eda14cbcSMatt Macy int error; 2308eda14cbcSMatt Macy 2309eda14cbcSMatt Macy /* 2310eda14cbcSMatt Macy * Check for errors up-front, so that we don't waste time 2311eda14cbcSMatt Macy * passivating the metaslab group and clearing the ZIL if there 2312eda14cbcSMatt Macy * are errors. 2313eda14cbcSMatt Macy */ 2314eda14cbcSMatt Macy error = spa_vdev_remove_top_check(vd); 2315eda14cbcSMatt Macy 2316eda14cbcSMatt Macy /* 2317eda14cbcSMatt Macy * Stop allocating from this vdev. Note that we must check 2318eda14cbcSMatt Macy * that this is not the only device in the pool before 2319eda14cbcSMatt Macy * passivating, otherwise we will not be able to make 2320eda14cbcSMatt Macy * progress because we can't allocate from any vdevs. 2321eda14cbcSMatt Macy * The above check for sufficient free space serves this 2322eda14cbcSMatt Macy * purpose. 2323eda14cbcSMatt Macy */ 2324681ce946SMartin Matuska if (error == 0 && !vd->vdev_noalloc) { 2325681ce946SMartin Matuska set_noalloc = B_TRUE; 2326681ce946SMartin Matuska error = vdev_passivate(vd, txg); 2327681ce946SMartin Matuska } 2328eda14cbcSMatt Macy 2329681ce946SMartin Matuska if (error != 0) 2330681ce946SMartin Matuska return (error); 2331eda14cbcSMatt Macy 2332eda14cbcSMatt Macy /* 2333eda14cbcSMatt Macy * We stop any initializing and TRIM that is currently in progress 2334eda14cbcSMatt Macy * but leave the state as "active". This will allow the process to 2335eda14cbcSMatt Macy * resume if the removal is canceled sometime later. 2336eda14cbcSMatt Macy */ 2337681ce946SMartin Matuska 2338681ce946SMartin Matuska spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG); 2339681ce946SMartin Matuska 2340eda14cbcSMatt Macy vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE); 2341eda14cbcSMatt Macy vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE); 2342eda14cbcSMatt Macy vdev_autotrim_stop_wait(vd); 2343eda14cbcSMatt Macy 2344eda14cbcSMatt Macy *txg = spa_vdev_config_enter(spa); 2345eda14cbcSMatt Macy 2346eda14cbcSMatt Macy /* 2347eda14cbcSMatt Macy * Things might have changed while the config lock was dropped 2348eda14cbcSMatt Macy * (e.g. space usage). Check for errors again. 2349eda14cbcSMatt Macy */ 2350eda14cbcSMatt Macy error = spa_vdev_remove_top_check(vd); 2351eda14cbcSMatt Macy 2352eda14cbcSMatt Macy if (error != 0) { 2353681ce946SMartin Matuska if (set_noalloc) 2354681ce946SMartin Matuska vdev_activate(vd); 2355eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART); 2356eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_TRIM_RESTART); 2357eda14cbcSMatt Macy spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART); 2358eda14cbcSMatt Macy return (error); 2359eda14cbcSMatt Macy } 2360eda14cbcSMatt Macy 2361eda14cbcSMatt Macy vd->vdev_removing = B_TRUE; 2362eda14cbcSMatt Macy 2363eda14cbcSMatt Macy vdev_dirty_leaves(vd, VDD_DTL, *txg); 2364eda14cbcSMatt Macy vdev_config_dirty(vd); 2365eda14cbcSMatt Macy dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg); 2366eda14cbcSMatt Macy dsl_sync_task_nowait(spa->spa_dsl_pool, 23672c48331dSMatt Macy vdev_remove_initiate_sync, (void *)(uintptr_t)vd->vdev_id, tx); 2368eda14cbcSMatt Macy dmu_tx_commit(tx); 2369eda14cbcSMatt Macy 2370eda14cbcSMatt Macy return (0); 2371eda14cbcSMatt Macy } 2372eda14cbcSMatt Macy 2373eda14cbcSMatt Macy /* 2374eda14cbcSMatt Macy * Remove a device from the pool. 2375eda14cbcSMatt Macy * 2376eda14cbcSMatt Macy * Removing a device from the vdev namespace requires several steps 2377eda14cbcSMatt Macy * and can take a significant amount of time. As a result we use 2378eda14cbcSMatt Macy * the spa_vdev_config_[enter/exit] functions which allow us to 2379eda14cbcSMatt Macy * grab and release the spa_config_lock while still holding the namespace 2380eda14cbcSMatt Macy * lock. During each step the configuration is synced out. 2381eda14cbcSMatt Macy */ 2382eda14cbcSMatt Macy int 2383eda14cbcSMatt Macy spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 2384eda14cbcSMatt Macy { 2385eda14cbcSMatt Macy vdev_t *vd; 2386eda14cbcSMatt Macy nvlist_t **spares, **l2cache, *nv; 2387eda14cbcSMatt Macy uint64_t txg = 0; 2388eda14cbcSMatt Macy uint_t nspares, nl2cache; 2389eda14cbcSMatt Macy int error = 0, error_log; 2390eda14cbcSMatt Macy boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 2391eda14cbcSMatt Macy sysevent_t *ev = NULL; 2392a0b956f5SMartin Matuska const char *vd_type = NULL; 2393a0b956f5SMartin Matuska char *vd_path = NULL; 2394eda14cbcSMatt Macy 2395eda14cbcSMatt Macy ASSERT(spa_writeable(spa)); 2396eda14cbcSMatt Macy 2397eda14cbcSMatt Macy if (!locked) 2398eda14cbcSMatt Macy txg = spa_vdev_enter(spa); 2399eda14cbcSMatt Macy 2400eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2401eda14cbcSMatt Macy if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) { 2402eda14cbcSMatt Macy error = (spa_has_checkpoint(spa)) ? 2403eda14cbcSMatt Macy ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT; 2404eda14cbcSMatt Macy 2405eda14cbcSMatt Macy if (!locked) 2406eda14cbcSMatt Macy return (spa_vdev_exit(spa, NULL, txg, error)); 2407eda14cbcSMatt Macy 2408eda14cbcSMatt Macy return (error); 2409eda14cbcSMatt Macy } 2410eda14cbcSMatt Macy 2411eda14cbcSMatt Macy vd = spa_lookup_by_guid(spa, guid, B_FALSE); 2412eda14cbcSMatt Macy 2413eda14cbcSMatt Macy if (spa->spa_spares.sav_vdevs != NULL && 2414eda14cbcSMatt Macy nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 2415eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 2416eda14cbcSMatt Macy (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 2417eda14cbcSMatt Macy /* 2418eda14cbcSMatt Macy * Only remove the hot spare if it's not currently in use 2419eda14cbcSMatt Macy * in this pool. 2420eda14cbcSMatt Macy */ 2421eda14cbcSMatt Macy if (vd == NULL || unspare) { 24222a58b312SMartin Matuska const char *type; 24237877fdebSMatt Macy boolean_t draid_spare = B_FALSE; 24247877fdebSMatt Macy 24257877fdebSMatt Macy if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) 24267877fdebSMatt Macy == 0 && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) 24277877fdebSMatt Macy draid_spare = B_TRUE; 24287877fdebSMatt Macy 24297877fdebSMatt Macy if (vd == NULL && draid_spare) { 24307877fdebSMatt Macy error = SET_ERROR(ENOTSUP); 24317877fdebSMatt Macy } else { 2432eda14cbcSMatt Macy if (vd == NULL) 24337877fdebSMatt Macy vd = spa_lookup_by_guid(spa, 24347877fdebSMatt Macy guid, B_TRUE); 2435eda14cbcSMatt Macy ev = spa_event_create(spa, vd, NULL, 2436eda14cbcSMatt Macy ESC_ZFS_VDEV_REMOVE_AUX); 2437eda14cbcSMatt Macy 2438eda14cbcSMatt Macy vd_type = VDEV_TYPE_SPARE; 2439eda14cbcSMatt Macy vd_path = spa_strdup(fnvlist_lookup_string( 2440eda14cbcSMatt Macy nv, ZPOOL_CONFIG_PATH)); 2441eda14cbcSMatt Macy spa_vdev_remove_aux(spa->spa_spares.sav_config, 2442eda14cbcSMatt Macy ZPOOL_CONFIG_SPARES, spares, nspares, nv); 2443eda14cbcSMatt Macy spa_load_spares(spa); 2444eda14cbcSMatt Macy spa->spa_spares.sav_sync = B_TRUE; 24457877fdebSMatt Macy } 2446eda14cbcSMatt Macy } else { 2447eda14cbcSMatt Macy error = SET_ERROR(EBUSY); 2448eda14cbcSMatt Macy } 2449eda14cbcSMatt Macy } else if (spa->spa_l2cache.sav_vdevs != NULL && 2450eda14cbcSMatt Macy nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 2451eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 2452eda14cbcSMatt Macy (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 2453eda14cbcSMatt Macy vd_type = VDEV_TYPE_L2CACHE; 2454eda14cbcSMatt Macy vd_path = spa_strdup(fnvlist_lookup_string( 2455eda14cbcSMatt Macy nv, ZPOOL_CONFIG_PATH)); 2456eda14cbcSMatt Macy /* 2457eda14cbcSMatt Macy * Cache devices can always be removed. 2458eda14cbcSMatt Macy */ 2459eda14cbcSMatt Macy vd = spa_lookup_by_guid(spa, guid, B_TRUE); 2460eda14cbcSMatt Macy 2461eda14cbcSMatt Macy /* 2462eda14cbcSMatt Macy * Stop trimming the cache device. We need to release the 2463eda14cbcSMatt Macy * config lock to allow the syncing of TRIM transactions 2464eda14cbcSMatt Macy * without releasing the spa_namespace_lock. The same 2465eda14cbcSMatt Macy * strategy is employed in spa_vdev_remove_top(). 2466eda14cbcSMatt Macy */ 2467eda14cbcSMatt Macy spa_vdev_config_exit(spa, NULL, 2468eda14cbcSMatt Macy txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 2469eda14cbcSMatt Macy mutex_enter(&vd->vdev_trim_lock); 2470eda14cbcSMatt Macy vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL); 2471eda14cbcSMatt Macy mutex_exit(&vd->vdev_trim_lock); 2472eda14cbcSMatt Macy txg = spa_vdev_config_enter(spa); 2473eda14cbcSMatt Macy 2474eda14cbcSMatt Macy ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX); 2475eda14cbcSMatt Macy spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 2476eda14cbcSMatt Macy ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 2477eda14cbcSMatt Macy spa_load_l2cache(spa); 2478eda14cbcSMatt Macy spa->spa_l2cache.sav_sync = B_TRUE; 2479eda14cbcSMatt Macy } else if (vd != NULL && vd->vdev_islog) { 2480eda14cbcSMatt Macy ASSERT(!locked); 2481eda14cbcSMatt Macy vd_type = VDEV_TYPE_LOG; 2482eda14cbcSMatt Macy vd_path = spa_strdup((vd->vdev_path != NULL) ? 2483eda14cbcSMatt Macy vd->vdev_path : "-"); 2484eda14cbcSMatt Macy error = spa_vdev_remove_log(vd, &txg); 2485eda14cbcSMatt Macy } else if (vd != NULL) { 2486eda14cbcSMatt Macy ASSERT(!locked); 2487eda14cbcSMatt Macy error = spa_vdev_remove_top(vd, &txg); 2488eda14cbcSMatt Macy } else { 2489eda14cbcSMatt Macy /* 2490eda14cbcSMatt Macy * There is no vdev of any kind with the specified guid. 2491eda14cbcSMatt Macy */ 2492eda14cbcSMatt Macy error = SET_ERROR(ENOENT); 2493eda14cbcSMatt Macy } 2494eda14cbcSMatt Macy 2495eda14cbcSMatt Macy error_log = error; 2496eda14cbcSMatt Macy 2497eda14cbcSMatt Macy if (!locked) 2498eda14cbcSMatt Macy error = spa_vdev_exit(spa, NULL, txg, error); 2499eda14cbcSMatt Macy 2500eda14cbcSMatt Macy /* 2501eda14cbcSMatt Macy * Logging must be done outside the spa config lock. Otherwise, 2502eda14cbcSMatt Macy * this code path could end up holding the spa config lock while 2503eda14cbcSMatt Macy * waiting for a txg_sync so it can write to the internal log. 2504eda14cbcSMatt Macy * Doing that would prevent the txg sync from actually happening, 2505eda14cbcSMatt Macy * causing a deadlock. 2506eda14cbcSMatt Macy */ 2507eda14cbcSMatt Macy if (error_log == 0 && vd_type != NULL && vd_path != NULL) { 2508eda14cbcSMatt Macy spa_history_log_internal(spa, "vdev remove", NULL, 2509eda14cbcSMatt Macy "%s vdev (%s) %s", spa_name(spa), vd_type, vd_path); 2510eda14cbcSMatt Macy } 2511eda14cbcSMatt Macy if (vd_path != NULL) 2512eda14cbcSMatt Macy spa_strfree(vd_path); 2513eda14cbcSMatt Macy 2514eda14cbcSMatt Macy if (ev != NULL) 2515eda14cbcSMatt Macy spa_event_post(ev); 2516eda14cbcSMatt Macy 2517eda14cbcSMatt Macy return (error); 2518eda14cbcSMatt Macy } 2519eda14cbcSMatt Macy 2520eda14cbcSMatt Macy int 2521eda14cbcSMatt Macy spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs) 2522eda14cbcSMatt Macy { 2523eda14cbcSMatt Macy prs->prs_state = spa->spa_removing_phys.sr_state; 2524eda14cbcSMatt Macy 2525eda14cbcSMatt Macy if (prs->prs_state == DSS_NONE) 2526eda14cbcSMatt Macy return (SET_ERROR(ENOENT)); 2527eda14cbcSMatt Macy 2528eda14cbcSMatt Macy prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev; 2529eda14cbcSMatt Macy prs->prs_start_time = spa->spa_removing_phys.sr_start_time; 2530eda14cbcSMatt Macy prs->prs_end_time = spa->spa_removing_phys.sr_end_time; 2531eda14cbcSMatt Macy prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy; 2532eda14cbcSMatt Macy prs->prs_copied = spa->spa_removing_phys.sr_copied; 2533eda14cbcSMatt Macy 2534eda14cbcSMatt Macy prs->prs_mapping_memory = 0; 2535eda14cbcSMatt Macy uint64_t indirect_vdev_id = 2536eda14cbcSMatt Macy spa->spa_removing_phys.sr_prev_indirect_vdev; 2537eda14cbcSMatt Macy while (indirect_vdev_id != -1) { 2538eda14cbcSMatt Macy vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id]; 2539eda14cbcSMatt Macy vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 2540eda14cbcSMatt Macy vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 2541eda14cbcSMatt Macy 2542eda14cbcSMatt Macy ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2543eda14cbcSMatt Macy prs->prs_mapping_memory += vdev_indirect_mapping_size(vim); 2544eda14cbcSMatt Macy indirect_vdev_id = vic->vic_prev_indirect_vdev; 2545eda14cbcSMatt Macy } 2546eda14cbcSMatt Macy 2547eda14cbcSMatt Macy return (0); 2548eda14cbcSMatt Macy } 2549eda14cbcSMatt Macy 2550eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW, 2551eda14cbcSMatt Macy "Ignore hard IO errors when removing device"); 2552eda14cbcSMatt Macy 2553be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, UINT, ZMOD_RW, 2554eda14cbcSMatt Macy "Largest contiguous segment to allocate when removing device"); 2555eda14cbcSMatt Macy 2556be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, UINT, ZMOD_RW, 2557eda14cbcSMatt Macy "Largest span of free chunks a remap segment can span"); 2558eda14cbcSMatt Macy 2559be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, UINT, ZMOD_RW, 2560eda14cbcSMatt Macy "Pause device removal after this many bytes are copied " 2561eda14cbcSMatt Macy "(debug use only - causes removal to hang)"); 2562eda14cbcSMatt Macy 2563eda14cbcSMatt Macy EXPORT_SYMBOL(free_from_removing_vdev); 2564eda14cbcSMatt Macy EXPORT_SYMBOL(spa_removal_get_stats); 2565eda14cbcSMatt Macy EXPORT_SYMBOL(spa_remove_init); 2566eda14cbcSMatt Macy EXPORT_SYMBOL(spa_restart_removal); 2567eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_removal_destroy); 2568eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_remove); 2569eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_remove_cancel); 2570eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_remove_suspend); 2571eda14cbcSMatt Macy EXPORT_SYMBOL(svr_sync); 2572